/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * from: NetBSD: hme.c,v 1.29 2002/05/05 03:02:38 thorpej Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/hme/if_hme.c 137982 2004-11-22 06:46:30Z yongari $");

/*
 * HME Ethernet module driver.
 *
 * The HME is e.g. part of the PCIO PCI multi-function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
 * for this offset: mbuf clusters are usually on about 2^11 boundaries, and 2
 * bytes are skipped to make sure the header after the ethernet header is
 * aligned on a natural boundary (the 14 byte ethernet header plus the 2 byte
 * offset ends on a 16 byte boundary), so this ensures minimal wastage in the
 * most common case.
 *
 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
 * maximum packet size (this is not verified). Buffers starting on odd
 * boundaries must be mapped so that the burst can start on a natural boundary.
 *
 * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
 * offloading. In reality, the same technique can be used for UDP datagrams
 * too. However, the hardware doesn't compensate the checksum of UDP
 * datagrams, which can yield a result of 0x0. As a safeguard, UDP checksum
 * offload is disabled by default. It can be reactivated by setting the
 * special link option link0 with ifconfig(8).
 */
#define HME_CSUM_FEATURES	(CSUM_TCP)
#define HMEDEBUG
#define	KTR_HME		KTR_CT2		/* XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/hme/if_hmereg.h>
#include <dev/hme/if_hmevar.h>

static void	hme_start(struct ifnet *);
static void	hme_start_locked(struct ifnet *);
static void	hme_stop(struct hme_softc *);
static int	hme_ioctl(struct ifnet *, u_long, caddr_t);
static void	hme_tick(void *);
static void	hme_watchdog(struct ifnet *);
static void	hme_init(void *);
static void	hme_init_locked(void *);
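/*
 * Note: the *_locked variants of hme_start() and hme_init() above are
 * expected to be called with the driver lock already held;
 * hme_init_locked() asserts this below.
 */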
static int	hme_add_rxbuf(struct hme_softc *, unsigned int, int);
static int	hme_meminit(struct hme_softc *);
static int	hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
    u_int32_t, u_int32_t);
static void	hme_mifinit(struct hme_softc *);
static void	hme_reset(struct hme_softc *);
static void	hme_setladrf(struct hme_softc *, int);

static int	hme_mediachange(struct ifnet *);
static void	hme_mediastatus(struct ifnet *, struct ifmediareq *);

static int	hme_load_txmbuf(struct hme_softc *, struct mbuf *);
static void	hme_read(struct hme_softc *, int, int, u_int32_t);
static void	hme_eint(struct hme_softc *, u_int);
static void	hme_rint(struct hme_softc *);
static void	hme_tint(struct hme_softc *);
static void	hme_txcksum(struct mbuf *, u_int32_t *);
static void	hme_rxcksum(struct mbuf *, u_int32_t);

static void	hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
static void	hme_rxdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void	hme_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);

devclass_t hme_devclass;

static int hme_nerr;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);

#define	HME_SPC_READ_4(spc, sc, offs) \
	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (offs))
#define	HME_SPC_WRITE_4(spc, sc, offs, v) \
	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (offs), (v))

#define	HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
#define	HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define	HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
#define	HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define	HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
#define	HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define	HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
#define	HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define	HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
#define	HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))

#define	HME_MAXERR	5
#define	HME_WHINE(dev, ...) do {					\
	if (hme_nerr++ < HME_MAXERR)					\
		device_printf(dev, __VA_ARGS__);			\
	if (hme_nerr == HME_MAXERR) {					\
		device_printf(dev, "too many errors; not reporting any "\
		    "more\n");						\
	}								\
} while(0)

/* Support oversized VLAN frames. */
#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)

int
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	bus_size_t size;
	int error, rdesc, tdesc, i;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the DMA bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles, tags and offsets (split for SBus compatibility):
	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
	 *	sc_erx{t,h,o}	(Receiver Unit registers)
	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
	 *	sc_mac{t,h,o}	(MAC registers)
	 *	sc_mif{t,h,o}	(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 */

	HME_LOCK_ASSERT(sc, MA_NOTOWNED);
	/* Make sure the chip is stopped. */
	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);

	/*
	 * Allocate DMA capable memory.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of HME_N*DESC.
	 */
	size = 4096;

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
	    &Giant, &sc->sc_cdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_rdmatag);
	if (error)
		goto fail_ctag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	/* Allocate control/TX DMA buffer */
	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
	    0, &sc->sc_cdmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
		goto fail_ttag;
	}

	/* Load the buffer */
	sc->sc_rb.rb_dmabase = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
	    sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
	    sc->sc_rb.rb_dmabase == 0) {
		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
		    error);
		goto fail_free;
	}
	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
	    sc->sc_rb.rb_dmabase);

	/*
	 * Prepare the RX descriptors. rdesc serves as marker for the last
	 * processed descriptor and may be used later on.
	 */
	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
		error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
		if (error != 0)
			goto fail_rxdesc;
	}
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	if (error != 0)
		goto fail_rxdesc;
	/* Same for the TX descs. */
	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
		if (error != 0)
			goto fail_txdesc;
	}

	sc->sc_csum_features = HME_CSUM_FEATURES;
	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	ifp->if_watchdog = hme_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
	ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
	IFQ_SET_READY(&ifp->if_snd);

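	/*
	 * Set up the MIF before probing for PHYs: mii_phy_probe() below
	 * reaches the PHYs through the management interface programmed here.
	 */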
	HME_LOCK(sc);
	hme_mifinit(sc);
	HME_UNLOCK(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
	    hme_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxdesc;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external one on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

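	/* CALLOUT_MPSAFE: the tick callout may fire without Giant held. */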
	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
	return (0);

fail_txdesc:
	for (i = 0; i < tdesc; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
fail_rxdesc:
	for (i = 0; i < rdesc; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
fail_free:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

void
hme_detach(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	HME_LOCK_ASSERT(sc, MA_NOTOWNED);

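	/* Pull the interface out of the stack before touching the hardware. */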
	ether_ifdetach(ifp);
	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < HME_NTXQ; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < HME_NRXDESC; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

void
hme_suspend(struct hme_softc *sc)
{

	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);
}

void
hme_resume(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	HME_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) != 0)
		hme_init_locked(ifp);
	HME_UNLOCK(sc);
}

static void
hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
}

static void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

static void
hme_reset(struct hme_softc *sc)
{
	int s;

	HME_LOCK(sc);
	s = splnet();
	hme_init_locked(sc);
	splx(s);
	HME_UNLOCK(sc);
}

static void
hme_stop(struct hme_softc *sc)
{
	u_int32_t v;
	int n;

	callout_stop(&sc->sc_tick_ch);

	/* Reset transmitter and receiver */
	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
	    HME_SEB_RESET_ERX);

	for (n = 0; n < 20; n++) {
		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}

static void
hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsize, int error)
{
	bus_addr_t *a = xsc;

	KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
	if (error != 0)
		return;
	*a = segs[0].ds_addr;
}

/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
static __inline void
hme_discard_rxbuf(struct hme_softc *sc, int ix)
{

	/*
	 * Dropped a packet, reinitialize the descriptor and turn the
	 * ownership back to the hardware.
	 */
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
}

static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
	struct hme_rxdesc *rd;
	struct mbuf *m;
	bus_addr_t ba;
	bus_dmamap_t map;
	uintptr_t b;
	int a, unmap;

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		/*
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		 */
		hme_discard_rxbuf(sc, ri);
		return (0);
	}
	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	b = mtod(m, uintptr_t);
	/*
	 * Required alignment boundary. At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary we might need to extend this.
	 */
	a = max(HME_MINRXALIGN, sc->sc_burst);
	/*
	 * Make sure the buffer is suitably aligned. The 2 byte offset is
	 * removed when the mbuf is handed up. XXX: this ensures at least
	 * 16 byte alignment of the header adjacent to the ethernet header,
	 * which should be sufficient in all cases. Nevertheless, this
	 * second-guesses ALIGN().
	 */
	m_adj(m, roundup2(b, a) - b);
	if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    m, hme_rxdma_callback, &ba, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	if (unmap) {
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	}
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
	rd->hrx_m = m;
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
	return (0);
}

static int
hme_meminit(struct hme_softc *sc)
{
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	int error;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_NTXDESC * HME_XD_SIZE;
	dma += HME_NTXDESC * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_NRXDESC * HME_XD_SIZE;
	dma += HME_NRXDESC * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < HME_NTXDESC; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
	for (i = 0; i < HME_NTXQ; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		if (td->htx_m != NULL) {
			m_freem(td->htx_m);
			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
			td->htx_m = NULL;
		}
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);
		if (error != 0)
			return (error);
	}

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
	    hr->rb_txddma);
	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
	    hr->rb_rxddma);
	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
	return (0);
}

static int
hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)
{
	int i = 0;

	val &= ~clr;
	val |= set;
	HME_MAC_WRITE_4(sc, reg, val);
	if (clr == 0 && set == 0)
		return (1);	/* just write, no bits to wait for */
	do {
		DELAY(100);
		i++;
		val = HME_MAC_READ_4(sc, reg);
		if (i > 40) {
			/* After about 4ms, we should have been done. */
			device_printf(sc->sc_dev, "timeout while writing to "
			    "MAC configuration register\n");
			return (0);
		}
	} while ((val & clr) != 0 && (val & set) != set);
	return (1);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
hme_init(void *xsc)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;
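	/* hme_init() just wraps hme_init_locked() with the driver lock. */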

	HME_LOCK(sc);
	hme_init_locked(sc);
	HME_UNLOCK(sc);
}

static void
hme_init_locked(void *xsc)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int8_t *ea;
	u_int32_t n, v;

	HME_LOCK_ASSERT(sc, MA_OWNED);
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	if (hme_meminit(sc) != 0) {
		device_printf(sc->sc_dev, "out of buffers; init aborted\n");
		return;
	}

	/* step 4. TX MAC registers & counters */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);

	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here.. */

	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc, 0);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	/* Transmit Descriptor ring size: in increments of 16 */
	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);

	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);

	/* step 8. Global Configuration & Interrupt Mask */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
		HME_SEB_STAT_HOSTTOTX |
		HME_SEB_STAT_RXTOHOST |
		HME_SEB_STAT_TXALL |
		HME_SEB_STAT_TXPERR |
		HME_SEB_STAT_RCNTEXP |
		HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	/*
	 * Blindly setting 64bit transfers may hang PCI cards (Cheerio?).
	 * Allowing 64bit transfers breaks TX checksum offload as well.
	 * It is not known whether this comes from a hardware bug or from
	 * the driver's DMA scheme.
	 *
	 * if (sc->sc_pci == 0)
	 *	v |= HME_SEB_CFG_64BIT;
	 */
	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);

	/* step 10. ERX Configuration */
	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	v &= ~HME_ERX_CFG_RINGSIZEMSK;
	switch (HME_NRXDESC) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA, fix RX first byte offset. */
	v &= ~HME_ERX_CFG_FBO_MASK;
	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
	/* RX TCP/UDP checksum offset */
	n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
	n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
	v |= n;
	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	/* If an external transceiver is connected, enable its MII drivers */
	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
		v |= HME_MAC_XIF_MIIENABLE;
	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	v &= ~(HME_MAC_RXCFG_DCRCS);
	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

#ifdef HMEDEBUG
	/* Debug: double-check. */
	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
	    HME_ERX_READ_4(sc, HME_ERXI_RING),
	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));
#endif

	/* Set the current media. */
	/*
	 * HME_UNLOCK(sc);
	 * mii_mediachg(sc->sc_mii);
	 * HME_LOCK(sc);
	 */

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
860 hme_start(ifp);
| 891 hme_start_locked(ifp);
|
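/*
 * Checksum-offload geometry, assuming option-less IPv4: the CSUMSTART
 * value programmed in step 10 above is counted in 16-bit words, so
 * (ETHER_HDR_LEN + sizeof(struct ip)) / 2 == (14 + 20) / 2 == 17 makes
 * the receiver begin summing at byte offset 34, the first byte of the
 * TCP/UDP header. hme_txcksum() below encodes the same start offset in
 * bytes ((ip_hl << 2) + ETHER_HDR_LEN == 34) plus a "stuff" offset of
 * 34 + csum_data telling the chip where to write the result; for TCP,
 * whose checksum field sits 16 bytes into the header, that is byte 50.
 */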
861} 862 863struct hme_txdma_arg { 864 struct hme_softc *hta_sc; 865 struct hme_txdesc *hta_htx; 866 int hta_ndescs; 867}; 868 869/* 870 * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf() 871 * are readable from the nearest burst boundary on (i.e. potentially before 872 * ds_addr) to the first boundary beyond the end. This is usually a safe 873 * assumption to make, but is not documented. 874 */ 875static void 876hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, 877 bus_size_t totsz, int error) 878{ 879 struct hme_txdma_arg *ta = xsc; 880 struct hme_txdesc *htx; 881 bus_size_t len = 0; 882 caddr_t txd; 883 u_int32_t flags = 0; 884 int i, tdhead, pci; 885 886 if (error != 0) 887 return; 888 889 tdhead = ta->hta_sc->sc_rb.rb_tdhead; 890 pci = ta->hta_sc->sc_pci; 891 txd = ta->hta_sc->sc_rb.rb_txd; 892 htx = ta->hta_htx; 893 894 if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) { 895 ta->hta_ndescs = -1; 896 return; 897 } 898 ta->hta_ndescs = nsegs; 899 900 for (i = 0; i < nsegs; i++) { 901 if (segs[i].ds_len == 0) 902 continue; 903 904 /* Fill the ring entry. */ 905 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len); 906 if (len == 0) 907 flags |= HME_XD_SOP; 908 if (len + segs[i].ds_len == totsz) 909 flags |= HME_XD_EOP; 910 CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, " 911 "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags, 912 (u_int)segs[i].ds_addr); 913 HME_XD_SETFLAGS(pci, txd, tdhead, flags); 914 HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr); 915 916 ta->hta_sc->sc_rb.rb_td_nbusy++; 917 htx->htx_lastdesc = tdhead; 918 tdhead = (tdhead + 1) % HME_NTXDESC; 919 len += segs[i].ds_len; 920 } 921 ta->hta_sc->sc_rb.rb_tdhead = tdhead; 922 KASSERT((flags & HME_XD_EOP) != 0, 923 ("hme_txdma_callback: missed end of packet!")); 924} 925 926/* TX TCP/UDP checksum */ 927static void 928hme_txcksum(struct mbuf *m, u_int32_t *cflags) 929{ 930 struct ip *ip; 931 u_int32_t offset, offset2; 932 caddr_t p; 933 934 for(; m && m->m_len == 0; m = m->m_next) 935 ; 936 if (m == NULL || m->m_len < ETHER_HDR_LEN) { 937 printf("hme_txcksum: m_len < ETHER_HDR_LEN\n"); 938 return; /* checksum will be corrupted */ 939 } 940 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) { 941 if (m->m_len != ETHER_HDR_LEN) { 942 printf("hme_txcksum: m_len != ETHER_HDR_LEN\n"); 943 return; /* checksum will be corrupted */ 944 } 945 /* XXX */ 946 for(m = m->m_next; m && m->m_len == 0; m = m->m_next) 947 ; 948 if (m == NULL) 949 return; /* checksum will be corrupted */ 950 ip = mtod(m, struct ip *); 951 } else { 952 p = mtod(m, caddr_t); 953 p += ETHER_HDR_LEN; 954 ip = (struct ip *)p; 955 } 956 offset2 = m->m_pkthdr.csum_data; 957 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; 958 *cflags = offset << HME_XD_TXCKSUM_SSHIFT; 959 *cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT); 960 *cflags |= HME_XD_TXCKSUM; 961} 962 963/* 964 * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and 965 * start the transmission. 966 * Returns 0 on success, -1 if there were not enough free descriptors to map 967 * the packet, or an errno otherwise. 
 968 */ 969static int 970hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0) 971{ 972 struct hme_txdma_arg cba; 973 struct hme_txdesc *td; 974 int error, si, ri; 975 u_int32_t flags, cflags = 0; 976 977 si = sc->sc_rb.rb_tdhead; 978 if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL) 979 return (-1); 980 if ((m0->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) 981 hme_txcksum(m0, &cflags); 982 td->htx_m = m0; 983 cba.hta_sc = sc; 984 cba.hta_htx = td; 985 if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap, 986 m0, hme_txdma_callback, &cba, 0)) != 0) 987 goto fail; 988 if (cba.hta_ndescs == -1) { 989 error = -1; 990 goto fail; 991 } 992 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap, 993 BUS_DMASYNC_PREWRITE); 994 995 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q); 996 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q); 997 998 /* Turn over descriptor ownership to the hme, back to front, so the chip never sees a partially-owned chain. */ 999 ri = sc->sc_rb.rb_tdhead; 1000 CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", 1001 ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri)); 1002 do { 1003 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC; 1004 flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) | 1005 HME_XD_OWN | cflags; 1006 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)", 1007 ri, si, flags); 1008 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags); 1009 } while (ri != si); 1010 1011 /* start the transmission. */ 1012 HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP); 1013 return (0); 1014fail: 1015 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap); 1016 return (error); 1017} 1018 1019/* 1020 * Pass a packet to the higher levels. 1021 */ 1022static void 1023hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags) 1024{ 1025 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1026 struct mbuf *m; 1027 1028 if (len <= sizeof(struct ether_header) || 1029 len > HME_MAX_FRAMESIZE) { 1030#ifdef HMEDEBUG 1031 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n", 1032 len); 1033#endif 1034 ifp->if_ierrors++; 1035 hme_discard_rxbuf(sc, ix); 1036 return; 1037 } 1038 1039 m = sc->sc_rb.rb_rxdesc[ix].hrx_m; 1040 CTR1(KTR_HME, "hme_read: len %d", len); 1041 1042 if (hme_add_rxbuf(sc, ix, 0) != 0) { 1043 /* 1044 * hme_add_rxbuf will leave the old buffer in the ring until 1045 * it is sure that a new buffer can be mapped. If it cannot, 1046 * drop the packet, but leave the interface up. 1047 */ 1048 ifp->if_iqdrops++; 1049 hme_discard_rxbuf(sc, ix); 1050 return; 1051 } 1052 1053 ifp->if_ipackets++; 1054 1055 m->m_pkthdr.rcvif = ifp; 1056 m->m_pkthdr.len = m->m_len = len + HME_RXOFFS; 1057 m_adj(m, HME_RXOFFS); 1058 /* RX TCP/UDP checksum */ 1059 if (ifp->if_capenable & IFCAP_RXCSUM) 1060 hme_rxcksum(m, flags); 1061 /* Pass the packet up. */
| 892} 893 894struct hme_txdma_arg { 895 struct hme_softc *hta_sc; 896 struct hme_txdesc *hta_htx; 897 int hta_ndescs; 898}; 899 900/* 901 * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf() 902 * are readable from the nearest burst boundary on (i.e. potentially before 903 * ds_addr) to the first boundary beyond the end. This is usually a safe 904 * assumption to make, but is not documented. 905 */ 906static void 907hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, 908 bus_size_t totsz, int error) 909{ 910 struct hme_txdma_arg *ta = xsc; 911 struct hme_txdesc *htx; 912 bus_size_t len = 0; 913 caddr_t txd; 914 u_int32_t flags = 0; 915 int i, tdhead, pci; 916 917 if (error != 0) 918 return; 919 920 tdhead = ta->hta_sc->sc_rb.rb_tdhead; 921 pci = ta->hta_sc->sc_pci; 922 txd = ta->hta_sc->sc_rb.rb_txd; 923 htx = ta->hta_htx; 924 925 if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) { 926 ta->hta_ndescs = -1; 927 return; 928 } 929 ta->hta_ndescs = nsegs; 930 931 for (i = 0; i < nsegs; i++) { 932 if (segs[i].ds_len == 0) 933 continue; 934 935 /* Fill the ring entry. */ 936 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len); 937 if (len == 0) 938 flags |= HME_XD_SOP; 939 if (len + segs[i].ds_len == totsz) 940 flags |= HME_XD_EOP; 941 CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, " 942 "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags, 943 (u_int)segs[i].ds_addr); 944 HME_XD_SETFLAGS(pci, txd, tdhead, flags); 945 HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr); 946 947 ta->hta_sc->sc_rb.rb_td_nbusy++; 948 htx->htx_lastdesc = tdhead; 949 tdhead = (tdhead + 1) % HME_NTXDESC; 950 len += segs[i].ds_len; 951 } 952 ta->hta_sc->sc_rb.rb_tdhead = tdhead; 953 KASSERT((flags & HME_XD_EOP) != 0, 954 ("hme_txdma_callback: missed end of packet!")); 955} 956 957/* TX TCP/UDP checksum */ 958static void 959hme_txcksum(struct mbuf *m, u_int32_t *cflags) 960{ 961 struct ip *ip; 962 u_int32_t offset, offset2; 963 caddr_t p; 964 965 for(; m && m->m_len == 0; m = m->m_next) 966 ; 967 if (m == NULL || m->m_len < ETHER_HDR_LEN) { 968 printf("hme_txcksum: m_len < ETHER_HDR_LEN\n"); 969 return; /* checksum will be corrupted */ 970 } 971 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) { 972 if (m->m_len != ETHER_HDR_LEN) { 973 printf("hme_txcksum: m_len != ETHER_HDR_LEN\n"); 974 return; /* checksum will be corrupted */ 975 } 976 /* XXX */ 977 for(m = m->m_next; m && m->m_len == 0; m = m->m_next) 978 ; 979 if (m == NULL) 980 return; /* checksum will be corrupted */ 981 ip = mtod(m, struct ip *); 982 } else { 983 p = mtod(m, caddr_t); 984 p += ETHER_HDR_LEN; 985 ip = (struct ip *)p; 986 } 987 offset2 = m->m_pkthdr.csum_data; 988 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; 989 *cflags = offset << HME_XD_TXCKSUM_SSHIFT; 990 *cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT); 991 *cflags |= HME_XD_TXCKSUM; 992} 993 994/* 995 * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and 996 * start the transmission. 997 * Returns 0 on success, -1 if there were not enough free descriptors to map 998 * the packet, or an errno otherwise. 
 999 */ 1000static int 1001hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0) 1002{ 1003 struct hme_txdma_arg cba; 1004 struct hme_txdesc *td; 1005 int error, si, ri; 1006 u_int32_t flags, cflags = 0; 1007 1008 si = sc->sc_rb.rb_tdhead; 1009 if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL) 1010 return (-1); 1011 if ((m0->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) 1012 hme_txcksum(m0, &cflags); 1013 td->htx_m = m0; 1014 cba.hta_sc = sc; 1015 cba.hta_htx = td; 1016 if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap, 1017 m0, hme_txdma_callback, &cba, 0)) != 0) 1018 goto fail; 1019 if (cba.hta_ndescs == -1) { 1020 error = -1; 1021 goto fail; 1022 } 1023 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap, 1024 BUS_DMASYNC_PREWRITE); 1025 1026 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q); 1027 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q); 1028 1029 /* Turn over descriptor ownership to the hme, back to front, so the chip never sees a partially-owned chain. */ 1030 ri = sc->sc_rb.rb_tdhead; 1031 CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", 1032 ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri)); 1033 do { 1034 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC; 1035 flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) | 1036 HME_XD_OWN | cflags; 1037 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)", 1038 ri, si, flags); 1039 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags); 1040 } while (ri != si); 1041 1042 /* start the transmission. */ 1043 HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP); 1044 return (0); 1045fail: 1046 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap); 1047 return (error); 1048} 1049 1050/* 1051 * Pass a packet to the higher levels. 1052 */ 1053static void 1054hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags) 1055{ 1056 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1057 struct mbuf *m; 1058 1059 if (len <= sizeof(struct ether_header) || 1060 len > HME_MAX_FRAMESIZE) { 1061#ifdef HMEDEBUG 1062 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n", 1063 len); 1064#endif 1065 ifp->if_ierrors++; 1066 hme_discard_rxbuf(sc, ix); 1067 return; 1068 } 1069 1070 m = sc->sc_rb.rb_rxdesc[ix].hrx_m; 1071 CTR1(KTR_HME, "hme_read: len %d", len); 1072 1073 if (hme_add_rxbuf(sc, ix, 0) != 0) { 1074 /* 1075 * hme_add_rxbuf will leave the old buffer in the ring until 1076 * it is sure that a new buffer can be mapped. If it cannot, 1077 * drop the packet, but leave the interface up. 1078 */ 1079 ifp->if_iqdrops++; 1080 hme_discard_rxbuf(sc, ix); 1081 return; 1082 } 1083 1084 ifp->if_ipackets++; 1085 1086 m->m_pkthdr.rcvif = ifp; 1087 m->m_pkthdr.len = m->m_len = len + HME_RXOFFS; 1088 m_adj(m, HME_RXOFFS); 1089 /* RX TCP/UDP checksum */ 1090 if (ifp->if_capenable & IFCAP_RXCSUM) 1091 hme_rxcksum(m, flags); 1092 /* Pass the packet up. */
|
| 1093 HME_UNLOCK(sc);
|
1062 (*ifp->if_input)(ifp, m);
| 1094 (*ifp->if_input)(ifp, m);
|
| 1095 HME_LOCK(sc);
|
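/*
 * The driver lock is dropped around the if_input() call above because
 * the upper layers may re-enter the driver (to send a reply, for
 * instance) and must not find the lock already held; a consequence is
 * that hme_read() cannot assume any softc state is preserved across
 * that call.
 */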
1063} 1064 1065static void 1066hme_start(struct ifnet *ifp) 1067{
| 1096} 1097 1098static void 1099hme_start(struct ifnet *ifp) 1100{
|
| 1101 struct hme_softc *sc = ifp->if_softc; 1102 1103 HME_LOCK(sc); 1104 hme_start_locked(ifp); 1105 HME_UNLOCK(sc); 1106} 1107 1108static void 1109hme_start_locked(struct ifnet *ifp) 1110{
|
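/*
 * The hme_init_locked() calls later in this diff imply that hme_init()
 * was split the same way as hme_start()/hme_start_locked() above. A
 * minimal sketch of the assumed wrapper (not shown in this diff, so the
 * exact signature is a guess):
 */
#if 0
static void
hme_init(void *xsc)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	HME_LOCK(sc);
	hme_init_locked(sc);
	HME_UNLOCK(sc);
}
#endif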
1068 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc; 1069 struct mbuf *m; 1070 int error, enq = 0; 1071 1072 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1073 return; 1074 1075 error = 0; 1076 for (;;) { 1077 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1078 if (m == NULL) 1079 break; 1080 1081 error = hme_load_txmbuf(sc, m); 1082 if (error == -1) { 1083 ifp->if_flags |= IFF_OACTIVE; 1084 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1085 break; 1086 } else if (error > 0) { 1087 printf("hme_start: error %d while loading mbuf\n", 1088 error); 1089 } else { 1090 enq = 1; 1091 BPF_MTAP(ifp, m); 1092 } 1093 } 1094 1095 if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1) 1096 ifp->if_flags |= IFF_OACTIVE; 1097 /* Set watchdog timer if a packet was queued */ 1098 if (enq) { 1099 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, 1100 BUS_DMASYNC_PREWRITE); 1101 ifp->if_timer = 5; 1102 } 1103} 1104 1105/* 1106 * Transmit interrupt. 1107 */ 1108static void 1109hme_tint(struct hme_softc *sc) 1110{ 1111 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1112 struct hme_txdesc *htx; 1113 unsigned int ri, txflags; 1114 1115 /* 1116 * Unload collision counters 1117 */ 1118 ifp->if_collisions += 1119 HME_MAC_READ_4(sc, HME_MACI_NCCNT) + 1120 HME_MAC_READ_4(sc, HME_MACI_FCCNT) + 1121 HME_MAC_READ_4(sc, HME_MACI_EXCNT) + 1122 HME_MAC_READ_4(sc, HME_MACI_LTCNT); 1123 1124 /* 1125 * then clear the hardware counters. 1126 */ 1127 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0); 1128 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0); 1129 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0); 1130 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0); 1131 1132 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq); 1133 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); 1134 /* Fetch current position in the transmit ring */ 1135 for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) { 1136 if (sc->sc_rb.rb_td_nbusy <= 0) { 1137 CTR0(KTR_HME, "hme_tint: not busy!"); 1138 break; 1139 } 1140 1141 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri); 1142 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags); 1143 1144 if ((txflags & HME_XD_OWN) != 0) 1145 break; 1146 1147 CTR0(KTR_HME, "hme_tint: not owned"); 1148 --sc->sc_rb.rb_td_nbusy; 1149 ifp->if_flags &= ~IFF_OACTIVE; 1150 1151 /* Complete packet transmitted? */ 1152 if ((txflags & HME_XD_EOP) == 0) 1153 continue; 1154 1155 KASSERT(htx->htx_lastdesc == ri, 1156 ("hme_tint: ring indices skewed: %d != %d!", 1157 htx->htx_lastdesc, ri)); 1158 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, 1159 BUS_DMASYNC_POSTWRITE); 1160 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap); 1161 1162 ifp->if_opackets++; 1163 m_freem(htx->htx_m); 1164 htx->htx_m = NULL; 1165 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q); 1166 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q); 1167 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq); 1168 } 1169 /* Turn off watchdog */ 1170 if (sc->sc_rb.rb_td_nbusy == 0) 1171 ifp->if_timer = 0; 1172 1173 /* Update ring */ 1174 sc->sc_rb.rb_tdtail = ri; 1175
| 1111 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc; 1112 struct mbuf *m; 1113 int error, enq = 0; 1114 1115 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1116 return; 1117 1118 error = 0; 1119 for (;;) { 1120 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1121 if (m == NULL) 1122 break; 1123 1124 error = hme_load_txmbuf(sc, m); 1125 if (error == -1) { 1126 ifp->if_flags |= IFF_OACTIVE; 1127 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1128 break; 1129 } else if (error > 0) { 1130 printf("hme_start: error %d while loading mbuf\n", 1131 error); 1132 } else { 1133 enq = 1; 1134 BPF_MTAP(ifp, m); 1135 } 1136 } 1137 1138 if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1) 1139 ifp->if_flags |= IFF_OACTIVE; 1140 /* Set watchdog timer if a packet was queued */ 1141 if (enq) { 1142 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, 1143 BUS_DMASYNC_PREWRITE); 1144 ifp->if_timer = 5; 1145 } 1146} 1147 1148/* 1149 * Transmit interrupt. 1150 */ 1151static void 1152hme_tint(struct hme_softc *sc) 1153{ 1154 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1155 struct hme_txdesc *htx; 1156 unsigned int ri, txflags; 1157 1158 /* 1159 * Unload collision counters 1160 */ 1161 ifp->if_collisions += 1162 HME_MAC_READ_4(sc, HME_MACI_NCCNT) + 1163 HME_MAC_READ_4(sc, HME_MACI_FCCNT) + 1164 HME_MAC_READ_4(sc, HME_MACI_EXCNT) + 1165 HME_MAC_READ_4(sc, HME_MACI_LTCNT); 1166 1167 /* 1168 * then clear the hardware counters. 1169 */ 1170 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0); 1171 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0); 1172 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0); 1173 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0); 1174 1175 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq); 1176 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); 1177 /* Fetch current position in the transmit ring */ 1178 for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) { 1179 if (sc->sc_rb.rb_td_nbusy <= 0) { 1180 CTR0(KTR_HME, "hme_tint: not busy!"); 1181 break; 1182 } 1183 1184 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri); 1185 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags); 1186 1187 if ((txflags & HME_XD_OWN) != 0) 1188 break; 1189 1190 CTR0(KTR_HME, "hme_tint: not owned"); 1191 --sc->sc_rb.rb_td_nbusy; 1192 ifp->if_flags &= ~IFF_OACTIVE; 1193 1194 /* Complete packet transmitted? */ 1195 if ((txflags & HME_XD_EOP) == 0) 1196 continue; 1197 1198 KASSERT(htx->htx_lastdesc == ri, 1199 ("hme_tint: ring indices skewed: %d != %d!", 1200 htx->htx_lastdesc, ri)); 1201 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, 1202 BUS_DMASYNC_POSTWRITE); 1203 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap); 1204 1205 ifp->if_opackets++; 1206 m_freem(htx->htx_m); 1207 htx->htx_m = NULL; 1208 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q); 1209 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q); 1210 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq); 1211 } 1212 /* Turn off watchdog */ 1213 if (sc->sc_rb.rb_td_nbusy == 0) 1214 ifp->if_timer = 0; 1215 1216 /* Update ring */ 1217 sc->sc_rb.rb_tdtail = ri; 1218
|
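/*
 * hme_start_locked() and hme_tint() together implement the usual
 * watchdog handshake: queueing a packet arms if_timer (5 seconds),
 * hme_tint() disarms it once the TX ring drains, and if the timer ever
 * expires hme_watchdog() below logs "device timeout" and resets the
 * chip. IFF_OACTIVE throttles the interface queue while no TX
 * descriptors are free.
 */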
1176 hme_start(ifp);
| 1219 hme_start_locked(ifp);
|
1177 1178 if (sc->sc_rb.rb_td_nbusy == 0) 1179 ifp->if_timer = 0; 1180} 1181 1182/* 1183 * RX TCP/UDP checksum 1184 */ 1185static void 1186hme_rxcksum(struct mbuf *m, u_int32_t flags) 1187{ 1188 struct ether_header *eh; 1189 struct ip *ip; 1190 struct udphdr *uh; 1191 int32_t hlen, len, pktlen; 1192 u_int16_t cksum, *opts; 1193 u_int32_t temp32; 1194 1195 pktlen = m->m_pkthdr.len; 1196 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 1197 return; 1198 eh = mtod(m, struct ether_header *); 1199 if (eh->ether_type != htons(ETHERTYPE_IP)) 1200 return; 1201 ip = (struct ip *)(eh + 1); 1202 if (ip->ip_v != IPVERSION) 1203 return; 1204 1205 hlen = ip->ip_hl << 2; 1206 pktlen -= sizeof(struct ether_header); 1207 if (hlen < sizeof(struct ip)) 1208 return; 1209 if (ntohs(ip->ip_len) < hlen) 1210 return; 1211 if (ntohs(ip->ip_len) != pktlen) 1212 return; 1213 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 1214 return; /* can't handle fragmented packet */ 1215 1216 switch (ip->ip_p) { 1217 case IPPROTO_TCP: 1218 if (pktlen < (hlen + sizeof(struct tcphdr))) 1219 return; 1220 break; 1221 case IPPROTO_UDP: 1222 if (pktlen < (hlen + sizeof(struct udphdr))) 1223 return; 1224 uh = (struct udphdr *)((caddr_t)ip + hlen); 1225 if (uh->uh_sum == 0) 1226 return; /* no checksum */ 1227 break; 1228 default: 1229 return; 1230 } 1231 1232 cksum = ~(flags & HME_XD_RXCKSUM); 1233 /* checksum fixup for IP options */ 1234 len = hlen - sizeof(struct ip); 1235 if (len > 0) { 1236 opts = (u_int16_t *)(ip + 1); 1237 for (; len > 0; len -= sizeof(u_int16_t), opts++) { 1238 temp32 = cksum - *opts; 1239 temp32 = (temp32 >> 16) + (temp32 & 65535); 1240 cksum = temp32 & 65535; 1241 } 1242 } 1243 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 1244 m->m_pkthdr.csum_data = cksum; 1245} 1246 1247/* 1248 * Receive interrupt. 1249 */ 1250static void 1251hme_rint(struct hme_softc *sc) 1252{ 1253 caddr_t xdr = sc->sc_rb.rb_rxd; 1254 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1255 unsigned int ri, len; 1256 int progress = 0; 1257 u_int32_t flags; 1258 1259 /* 1260 * Process all buffers with valid data. 1261 */ 1262 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); 1263 for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) { 1264 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri); 1265 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags); 1266 if ((flags & HME_XD_OWN) != 0) 1267 break; 1268 1269 progress++; 1270 if ((flags & HME_XD_OFL) != 0) { 1271 device_printf(sc->sc_dev, "buffer overflow, ri=%d; " 1272 "flags=0x%x\n", ri, flags); 1273 ifp->if_ierrors++; 1274 hme_discard_rxbuf(sc, ri); 1275 } else { 1276 len = HME_XD_DECODE_RSIZE(flags); 1277 hme_read(sc, ri, len, flags); 1278 } 1279 } 1280 if (progress) { 1281 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, 1282 BUS_DMASYNC_PREWRITE); 1283 } 1284 sc->sc_rb.rb_rdtail = ri; 1285} 1286 1287static void 1288hme_eint(struct hme_softc *sc, u_int status) 1289{ 1290 1291 if ((status & HME_SEB_STAT_MIFIRQ) != 0) { 1292 device_printf(sc->sc_dev, "XXXlink status changed\n"); 1293 return; 1294 } 1295 1296 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status); 1297} 1298 1299void 1300hme_intr(void *v) 1301{ 1302 struct hme_softc *sc = (struct hme_softc *)v; 1303 u_int32_t status; 1304
| 1220 1221 if (sc->sc_rb.rb_td_nbusy == 0) 1222 ifp->if_timer = 0; 1223} 1224 1225/* 1226 * RX TCP/UDP checksum 1227 */ 1228static void 1229hme_rxcksum(struct mbuf *m, u_int32_t flags) 1230{ 1231 struct ether_header *eh; 1232 struct ip *ip; 1233 struct udphdr *uh; 1234 int32_t hlen, len, pktlen; 1235 u_int16_t cksum, *opts; 1236 u_int32_t temp32; 1237 1238 pktlen = m->m_pkthdr.len; 1239 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 1240 return; 1241 eh = mtod(m, struct ether_header *); 1242 if (eh->ether_type != htons(ETHERTYPE_IP)) 1243 return; 1244 ip = (struct ip *)(eh + 1); 1245 if (ip->ip_v != IPVERSION) 1246 return; 1247 1248 hlen = ip->ip_hl << 2; 1249 pktlen -= sizeof(struct ether_header); 1250 if (hlen < sizeof(struct ip)) 1251 return; 1252 if (ntohs(ip->ip_len) < hlen) 1253 return; 1254 if (ntohs(ip->ip_len) != pktlen) 1255 return; 1256 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 1257 return; /* can't handle fragmented packet */ 1258 1259 switch (ip->ip_p) { 1260 case IPPROTO_TCP: 1261 if (pktlen < (hlen + sizeof(struct tcphdr))) 1262 return; 1263 break; 1264 case IPPROTO_UDP: 1265 if (pktlen < (hlen + sizeof(struct udphdr))) 1266 return; 1267 uh = (struct udphdr *)((caddr_t)ip + hlen); 1268 if (uh->uh_sum == 0) 1269 return; /* no checksum */ 1270 break; 1271 default: 1272 return; 1273 } 1274 1275 cksum = ~(flags & HME_XD_RXCKSUM); 1276 /* checksum fixup for IP options */ 1277 len = hlen - sizeof(struct ip); 1278 if (len > 0) { 1279 opts = (u_int16_t *)(ip + 1); 1280 for (; len > 0; len -= sizeof(u_int16_t), opts++) { 1281 temp32 = cksum - *opts; 1282 temp32 = (temp32 >> 16) + (temp32 & 65535); 1283 cksum = temp32 & 65535; 1284 } 1285 } 1286 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 1287 m->m_pkthdr.csum_data = cksum; 1288} 1289 1290/* 1291 * Receive interrupt. 1292 */ 1293static void 1294hme_rint(struct hme_softc *sc) 1295{ 1296 caddr_t xdr = sc->sc_rb.rb_rxd; 1297 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1298 unsigned int ri, len; 1299 int progress = 0; 1300 u_int32_t flags; 1301 1302 /* 1303 * Process all buffers with valid data. 1304 */ 1305 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); 1306 for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) { 1307 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri); 1308 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags); 1309 if ((flags & HME_XD_OWN) != 0) 1310 break; 1311 1312 progress++; 1313 if ((flags & HME_XD_OFL) != 0) { 1314 device_printf(sc->sc_dev, "buffer overflow, ri=%d; " 1315 "flags=0x%x\n", ri, flags); 1316 ifp->if_ierrors++; 1317 hme_discard_rxbuf(sc, ri); 1318 } else { 1319 len = HME_XD_DECODE_RSIZE(flags); 1320 hme_read(sc, ri, len, flags); 1321 } 1322 } 1323 if (progress) { 1324 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, 1325 BUS_DMASYNC_PREWRITE); 1326 } 1327 sc->sc_rb.rb_rdtail = ri; 1328} 1329 1330static void 1331hme_eint(struct hme_softc *sc, u_int status) 1332{ 1333 1334 if ((status & HME_SEB_STAT_MIFIRQ) != 0) { 1335 device_printf(sc->sc_dev, "XXXlink status changed\n"); 1336 return; 1337 } 1338 1339 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status); 1340} 1341 1342void 1343hme_intr(void *v) 1344{ 1345 struct hme_softc *sc = (struct hme_softc *)v; 1346 u_int32_t status; 1347
|
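/*
 * The option fixup loop in hme_rxcksum() above is needed because the
 * hardware always starts summing at byte 34 (the CSUMSTART value set up
 * in hme_init): when the IP header carries options, everything between
 * byte 34 and the real start of the TCP/UDP header has been summed in
 * as well and must be backed out. The subtraction is performed in
 * ones-complement arithmetic, folding the borrow back into the low 16
 * bits:
 *
 *	temp32 = cksum - *opts;
 *	temp32 = (temp32 >> 16) + (temp32 & 65535);
 *	cksum = temp32 & 65535;
 */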
| 1348 HME_LOCK(sc);
|
1305 status = HME_SEB_READ_4(sc, HME_SEBI_STAT); 1306 CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status); 1307 1308 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0) 1309 hme_eint(sc, status); 1310 1311 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0) 1312 hme_tint(sc); 1313 1314 if ((status & HME_SEB_STAT_RXTOHOST) != 0) 1315 hme_rint(sc);
| 1349 status = HME_SEB_READ_4(sc, HME_SEBI_STAT); 1350 CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status); 1351 1352 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0) 1353 hme_eint(sc, status); 1354 1355 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0) 1356 hme_tint(sc); 1357 1358 if ((status & HME_SEB_STAT_RXTOHOST) != 0) 1359 hme_rint(sc);
|
| 1360 HME_UNLOCK(sc);
|
1316} 1317 1318 1319static void 1320hme_watchdog(struct ifnet *ifp) 1321{ 1322 struct hme_softc *sc = ifp->if_softc; 1323#ifdef HMEDEBUG 1324 u_int32_t status;
| 1361} 1362 1363 1364static void 1365hme_watchdog(struct ifnet *ifp) 1366{ 1367 struct hme_softc *sc = ifp->if_softc; 1368#ifdef HMEDEBUG 1369 u_int32_t status;
|
| 1370#endif
|
1325
| 1371
|
| 1372 HME_LOCK(sc); 1373#ifdef HMEDEBUG
|
1326 status = HME_SEB_READ_4(sc, HME_SEBI_STAT); 1327 CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status); 1328#endif 1329 device_printf(sc->sc_dev, "device timeout\n"); 1330 ++ifp->if_oerrors;
| 1374 status = HME_SEB_READ_4(sc, HME_SEBI_STAT); 1375 CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status); 1376#endif 1377 device_printf(sc->sc_dev, "device timeout\n"); 1378 ++ifp->if_oerrors;
|
| 1379 HME_UNLOCK(sc);
|
1331 1332 hme_reset(sc); 1333} 1334 1335/* 1336 * Initialize the MII Management Interface 1337 */ 1338static void 1339hme_mifinit(struct hme_softc *sc) 1340{ 1341 u_int32_t v; 1342
| 1380 1381 hme_reset(sc); 1382} 1383 1384/* 1385 * Initialize the MII Management Interface 1386 */ 1387static void 1388hme_mifinit(struct hme_softc *sc) 1389{ 1390 u_int32_t v; 1391
|
| 1392 HME_LOCK_ASSERT(sc, MA_OWNED); 1393
|
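/*
 * The MIF can drive MDIO either by bit-banging the pins directly or in
 * frame mode, where the chip serializes complete MII management frames
 * by itself. Clearing HME_MIF_CFG_BBMODE below selects frame mode,
 * which hme_mii_readreg() and hme_mii_writereg() rely on when they
 * write a whole frame to HME_MIFI_FO and poll for completion.
 */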
1343 /* Configure the MIF in frame mode */ 1344 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1345 v &= ~HME_MIF_CFG_BBMODE; 1346 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1347} 1348 1349/* 1350 * MII interface 1351 */ 1352int 1353hme_mii_readreg(device_t dev, int phy, int reg) 1354{ 1355 struct hme_softc *sc = device_get_softc(dev); 1356 int n; 1357 u_int32_t v; 1358
| 1394 /* Configure the MIF in frame mode */ 1395 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1396 v &= ~HME_MIF_CFG_BBMODE; 1397 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1398} 1399 1400/* 1401 * MII interface 1402 */ 1403int 1404hme_mii_readreg(device_t dev, int phy, int reg) 1405{ 1406 struct hme_softc *sc = device_get_softc(dev); 1407 int n; 1408 u_int32_t v; 1409
|
| 1410 HME_LOCK(sc);
|
1359 /* Select the desired PHY in the MIF configuration register */ 1360 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1361 /* Clear PHY select bit */ 1362 v &= ~HME_MIF_CFG_PHY; 1363 if (phy == HME_PHYAD_EXTERNAL) 1364 /* Set PHY select bit to get at external device */ 1365 v |= HME_MIF_CFG_PHY; 1366 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1367 1368 /* Construct the frame command */ 1369 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1370 HME_MIF_FO_TAMSB | 1371 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) | 1372 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1373 (reg << HME_MIF_FO_REGAD_SHIFT); 1374 1375 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v); 1376 for (n = 0; n < 100; n++) { 1377 DELAY(1); 1378 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
| 1411 /* Select the desired PHY in the MIF configuration register */ 1412 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1413 /* Clear PHY select bit */ 1414 v &= ~HME_MIF_CFG_PHY; 1415 if (phy == HME_PHYAD_EXTERNAL) 1416 /* Set PHY select bit to get at external device */ 1417 v |= HME_MIF_CFG_PHY; 1418 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1419 1420 /* Construct the frame command */ 1421 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1422 HME_MIF_FO_TAMSB | 1423 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) | 1424 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1425 (reg << HME_MIF_FO_REGAD_SHIFT); 1426 1427 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v); 1428 for (n = 0; n < 100; n++) { 1429 DELAY(1); 1430 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
|
1379 if (v & HME_MIF_FO_TALSB)
| 1431 if (v & HME_MIF_FO_TALSB) { 1432 HME_UNLOCK(sc);
|
1380 return (v & HME_MIF_FO_DATA);
| 1433 return (v & HME_MIF_FO_DATA);
|
| 1434 }
|
1381 } 1382 1383 device_printf(sc->sc_dev, "mii_read timeout\n");
| 1435 } 1436 1437 device_printf(sc->sc_dev, "mii_read timeout\n");
|
| 1438 HME_UNLOCK(sc);
|
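/*
 * The value written to HME_MIFI_FO above is a complete IEEE 802.3
 * clause 22 management frame: start bits, a read or write opcode, a
 * 5-bit PHY address, a 5-bit register address, the turnaround bits and
 * 16 data bits. The chip shifts the frame out on MDIO and indicates
 * completion by setting HME_MIF_FO_TALSB, at which point the data field
 * holds the value read. The 100 x 1us poll is a generous upper bound;
 * at the standard 2.5 MHz MDC clock a frame takes on the order of 25us
 * including preamble.
 */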
1384 return (0); 1385} 1386 1387int 1388hme_mii_writereg(device_t dev, int phy, int reg, int val) 1389{ 1390 struct hme_softc *sc = device_get_softc(dev); 1391 int n; 1392 u_int32_t v; 1393
| 1439 return (0); 1440} 1441 1442int 1443hme_mii_writereg(device_t dev, int phy, int reg, int val) 1444{ 1445 struct hme_softc *sc = device_get_softc(dev); 1446 int n; 1447 u_int32_t v; 1448
|
| 1449 HME_LOCK(sc);
|
1394 /* Select the desired PHY in the MIF configuration register */ 1395 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1396 /* Clear PHY select bit */ 1397 v &= ~HME_MIF_CFG_PHY; 1398 if (phy == HME_PHYAD_EXTERNAL) 1399 /* Set PHY select bit to get at external device */ 1400 v |= HME_MIF_CFG_PHY; 1401 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1402 1403 /* Construct the frame command */ 1404 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1405 HME_MIF_FO_TAMSB | 1406 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) | 1407 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1408 (reg << HME_MIF_FO_REGAD_SHIFT) | 1409 (val & HME_MIF_FO_DATA); 1410 1411 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v); 1412 for (n = 0; n < 100; n++) { 1413 DELAY(1); 1414 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
| 1450 /* Select the desired PHY in the MIF configuration register */ 1451 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1452 /* Clear PHY select bit */ 1453 v &= ~HME_MIF_CFG_PHY; 1454 if (phy == HME_PHYAD_EXTERNAL) 1455 /* Set PHY select bit to get at external device */ 1456 v |= HME_MIF_CFG_PHY; 1457 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1458 1459 /* Construct the frame command */ 1460 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1461 HME_MIF_FO_TAMSB | 1462 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) | 1463 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1464 (reg << HME_MIF_FO_REGAD_SHIFT) | 1465 (val & HME_MIF_FO_DATA); 1466 1467 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v); 1468 for (n = 0; n < 100; n++) { 1469 DELAY(1); 1470 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
|
1415 if (v & HME_MIF_FO_TALSB)
| 1471 if (v & HME_MIF_FO_TALSB) { 1472 HME_UNLOCK(sc);
|
1416 return (1);
| 1473 return (1);
|
| 1474 }
|
1417 } 1418 1419 device_printf(sc->sc_dev, "mii_write timeout\n");
| 1475 } 1476 1477 device_printf(sc->sc_dev, "mii_write timeout\n");
|
| 1478 HME_UNLOCK(sc);
|
1420 return (0); 1421} 1422 1423void 1424hme_mii_statchg(device_t dev) 1425{ 1426 struct hme_softc *sc = device_get_softc(dev);
| 1479 return (0); 1480} 1481 1482void 1483hme_mii_statchg(device_t dev) 1484{ 1485 struct hme_softc *sc = device_get_softc(dev);
|
1427 int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); 1428 int phy = sc->sc_phys[instance];
| 1486 int instance; 1487 int phy;
|
1429 u_int32_t v; 1430
| 1488 u_int32_t v; 1489
|
| 1490 HME_LOCK(sc); 1491 instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); 1492 phy = sc->sc_phys[instance];
|
1431#ifdef HMEDEBUG 1432 if (sc->sc_debug) 1433 printf("hme_mii_statchg: status change: phy = %d\n", phy); 1434#endif 1435 1436 /* Select the current PHY in the MIF configuration register */ 1437 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1438 v &= ~HME_MIF_CFG_PHY; 1439 if (phy == HME_PHYAD_EXTERNAL) 1440 v |= HME_MIF_CFG_PHY; 1441 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1442 1443 /* Set the MAC Full Duplex bit appropriately */ 1444 v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
| 1493#ifdef HMEDEBUG 1494 if (sc->sc_debug) 1495 printf("hme_mii_statchg: status change: phy = %d\n", phy); 1496#endif 1497 1498 /* Select the current PHY in the MIF configuration register */ 1499 v = HME_MIF_READ_4(sc, HME_MIFI_CFG); 1500 v &= ~HME_MIF_CFG_PHY; 1501 if (phy == HME_PHYAD_EXTERNAL) 1502 v |= HME_MIF_CFG_PHY; 1503 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); 1504 1505 /* Set the MAC Full Duplex bit appropriately */ 1506 v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
|
1445 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
| 1507 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0)) { 1508 HME_UNLOCK(sc);
|
1446 return;
| 1509 return;
|
| 1510 }
|
1447 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 1448 v |= HME_MAC_TXCFG_FULLDPLX; 1449 else 1450 v &= ~HME_MAC_TXCFG_FULLDPLX; 1451 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
| 1511 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 1512 v |= HME_MAC_TXCFG_FULLDPLX; 1513 else 1514 v &= ~HME_MAC_TXCFG_FULLDPLX; 1515 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
|
1452 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
| 1516 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE)) { 1517 HME_UNLOCK(sc);
|
1453 return;
| 1518 return;
|
| 1519 } 1520 HME_UNLOCK(sc);
|
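/*
 * Note the bracketing above: the first hme_mac_bitflip() clears
 * HME_MAC_TXCFG_ENABLE and polls until the MAC acknowledges, the
 * full-duplex bit is then changed while the transmitter is quiescent,
 * and a second bitflip re-enables it. This follows the same
 * disable-before-reconfigure rule the documentation mandates for the
 * receiver in hme_setladrf() below.
 */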
1454} 1455 1456static int 1457hme_mediachange(struct ifnet *ifp) 1458{ 1459 struct hme_softc *sc = ifp->if_softc; 1460 1461 return (mii_mediachg(sc->sc_mii)); 1462} 1463 1464static void 1465hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1466{ 1467 struct hme_softc *sc = ifp->if_softc; 1468
| 1521} 1522 1523static int 1524hme_mediachange(struct ifnet *ifp) 1525{ 1526 struct hme_softc *sc = ifp->if_softc; 1527 1528 return (mii_mediachg(sc->sc_mii)); 1529} 1530 1531static void 1532hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1533{ 1534 struct hme_softc *sc = ifp->if_softc; 1535
|
1469 if ((ifp->if_flags & IFF_UP) == 0)
| 1536 HME_LOCK(sc); 1537 if ((ifp->if_flags & IFF_UP) == 0) { 1538 HME_UNLOCK(sc);
|
1470 return;
| 1539 return;
|
| 1540 }
|
1471
| 1541
|
| 1542 HME_UNLOCK(sc);
|
1472 mii_pollstat(sc->sc_mii);
| 1543 mii_pollstat(sc->sc_mii);
|
| 1544 HME_LOCK(sc);
|
1473 ifmr->ifm_active = sc->sc_mii->mii_media_active; 1474 ifmr->ifm_status = sc->sc_mii->mii_media_status;
| 1545 ifmr->ifm_active = sc->sc_mii->mii_media_active; 1546 ifmr->ifm_status = sc->sc_mii->mii_media_status;
|
| 1547 HME_UNLOCK(sc);
|
1475} 1476 1477/* 1478 * Process an ioctl request. 1479 */ 1480static int 1481hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1482{ 1483 struct hme_softc *sc = ifp->if_softc; 1484 struct ifreq *ifr = (struct ifreq *)data; 1485 int s, error = 0; 1486
| 1548} 1549 1550/* 1551 * Process an ioctl request. 1552 */ 1553static int 1554hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1555{ 1556 struct hme_softc *sc = ifp->if_softc; 1557 struct ifreq *ifr = (struct ifreq *)data; 1558 int s, error = 0; 1559
|
| 1560 HME_LOCK(sc);
|
1487 s = splnet(); 1488 1489 switch (cmd) { 1490 case SIOCSIFFLAGS: 1491 if ((ifp->if_flags & IFF_UP) == 0 && 1492 (ifp->if_flags & IFF_RUNNING) != 0) { 1493 /* 1494 * If interface is marked down and it is running, then 1495 * stop it. 1496 */ 1497 hme_stop(sc); 1498 ifp->if_flags &= ~IFF_RUNNING; 1499 } else if ((ifp->if_flags & IFF_UP) != 0 && 1500 (ifp->if_flags & IFF_RUNNING) == 0) { 1501 /* 1502 * If interface is marked up and it is stopped, then 1503 * start it. 1504 */
| 1561 s = splnet(); 1562 1563 switch (cmd) { 1564 case SIOCSIFFLAGS: 1565 if ((ifp->if_flags & IFF_UP) == 0 && 1566 (ifp->if_flags & IFF_RUNNING) != 0) { 1567 /* 1568 * If interface is marked down and it is running, then 1569 * stop it. 1570 */ 1571 hme_stop(sc); 1572 ifp->if_flags &= ~IFF_RUNNING; 1573 } else if ((ifp->if_flags & IFF_UP) != 0 && 1574 (ifp->if_flags & IFF_RUNNING) == 0) { 1575 /* 1576 * If interface is marked up and it is stopped, then 1577 * start it. 1578 */
|
1505 hme_init(sc);
| 1579 hme_init_locked(sc);
|
1506 } else if ((ifp->if_flags & IFF_UP) != 0) { 1507 /* 1508 * Reset the interface to pick up changes in any other 1509 * flags that affect hardware registers. 1510 */
| 1580 } else if ((ifp->if_flags & IFF_UP) != 0) { 1581 /* 1582 * Reset the interface to pick up changes in any other 1583 * flags that affect hardware registers. 1584 */
|
1511 hme_init(sc);
| 1585 hme_init_locked(sc);
|
1512 } 1513 if ((ifp->if_flags & IFF_LINK0) != 0) 1514 sc->sc_csum_features |= CSUM_UDP; 1515 else 1516 sc->sc_csum_features &= ~CSUM_UDP; 1517 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1518 ifp->if_hwassist = sc->sc_csum_features; 1519#ifdef HMEDEBUG 1520 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0; 1521#endif 1522 break; 1523 1524 case SIOCADDMULTI: 1525 case SIOCDELMULTI: 1526 hme_setladrf(sc, 1); 1527 error = 0; 1528 break; 1529 case SIOCGIFMEDIA: 1530 case SIOCSIFMEDIA:
| 1586 } 1587 if ((ifp->if_flags & IFF_LINK0) != 0) 1588 sc->sc_csum_features |= CSUM_UDP; 1589 else 1590 sc->sc_csum_features &= ~CSUM_UDP; 1591 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1592 ifp->if_hwassist = sc->sc_csum_features; 1593#ifdef HMEDEBUG 1594 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0; 1595#endif 1596 break; 1597 1598 case SIOCADDMULTI: 1599 case SIOCDELMULTI: 1600 hme_setladrf(sc, 1); 1601 error = 0; 1602 break; 1603 case SIOCGIFMEDIA: 1604 case SIOCSIFMEDIA:
|
| 1605 HME_UNLOCK(sc);
|
1531 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
| 1606 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
|
| 1607 HME_LOCK(sc);
|
1532 break; 1533 case SIOCSIFCAP: 1534 ifp->if_capenable = ifr->ifr_reqcap; 1535 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1536 ifp->if_hwassist = sc->sc_csum_features; 1537 else 1538 ifp->if_hwassist = 0; 1539 break; 1540 default:
| 1608 break; 1609 case SIOCSIFCAP: 1610 ifp->if_capenable = ifr->ifr_reqcap; 1611 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1612 ifp->if_hwassist = sc->sc_csum_features; 1613 else 1614 ifp->if_hwassist = 0; 1615 break; 1616 default:
|
| 1617 HME_UNLOCK(sc);
|
1541 error = ether_ioctl(ifp, cmd, data);
| 1618 error = ether_ioctl(ifp, cmd, data);
|
| 1619 HME_LOCK(sc);
|
1542 break; 1543 } 1544 1545 splx(s);
| 1620 break; 1621 } 1622 1623 splx(s);
|
| 1624 HME_UNLOCK(sc);
|
1546 return (error); 1547} 1548 1549/* 1550 * Set up the logical address filter. 1551 */ 1552static void 1553hme_setladrf(struct hme_softc *sc, int reenable) 1554{ 1555 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1556 struct ifmultiaddr *inm; 1557 u_int32_t crc; 1558 u_int32_t hash[4]; 1559 u_int32_t macc; 1560
| 1625 return (error); 1626} 1627 1628/* 1629 * Set up the logical address filter. 1630 */ 1631static void 1632hme_setladrf(struct hme_softc *sc, int reenable) 1633{ 1634 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1635 struct ifmultiaddr *inm; 1636 u_int32_t crc; 1637 u_int32_t hash[4]; 1638 u_int32_t macc; 1639
|
| 1640 HME_LOCK_ASSERT(sc, MA_OWNED);
|
 1561 /* Clear hash table */ 1562 hash[3] = hash[2] = hash[1] = hash[0] = 0; 1563 1564 /* Get current RX configuration */ 1565 macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG); 1566 1567 /* 1568 * Disable the receiver while changing its state as the documentation 1569 * mandates. 1570 * We then must wait until the bit clears in the register. This should 1571 * take at most 3.5ms. 1572 */ 1573 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0)) 1574 return; 1575 /* Disable the hash filter before writing to the filter registers. */ 1576 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 1577 HME_MAC_RXCFG_HENABLE, 0)) 1578 return; 1579 1580 if (reenable) 1581 macc |= HME_MAC_RXCFG_ENABLE; 1582 else 1583 macc &= ~HME_MAC_RXCFG_ENABLE; 1584 1585 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1586 /* Turn on promiscuous mode; turn off the hash filter */ 1587 macc |= HME_MAC_RXCFG_PMISC; 1588 macc &= ~HME_MAC_RXCFG_HENABLE; 1589 ifp->if_flags |= IFF_ALLMULTI; 1590 goto chipit; 1591 } 1592 1593 /* Turn off promiscuous mode; turn on the hash filter */ 1594 macc &= ~HME_MAC_RXCFG_PMISC; 1595 macc |= HME_MAC_RXCFG_HENABLE; 1596 1597 /* 1598 * Set up multicast address filter by passing all multicast addresses 1599 * through a crc generator, and then using the high order 6 bits as an 1600 * index into the 64 bit logical address filter. The high order bits 1601 * select the word, while the rest of the bits select the bit within 1602 * the word. 1603 */ 1604 1605 TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) { 1606 if (inm->ifma_addr->sa_family != AF_LINK) 1607 continue; 1608 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) 1609 inm->ifma_addr), ETHER_ADDR_LEN); 1610 1611 /* Just want the 6 most significant bits. */ 1612 crc >>= 26; 1613 1614 /* Set the corresponding bit in the filter. */ 1615 hash[crc >> 4] |= 1 << (crc & 0xf); 1616 } 1617 1618 ifp->if_flags &= ~IFF_ALLMULTI; 1619 1620chipit: 1621 /* Now load the hash table into the chip */ 1622 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]); 1623 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]); 1624 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]); 1625 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]); 1626 hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0, 1627 macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE)); 1628}
| 1641 /* Clear hash table */ 1642 hash[3] = hash[2] = hash[1] = hash[0] = 0; 1643 1644 /* Get current RX configuration */ 1645 macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG); 1646 1647 /* 1648 * Disable the receiver while changing its state as the documentation 1649 * mandates. 1650 * We then must wait until the bit clears in the register. This should 1651 * take at most 3.5ms. 1652 */ 1653 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0)) 1654 return; 1655 /* Disable the hash filter before writing to the filter registers. */ 1656 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 1657 HME_MAC_RXCFG_HENABLE, 0)) 1658 return; 1659 1660 if (reenable) 1661 macc |= HME_MAC_RXCFG_ENABLE; 1662 else 1663 macc &= ~HME_MAC_RXCFG_ENABLE; 1664 1665 if ((ifp->if_flags & IFF_PROMISC) != 0) { 1666 /* Turn on promiscuous mode; turn off the hash filter */ 1667 macc |= HME_MAC_RXCFG_PMISC; 1668 macc &= ~HME_MAC_RXCFG_HENABLE; 1669 ifp->if_flags |= IFF_ALLMULTI; 1670 goto chipit; 1671 } 1672 1673 /* Turn off promiscuous mode; turn on the hash filter */ 1674 macc &= ~HME_MAC_RXCFG_PMISC; 1675 macc |= HME_MAC_RXCFG_HENABLE; 1676 1677 /* 1678 * Set up multicast address filter by passing all multicast addresses 1679 * through a crc generator, and then using the high order 6 bits as an 1680 * index into the 64 bit logical address filter. The high order bits 1681 * select the word, while the rest of the bits select the bit within 1682 * the word. 1683 */ 1684 1685 TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) { 1686 if (inm->ifma_addr->sa_family != AF_LINK) 1687 continue; 1688 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) 1689 inm->ifma_addr), ETHER_ADDR_LEN); 1690 1691 /* Just want the 6 most significant bits. */ 1692 crc >>= 26; 1693 1694 /* Set the corresponding bit in the filter. */ 1695 hash[crc >> 4] |= 1 << (crc & 0xf); 1696 } 1697 1698 ifp->if_flags &= ~IFF_ALLMULTI; 1699 1700chipit: 1701 /* Now load the hash table into the chip */ 1702 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]); 1703 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]); 1704 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]); 1705 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]); 1706 hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0, 1707 macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE)); 1708}
|
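/*
 * Filter indexing example: ether_crc32_le() yields a 32-bit CRC of
 * which only the top six bits survive the crc >>= 26. A value of, say,
 * 0x25 (100101 binary) selects word 0x25 >> 4 == 2 and bit
 * 0x25 & 0xf == 5, i.e. bit 5 of HME_MACI_HASHTAB2, one of the 64
 * positions in the logical address filter.
 */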