/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
 *
 * $FreeBSD: head/sys/dev/hme/if_hme.c 108834 2003-01-06 22:12:57Z tmm $
 */

/*
 * HME Ethernet module driver.
 *
 * The HME is e.g. part of the PCIO PCI multi-function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose
 * 2 for this offset: mbuf clusters are usually on 2^11 boundaries, and 2
 * bytes are skipped so that the header following the ethernet header is
 * aligned on a natural boundary, which ensures minimal wastage in the most
 * common case.
 *
 * Also, apparently, the buffers must extend to a DMA burst boundary beyond
 * the maximum packet size (this is not verified). Buffers starting on odd
 * boundaries must be mapped so that the burst can start on a natural
 * boundary.
 *
 * Checksumming is not yet supported.
 */

#define HMEDEBUG
#define KTR_HME		KTR_CT2		/* XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <hme/if_hmereg.h>
#include <hme/if_hmevar.h>

static void hme_start(struct ifnet *);
static void hme_stop(struct hme_softc *);
static int hme_ioctl(struct ifnet *, u_long, caddr_t);
static void hme_tick(void *);
static void hme_watchdog(struct ifnet *);
#if 0
static void hme_shutdown(void *);
#endif
static void hme_init(void *);
static int hme_add_rxbuf(struct hme_softc *, unsigned int, int);
static int hme_meminit(struct hme_softc *);
static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
    u_int32_t, u_int32_t);
static void hme_mifinit(struct hme_softc *);
static void hme_reset(struct hme_softc *);
static void hme_setladrf(struct hme_softc *, int);

static int hme_mediachange(struct ifnet *);
static void hme_mediastatus(struct ifnet *, struct ifmediareq *);

static int hme_load_txmbuf(struct hme_softc *, struct mbuf *);
static void hme_read(struct hme_softc *, int, int);
static void hme_eint(struct hme_softc *, u_int);
static void hme_rint(struct hme_softc *);
static void hme_tint(struct hme_softc *);

static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
static void hme_rxdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static void hme_txdma_callback(void *, bus_dma_segment_t *, int,
    bus_size_t, int);

devclass_t hme_devclass;

static int hme_nerr;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);

#define HME_SPC_READ_4(spc, sc, offs) \
	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (sc)->sc_ ## spc ## o + (offs))
#define HME_SPC_WRITE_4(spc, sc, offs, v) \
	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (sc)->sc_ ## spc ## o + (offs), (v))

#define HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
#define HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
#define HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
#define HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
#define HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
#define HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))

#define HME_MAXERR	5
#define HME_WHINE(dev, ...) do {					\
	if (hme_nerr++ < HME_MAXERR)					\
		device_printf(dev, __VA_ARGS__);			\
	if (hme_nerr == HME_MAXERR) {					\
		device_printf(dev, "too many errors; not reporting "	\
		    "any more\n");					\
	}								\
} while(0)

int
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	bus_size_t size;
	int error, rdesc, tdesc, i;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the DMA bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles, tags and offsets (split for SBus compatibility):
	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
	 *	sc_erx{t,h,o}	(Receiver Unit registers)
	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
	 *	sc_mac{t,h,o}	(MAC registers)
	 *	sc_mif{t,h,o}	(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc);

	/*
	 * Allocate DMA capable memory.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of HME_N*DESC.
	 */
	size = 4096;

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, &sc->sc_cdmatag);
	if (error)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_rdmatag);
	if (error)
		goto fail_ctag;

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	/* Allocate control/TX DMA buffer */
	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
	    0, &sc->sc_cdmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
		goto fail_ttag;
	}

	/* Load the buffer */
	sc->sc_rb.rb_dmabase = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
	    sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
	    sc->sc_rb.rb_dmabase == 0) {
		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
		    error);
		goto fail_free;
	}
	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
	    sc->sc_rb.rb_dmabase);

	/*
	 * Prepare the RX descriptors. rdesc serves as marker for the last
	 * processed descriptor and may be used later on.
	 */
	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
		error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
		if (error != 0)
			goto fail_rxdesc;
	}
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	if (error != 0)
		goto fail_rxdesc;
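
	/*
	 * Illustrative arithmetic for the "size = 4096" allocation at the
	 * top of this function (an editorial sketch, not driver code):
	 * assuming the 8 byte hardware descriptor format (HME_XD_SIZE == 8),
	 * the maximum ring of 256 descriptors occupies 256 * 8 == 2048
	 * bytes. One such maximal, 2048 byte aligned region for TX plus one
	 * for RX gives the 4096 bytes of control memory, independent of the
	 * configured HME_NTXDESC/HME_NRXDESC.
	 */
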
	/* Same for the TX descs. */
	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
		if (error != 0)
			goto fail_txdesc;
	}

	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);
	printf("\n");

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "hme";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	ifp->if_output = ether_output;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_snd.ifq_maxlen = HME_NTXQ;

	hme_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
	    hme_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxdesc;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);

	callout_init(&sc->sc_tick_ch, 0);
	return (0);

fail_txdesc:
	for (i = 0; i < tdesc; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
fail_rxdesc:
	for (i = 0; i < rdesc; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
fail_free:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

static void
hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
}

static void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

static void
hme_reset(struct hme_softc *sc)
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

static void
hme_stop(struct hme_softc *sc)
{
	u_int32_t v;
	int n;

	callout_stop(&sc->sc_tick_ch);

	/* Reset transmitter and receiver */
	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
	    HME_SEB_RESET_ERX);

	for (n = 0; n < 20; n++) {
		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}

static void
hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsize, int error)
{
	bus_addr_t *a = xsc;

	KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
	if (error != 0)
		return;
	*a = segs[0].ds_addr;
}

/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
static void
hme_discard_rxbuf(struct hme_softc *sc, int ix, int sync)
{

	/*
	 * Dropped a packet, reinitialize the descriptor and turn the
	 * ownership back to the hardware.
	 */
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
	if (sync) {
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
	struct hme_rxdesc *rd;
	struct mbuf *m;
	bus_addr_t ba;
	bus_dmamap_t map;
	uintptr_t b;
	int a, unmap;

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		/*
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		 */
		hme_discard_rxbuf(sc, ri, 0);
		return (0);
	}
	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	b = mtod(m, uintptr_t);
	/*
	 * Required alignment boundary. At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary we might need to extend this.
	 */
	a = max(HME_MINRXALIGN, sc->sc_burst);
	/*
	 * Make sure the buffer is suitably aligned. The 2 byte offset is
	 * removed when the mbuf is handed up. XXX: this ensures at least
	 * 16 byte alignment of the header adjacent to the ethernet header,
	 * which should be sufficient in all cases. Nevertheless, this
	 * second-guesses ALIGN().
	 */
	m_adj(m, roundup2(b, a) - b);
	if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    m, hme_rxdma_callback, &ba, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
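	/*
	 * Worked example for the alignment above (an editorial sketch, not
	 * driver code; the addresses are made up): with sc_burst == 64,
	 * a == 64. If the cluster happens to start at b == 0x12345802, then
	 * roundup2(b, a) == 0x12345840, so m_adj() trims 0x3e bytes and the
	 * mapped buffer begins on a 64 byte burst boundary. The chip later
	 * stores each frame 2 bytes (the first byte offset chosen in the
	 * comment at the top of this file) into the buffer, so the payload
	 * behind the 14 byte ethernet header ends up 16 byte aligned.
	 */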
	if (unmap) {
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	}
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
	rd->hrx_m = m;
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
	return (0);
}

static int
hme_meminit(struct hme_softc *sc)
{
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	int error;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_NTXDESC * HME_XD_SIZE;
	dma += HME_NTXDESC * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_NRXDESC * HME_XD_SIZE;
	dma += HME_NRXDESC * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < HME_NTXDESC; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
	for (i = 0; i < HME_NTXQ; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		if (td->htx_m != NULL) {
			m_freem(td->htx_m);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
			td->htx_m = NULL;
		}
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);
		if (error != 0)
			return (error);
	}

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
	    hr->rb_txddma);
	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
	    hr->rb_rxddma);
	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
	return (0);
}

static int
hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)
{
	int i = 0;

	val &= ~clr;
	val |= set;
	HME_MAC_WRITE_4(sc, reg, val);
	if (clr == 0 && set == 0)
		return (1);	/* just write, no bits to wait for */
	do {
		DELAY(100);
		i++;
		val = HME_MAC_READ_4(sc, reg);
		if (i > 40) {
			/* After 3.5ms, we should have been done. */
			device_printf(sc->sc_dev, "timeout while writing to "
			    "MAC configuration register\n");
			return (0);
		}
	} while ((val & clr) != 0 && (val & set) != set);
	return (1);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
hme_init(void *xsc)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	if (hme_meminit(sc) != 0) {
		device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
		return;
	}

	/* step 4. TX MAC registers & counters */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, ETHER_MAX_LEN);

	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here. */

	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc, 0);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	/* Transmit Descriptor ring size: in increments of 16 */
	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);

	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, ETHER_MAX_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
	    HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_RXTOHOST |
	    HME_SEB_STAT_TXALL |
	    HME_SEB_STAT_TXPERR |
	    HME_SEB_STAT_RCNTEXP |
	    HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);

	/* step 10. ERX Configuration */
	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	v &= ~HME_ERX_CFG_RINGSIZEMSK;
	switch (HME_NRXDESC) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

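	/*
	 * Illustrative arithmetic (editorial sketch, not driver code): the
	 * ETXI_RSIZE register counts in units of 16 descriptors, so with,
	 * e.g., HME_NTXDESC == 256 the value programmed in step 6 above is
	 * 256 / 16 - 1 == 15. The RX ring, in contrast, only supports the
	 * four discrete sizes handled by the switch above; any other
	 * HME_NRXDESC value falls through to the error message. The value
	 * 256 is an assumption for the example; HME_NTXDESC is defined in
	 * if_hmevar.h.
	 */
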
	/* Enable DMA, fix RX first byte offset. */
	v &= ~HME_ERX_CFG_FBO_MASK;
	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
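	/*
	 * Resulting RX buffer layout (editorial sketch, assuming the first
	 * byte offset HME_RXOFFS == 2 chosen in the comment at the top of
	 * this file):
	 *
	 *	offset  0: 2 pad bytes written by the chip
	 *	offset  2: 14 byte ethernet header
	 *	offset 16: following header, on a natural 16 byte boundary
	 */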
	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	/* If an external transceiver is connected, enable its MII drivers */
	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
		v |= HME_MAC_XIF_MIIENABLE;
	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	v &= ~(HME_MAC_RXCFG_DCRCS);
	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

#ifdef HMEDEBUG
	/* Debug: double-check. */
	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
	    HME_ERX_READ_4(sc, HME_ERXI_RING),
	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));
#endif

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	hme_start(ifp);
}

struct hme_txdma_arg {
	struct hme_softc	*hta_sc;
	struct hme_txdesc	*hta_htx;
	int			hta_ndescs;
};

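/*
 * Editorial sketch of how this argument block is used (illustration only,
 * mirroring the code below): hme_load_txmbuf() fills in hta_sc and hta_htx
 * and hands the struct to bus_dmamap_load_mbuf(); hme_txdma_callback()
 * reports back through hta_ndescs, which receives either the number of
 * descriptors used or -1 when the ring had too few free entries:
 *
 *	struct hme_txdma_arg cba;
 *
 *	cba.hta_sc = sc;
 *	cba.hta_htx = td;
 *	error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap, m0,
 *	    hme_txdma_callback, &cba, 0);
 *	if (error == 0 && cba.hta_ndescs == -1)
 *		(not enough free descriptors)
 */
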
/*
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf() are readable from the nearest burst boundary on
 * (i.e. potentially before ds_addr) to the first boundary beyond the end.
 * This is usually a safe assumption to make, but is not documented.
 */
static void
hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsz, int error)
{
	struct hme_txdma_arg *ta = xsc;
	struct hme_txdesc *htx;
	bus_size_t len = 0;
	caddr_t txd;
	u_int32_t flags = 0;
	int i, tdhead, pci;

	if (error != 0)
		return;

	tdhead = ta->hta_sc->sc_rb.rb_tdhead;
	pci = ta->hta_sc->sc_pci;
	txd = ta->hta_sc->sc_rb.rb_txd;
	htx = ta->hta_htx;

	if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
		ta->hta_ndescs = -1;
		return;
	}
	ta->hta_ndescs = nsegs;

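	/*
	 * Editorial example of the check above (not driver code): with
	 * HME_NTXDESC == 256 and rb_td_nbusy == 250, a 7 segment chain
	 * would be rejected (250 + 7 >= 256) and signalled back to
	 * hme_load_txmbuf() via hta_ndescs == -1; the >= also keeps the
	 * ring from ever being filled completely.
	 */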
	for (i = 0; i < nsegs; i++) {
		if (segs[i].ds_len == 0)
			continue;

		/* Fill the ring entry. */
		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
		if (len == 0)
			flags |= HME_XD_SOP;
		if (len + segs[i].ds_len == totsz)
			flags |= HME_XD_EOP;
		CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
		    "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
		    (u_int)segs[i].ds_addr);
		HME_XD_SETFLAGS(pci, txd, tdhead, flags);
		HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);

		ta->hta_sc->sc_rb.rb_td_nbusy++;
		htx->htx_lastdesc = tdhead;
		tdhead = (tdhead + 1) % HME_NTXDESC;
		len += segs[i].ds_len;
	}
	ta->hta_sc->sc_rb.rb_tdhead = tdhead;
	KASSERT((flags & HME_XD_EOP) != 0,
	    ("hme_txdma_callback: missed end of packet!"));
}

/*
 * Routine to DMA map an mbuf chain, set up the descriptor rings
 * accordingly, and start the transmission.
 * Returns 0 on success, -1 if there were not enough free descriptors to
 * map the packet, or an errno otherwise.
 */
static int
hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
{
	struct hme_txdma_arg cba;
	struct hme_txdesc *td;
	int error, si, ri;
	u_int32_t flags;

	si = sc->sc_rb.rb_tdhead;
	if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
		return (-1);
	td->htx_m = m0;
	cba.hta_sc = sc;
	cba.hta_htx = td;
	if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
	    m0, hme_txdma_callback, &cba, 0)) != 0)
		goto fail;
	if (cba.hta_ndescs == -1) {
		error = -1;
		goto fail;
 	}
+	bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
+	    BUS_DMASYNC_PREWRITE);
+
+	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
+	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);
+
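/*
 * Editor's note: the loop below hands the descriptors to the chip in
 * reverse ring order, so that HME_XD_OWN is set on the first descriptor
 * of the chain (si) last.  If ownership were granted front to back, the
 * NIC could start transmitting a chain whose tail descriptors are not
 * yet valid.
 */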
 	/* Turn descriptor ownership over to the hme, back to front. */
 	ri = sc->sc_rb.rb_tdhead;
 	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
 	    ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
 	do {
 		ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
 		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
 		    HME_XD_OWN;
 		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
 		    ri, si, flags);
 		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
 	} while (ri != si);
 
 	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 
 	/* Start the transmission. */
 	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
 	return (0);
 fail:
-	for (ri = si; ri != sc->sc_rb.rb_tdhead; ri = (ri + 1) % HME_NTXDESC) {
-		td = &sc->sc_rb.rb_txdesc[ri];
-		if ((td->htx_flags & HTXF_MAPPED) != 0)
-			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
-		td->htx_flags = 0;
-		td->htx_m = NULL;
-		sc->sc_rb.rb_td_nbusy--;
-		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, 0);
-	}
-	sc->sc_rb.rb_tdhead = si;
-	error = cba.hta_err != 0 ? cba.hta_err : error;
-	if (error != -1)
-		device_printf(sc->sc_dev, "could not load mbuf: %d\n", error);
+	bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
 	return (error);
 }
 
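/*
 * Editor's note: a standalone sketch (not driver code) of the power-of-two
 * alignment arithmetic used by the removed burst-boundary mapping above.
 * For a burst size that is a small power of two, masking with (burst - 1)
 * yields the misalignment of an address, from which the distances to the
 * previous and to the next burst boundary follow.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uintptr_t addr = 0x1234567;	/* arbitrary example buffer address */
	uintptr_t burst = 64;		/* assumed burst size, power of two */
	uintptr_t down, up;

	down = addr & (burst - 1);	/* offset back to the boundary */
	up = (burst - down) % burst;	/* pad forward to the boundary */
	printf("round down by %ju, pad up by %ju\n",
	    (uintmax_t)down, (uintmax_t)up);
	return (0);
}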
 /*
  * Pass a packet to the higher levels.
  */
 static void
 hme_read(struct hme_softc *sc, int ix, int len)
 {
 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 	struct mbuf *m;
-	int offs;
 
 	if (len <= sizeof(struct ether_header) ||
 	    len > ETHERMTU + sizeof(struct ether_header)) {
 #ifdef HMEDEBUG
 		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
 		    len);
 #endif
 		ifp->if_ierrors++;
 		hme_discard_rxbuf(sc, ix, 1);
 		return;
 	}
 
 	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
-	offs = sc->sc_rb.rb_rxdesc[ix].hrx_offs;
-	CTR2(KTR_HME, "hme_read: offs %d, len %d", offs, len);
+	CTR1(KTR_HME, "hme_read: len %d", len);
 
 	if (hme_add_rxbuf(sc, ix, 0) != 0) {
 		/*
 		 * hme_add_rxbuf will leave the old buffer in the ring until
 		 * it is sure that a new buffer can be mapped. If it can not,
 		 * drop the packet, but leave the interface up.
 		 */
 		ifp->if_iqdrops++;
 		hme_discard_rxbuf(sc, ix, 1);
 		return;
 	}
 
 	ifp->if_ipackets++;
 
 	/* Changed the rings; sync. */
 	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 
 	m->m_pkthdr.rcvif = ifp;
-	m->m_pkthdr.len = m->m_len = len + offs;
-	m_adj(m, offs);
+	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
+	m_adj(m, HME_RXOFFS);
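/*
 * Editor's note: HME_RXOFFS is the fixed pad the chip is programmed to
 * leave in front of each received frame, replacing the per-buffer
 * hrx_offs bookkeeping above; m_adj(9) then trims it so the mbuf starts
 * at the Ethernet header.  Such a pad, commonly 2 bytes, keeps the IP
 * header 32-bit aligned behind the 14-byte Ethernet header.
 */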
 	/* Pass the packet up. */
 	(*ifp->if_input)(ifp, m);
 }
 
 static void
 hme_start(struct ifnet *ifp)
 {
 	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
 	struct mbuf *m;
 	int error, enq = 0;
 
 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
 		return;
 
 	error = 0;
 	for (;;) {
 		IF_DEQUEUE(&ifp->if_snd, m);
 		if (m == NULL)
 			break;
 
-		error = hme_load_mbuf(sc, m);
-		if (error != 0) {
+		error = hme_load_txmbuf(sc, m);
+		if (error == -1) {
 			ifp->if_flags |= IFF_OACTIVE;
 			IF_PREPEND(&ifp->if_snd, m);
 			break;
+		} else if (error > 0) {
+			printf("hme_start: error %d while loading mbuf\n",
+			    error);
 		} else {
 			enq = 1;
 			BPF_MTAP(ifp, m);
 		}
 	}
 
 	if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
 		ifp->if_flags |= IFF_OACTIVE;
 	/* Set watchdog timer if a packet was queued */
 	if (enq)
 		ifp->if_timer = 5;
 }
 
 /*
  * Transmit interrupt.
  */
 static void
 hme_tint(struct hme_softc *sc)
 {
 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
-	struct hme_txdesc *td;
+	struct hme_txdesc *htx;
 	unsigned int ri, txflags;
 
 	/*
 	 * Unload collision counters
 	 */
 	ifp->if_collisions +=
 	    HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
 	    HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
 	    HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
 	    HME_MAC_READ_4(sc, HME_MACI_LTCNT);
 
 	/*
 	 * then clear the hardware counters.
 	 */
 	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
 	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
 	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
 	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
 
+	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 	/* Fetch current position in the transmit ring */
 	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
 		if (sc->sc_rb.rb_td_nbusy <= 0) {
 			CTR0(KTR_HME, "hme_tint: not busy!");
 			break;
 		}
 
 		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
 		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
 
 		if ((txflags & HME_XD_OWN) != 0)
 			break;
 
-		td = &sc->sc_rb.rb_txdesc[ri];
-		CTR1(KTR_HME, "hme_tint: not owned, dflags %#x", td->htx_flags);
-		if ((td->htx_flags & HTXF_MAPPED) != 0) {
-			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
-			    BUS_DMASYNC_POSTWRITE);
-			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
-		}
-		td->htx_flags = 0;
+		CTR0(KTR_HME, "hme_tint: not owned");
 		--sc->sc_rb.rb_td_nbusy;
 		ifp->if_flags &= ~IFF_OACTIVE;
 
 		/* Complete packet transmitted? */
 		if ((txflags & HME_XD_EOP) == 0)
 			continue;
 
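/*
 * Editor's note: descriptor chains complete in ring order, so the head of
 * rb_txbusyq must always correspond to the EOP descriptor being retired;
 * the KASSERT below checks that invariant via htx_lastdesc.
 */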
+		KASSERT(htx->htx_lastdesc == ri,
+		    ("hme_tint: ring indices skewed: %d != %d!",
+		    htx->htx_lastdesc, ri));
+		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
+		    BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
+
 		ifp->if_opackets++;
-		m_freem(td->htx_m);
-		td->htx_m = NULL;
+		m_freem(htx->htx_m);
+		htx->htx_m = NULL;
+		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
+		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
+		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
 	}
 	/* Turn off watchdog */
 	if (sc->sc_rb.rb_td_nbusy == 0)
 		ifp->if_timer = 0;
 
 	/* Update ring */
 	sc->sc_rb.rb_tdtail = ri;
 
 	hme_start(ifp);
 
 	if (sc->sc_rb.rb_td_nbusy == 0)
 		ifp->if_timer = 0;
 }
 
 /*
  * Receive interrupt.
  */
 static void
 hme_rint(struct hme_softc *sc)
 {
 	caddr_t xdr = sc->sc_rb.rb_rxd;
 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 	unsigned int ri, len;
 	u_int32_t flags;
 
 	/*
 	 * Process all buffers with valid data.
 	 */
 	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
 		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
 		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
 		if ((flags & HME_XD_OWN) != 0)
 			break;
 
 		if ((flags & HME_XD_OFL) != 0) {
 			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
 			    "flags=0x%x\n", ri, flags);
 			ifp->if_ierrors++;
 			hme_discard_rxbuf(sc, ri, 1);
 		} else {
 			len = HME_XD_DECODE_RSIZE(flags);
 			hme_read(sc, ri, len);
 		}
 	}
 
 	sc->sc_rb.rb_rdtail = ri;
 }
 
 static void
 hme_eint(struct hme_softc *sc, u_int status)
 {
 
 	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
 		device_printf(sc->sc_dev, "XXXlink status changed\n");
 		return;
 	}
 
 	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 }
 
 void
 hme_intr(void *v)
 {
 	struct hme_softc *sc = (struct hme_softc *)v;
 	u_int32_t status;
 
 	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
 
 	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
 		hme_eint(sc, status);
 
 	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
 		hme_tint(sc);
 
 	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
 		hme_rint(sc);
 }
 
 static void
 hme_watchdog(struct ifnet *ifp)
 {
 	struct hme_softc *sc = ifp->if_softc;
 #ifdef HMEDEBUG
 	u_int32_t status;
 
 	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
 	CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
 #endif
 	device_printf(sc->sc_dev, "device timeout\n");
 	++ifp->if_oerrors;
 
 	hme_reset(sc);
 }
 
 /*
  * Initialize the MII Management Interface
  */
 static void
 hme_mifinit(struct hme_softc *sc)
 {
 	u_int32_t v;
 
 	/* Configure the MIF in frame mode */
 	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 	v &= ~HME_MIF_CFG_BBMODE;
 	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 }
 
 /*
  * MII interface
  */
 int
 hme_mii_readreg(device_t dev, int phy, int reg)
 {
 	struct hme_softc *sc = device_get_softc(dev);
 	int n;
 	u_int32_t v;
 
 	/* Select the desired PHY in the MIF configuration register */
 	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 	/* Clear PHY select bit */
 	v &= ~HME_MIF_CFG_PHY;
 	if (phy == HME_PHYAD_EXTERNAL)
 		/* Set PHY select bit to get at external device */
 		v |= HME_MIF_CFG_PHY;
 	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 
 	/* Construct the frame command */
 	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 	    HME_MIF_FO_TAMSB |
 	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
 	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
 	    (reg << HME_MIF_FO_REGAD_SHIFT);
 
 	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 	for (n = 0; n < 100; n++) {
 		DELAY(1);
 		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 		if (v & HME_MIF_FO_TALSB)
 			return (v & HME_MIF_FO_DATA);
 	}
 
 	device_printf(sc->sc_dev, "mii_read timeout\n");
 	return (0);
 }
 
 int
 hme_mii_writereg(device_t dev, int phy, int reg, int val)
 {
 	struct hme_softc *sc = device_get_softc(dev);
 	int n;
 	u_int32_t v;
 
 	/* Select the desired PHY in the MIF configuration register */
 	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 	/* Clear PHY select bit */
 	v &= ~HME_MIF_CFG_PHY;
 	if (phy == HME_PHYAD_EXTERNAL)
 		/* Set PHY select bit to get at external device */
 		v |= HME_MIF_CFG_PHY;
 	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 
 	/* Construct the frame command */
 	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
 	    HME_MIF_FO_TAMSB |
 	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
 	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
 	    (reg << HME_MIF_FO_REGAD_SHIFT) |
 	    (val & HME_MIF_FO_DATA);
 
 	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
 	for (n = 0; n < 100; n++) {
 		DELAY(1);
 		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
 		if (v & HME_MIF_FO_TALSB)
 			return (1);
 	}
 
 	device_printf(sc->sc_dev, "mii_write timeout\n");
 	return (0);
 }
 
 void
 hme_mii_statchg(device_t dev)
 {
 	struct hme_softc *sc = device_get_softc(dev);
 	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
 	int phy = sc->sc_phys[instance];
 	u_int32_t v;
 
 #ifdef HMEDEBUG
 	if (sc->sc_debug)
 		printf("hme_mii_statchg: status change: phy = %d\n", phy);
 #endif
 
 	/* Select the current PHY in the MIF configuration register */
 	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
 	v &= ~HME_MIF_CFG_PHY;
 	if (phy == HME_PHYAD_EXTERNAL)
 		v |= HME_MIF_CFG_PHY;
 	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
 
 	/* Set the MAC Full Duplex bit appropriately */
 	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
 	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
 		return;
 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
 		v |= HME_MAC_TXCFG_FULLDPLX;
 	else
 		v &= ~HME_MAC_TXCFG_FULLDPLX;
 	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
 	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
 		return;
 }
 
 static int
 hme_mediachange(struct ifnet *ifp)
 {
 	struct hme_softc *sc = ifp->if_softc;
 
 	return (mii_mediachg(sc->sc_mii));
 }
 
 static void
 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
 {
 	struct hme_softc *sc = ifp->if_softc;
 
 	if ((ifp->if_flags & IFF_UP) == 0)
 		return;
 
 	mii_pollstat(sc->sc_mii);
 	ifmr->ifm_active = sc->sc_mii->mii_media_active;
 	ifmr->ifm_status = sc->sc_mii->mii_media_status;
 }
 
 /*
  * Process an ioctl request.
  */
 static int
 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
 {
 	struct hme_softc *sc = ifp->if_softc;
 	struct ifreq *ifr = (struct ifreq *)data;
 	int s, error = 0;
 
 	s = splnet();
 
 	switch (cmd) {
 	case SIOCSIFFLAGS:
 		if ((ifp->if_flags & IFF_UP) == 0 &&
 		    (ifp->if_flags & IFF_RUNNING) != 0) {
 			/*
 			 * If interface is marked down and it is running, then
 			 * stop it.
 			 */
 			hme_stop(sc);
 			ifp->if_flags &= ~IFF_RUNNING;
 		} else if ((ifp->if_flags & IFF_UP) != 0 &&
 		    (ifp->if_flags & IFF_RUNNING) == 0) {
 			/*
 			 * If interface is marked up and it is stopped, then
 			 * start it.
 			 */
 			hme_init(sc);
 		} else if ((ifp->if_flags & IFF_UP) != 0) {
 			/*
 			 * Reset the interface to pick up changes in any other
 			 * flags that affect hardware registers.
 			 */
-			/*hme_stop(sc);*/
 			hme_init(sc);
 		}
 #ifdef HMEDEBUG
 		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
 #endif
 		break;
 
 	case SIOCADDMULTI:
 	case SIOCDELMULTI:
 		hme_setladrf(sc, 1);
 		error = 0;
 		break;
 	case SIOCGIFMEDIA:
 	case SIOCSIFMEDIA:
 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
 		break;
 	default:
 		error = ether_ioctl(ifp, cmd, data);
 		break;
 	}
 
 	splx(s);
 	return (error);
 }
 
 #if 0
 static void
 hme_shutdown(void *arg)
 {
 
 	hme_stop((struct hme_softc *)arg);
 }
 #endif
 
 /*
  * Set up the logical address filter.
  */
 static void
 hme_setladrf(struct hme_softc *sc, int reenable)
 {
 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
 	struct ifmultiaddr *inm;
 	struct sockaddr_dl *sdl;
 	u_char *cp;
 	u_int32_t crc;
 	u_int32_t hash[4];
 	u_int32_t macc;
 	int len;
 
 	/* Clear hash table */
 	hash[3] = hash[2] = hash[1] = hash[0] = 0;
 
 	/* Get current RX configuration */
 	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 
 	/*
 	 * Disable the receiver while changing its state as the documentation
 	 * mandates.
 	 * We then must wait until the bit clears in the register. This should
 	 * take at most 3.5ms.
 	 */
 	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
 		return;
 	/* Disable the hash filter before writing to the filter registers. */
 	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 	    HME_MAC_RXCFG_HENABLE, 0))
 		return;
 
 	if (reenable)
 		macc |= HME_MAC_RXCFG_ENABLE;
 	else
 		macc &= ~HME_MAC_RXCFG_ENABLE;
 
 	if ((ifp->if_flags & IFF_PROMISC) != 0) {
 		/* Turn on promiscuous mode; turn off the hash filter */
 		macc |= HME_MAC_RXCFG_PMISC;
 		macc &= ~HME_MAC_RXCFG_HENABLE;
 		ifp->if_flags |= IFF_ALLMULTI;
 		goto chipit;
 	}
 
 	/* Turn off promiscuous mode; turn on the hash filter */
 	macc &= ~HME_MAC_RXCFG_PMISC;
 	macc |= HME_MAC_RXCFG_HENABLE;
 
 	/*
 	 * Set up multicast address filter by passing all multicast addresses
 	 * through a crc generator, and then using the high order 6 bits as an
 	 * index into the 64 bit logical address filter. The high order bit
 	 * selects the word, while the rest of the bits select the bit within
 	 * the word.
 	 */
 
 	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
 		if (inm->ifma_addr->sa_family != AF_LINK)
 			continue;
 		sdl = (struct sockaddr_dl *)inm->ifma_addr;
 		cp = LLADDR(sdl);
 		crc = 0xffffffff;
 		for (len = sdl->sdl_alen; --len >= 0;) {
 			int octet = *cp++;
 			int i;
 
 #define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
 			for (i = 0; i < 8; i++) {
 				if ((crc & 1) ^ (octet & 1)) {
 					crc >>= 1;
 					crc ^= MC_POLY_LE;
 				} else {
 					crc >>= 1;
 				}
 				octet >>= 1;
 			}
 		}
 		/* Just want the 6 most significant bits. */
 		crc >>= 26;
 
 		/* Set the corresponding bit in the filter. */
 		hash[crc >> 4] |= 1 << (crc & 0xf);
 	}
 
 	ifp->if_flags &= ~IFF_ALLMULTI;
 
 chipit:
 	/* Now load the hash table into the chip */
 	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
 	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
 	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
 	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
 	hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
 	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
 }
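/*
 * Editor's note: a standalone sketch (not driver code) of the multicast
 * hash used by hme_setladrf() above: run the little-endian Ethernet CRC-32
 * over the 6-byte address, keep the 6 most significant bits, and use them
 * to pick one of 64 bits spread across the four 16-bit hash registers.
 */
#include <stdint.h>
#include <stdio.h>

#define MC_POLY_LE 0xedb88320UL	/* mcast crc, little endian */

int
main(void)
{
	/* Example group address: the all-hosts multicast 01:00:5e:00:00:01. */
	uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = 0xffffffff;
	uint16_t hash[4] = { 0, 0, 0, 0 };
	int i, j, octet;

	for (i = 0; i < 6; i++) {
		octet = addr[i];
		for (j = 0; j < 8; j++) {
			/* Equivalent to the (crc & 1) ^ (octet & 1) test. */
			if ((crc ^ octet) & 1)
				crc = (crc >> 1) ^ MC_POLY_LE;
			else
				crc >>= 1;
			octet >>= 1;
		}
	}
	crc >>= 26;			/* keep the 6 most significant bits */
	hash[crc >> 4] |= 1 << (crc & 0xf);
	printf("bit %u -> HASHTAB%u, mask %#x\n",
	    crc, crc >> 4, 1 << (crc & 0xf));
	return (0);
}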
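/*
 * Editor's note: a standalone sketch (not driver code) of the IEEE 802.3
 * clause 22 management frame that hme_mii_readreg()/hme_mii_writereg()
 * earlier pack into the MIF frame/output register: a 2-bit start pattern,
 * a 2-bit opcode, 5-bit PHY and register addresses, a turnaround field and
 * 16 data bits.  The shift values below mirror the standard frame layout
 * and are assumptions; the actual HME_MIF_FO_* definitions live in the
 * driver's register header.
 */
#include <stdint.h>
#include <stdio.h>

#define MII_ST		1	/* start of frame: 01 */
#define MII_OP_READ	2	/* read opcode: 10 */
#define ST_SHIFT	30	/* assumed, per the clause 22 layout */
#define OP_SHIFT	28
#define PHY_SHIFT	23
#define REG_SHIFT	18

int
main(void)
{
	uint32_t phy = 1, reg = 0;	/* example PHY address and register */
	uint32_t frame;

	frame = ((uint32_t)MII_ST << ST_SHIFT) | (MII_OP_READ << OP_SHIFT) |
	    (phy << PHY_SHIFT) | (reg << REG_SHIFT);
	printf("MII read frame: %#x\n", frame);	/* prints 0x60800000 */
	return (0);
}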