if_hme.c: revision 164866 vs. revision 164932
1/*-
2 * Copyright (c) 1999 The NetBSD Foundation, Inc.
3 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Paul Kranenburg.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 *
37 * from: NetBSD: hme.c,v 1.35 2003/02/27 14:58:22 pk Exp
38 */
39
40#include <sys/cdefs.h>
41__FBSDID("$FreeBSD: head/sys/dev/hme/if_hme.c 164866 2006-12-04 01:53:40Z marius $");
41__FBSDID("$FreeBSD: head/sys/dev/hme/if_hme.c 164932 2006-12-06 02:07:20Z marius $");
42
43/*
44 * HME Ethernet module driver.
45 *
46 * The HME is e.g. part of the PCIO PCI multi function device.
47 * It supports TX gathering and TX and RX checksum offloading.
48 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
 49 * for this offset: mbuf clusters are usually aligned to 2^11 boundaries, and 2
 50 * bytes are skipped so that the header following the ethernet header lands on a
 51 * natural boundary, which ensures minimal wastage in the most common case.
52 *
53 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
54 * maximum packet size (this is not verified). Buffers starting on odd
55 * boundaries must be mapped so that the burst can start on a natural boundary.
56 *
 57 * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum offloading.
 58 * In reality, the same technique works for UDP datagrams too. However, the
 59 * hardware doesn't compensate the checksum for UDP datagrams, which can come out
 60 * as 0x0000. As a safeguard, UDP checksum offload is disabled by default. It
 61 * can be reactivated by setting the special link option link0 with ifconfig(8).
62 */
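/*
 * For illustration, assuming the interface ends up named hme0 (a hypothetical
 * example), the UDP TX checksum offload mentioned above would be toggled from
 * userland roughly like this:
 *
 *	# ifconfig hme0 link0		enable UDP TX checksum offload
 *	# ifconfig hme0 -link0		disable it again (the default)
 */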
63#define HME_CSUM_FEATURES (CSUM_TCP)
64#define HMEDEBUG
65#define KTR_HME KTR_CT2 /* XXX */
66
67#include <sys/param.h>
68#include <sys/systm.h>
69#include <sys/bus.h>
70#include <sys/endian.h>
71#include <sys/kernel.h>
72#include <sys/module.h>
73#include <sys/ktr.h>
74#include <sys/mbuf.h>
75#include <sys/malloc.h>
76#include <sys/socket.h>
77#include <sys/sockio.h>
78
79#include <net/bpf.h>
80#include <net/ethernet.h>
81#include <net/if.h>
82#include <net/if_arp.h>
83#include <net/if_dl.h>
84#include <net/if_media.h>
85#include <net/if_types.h>
86#include <net/if_vlan_var.h>
87
88#include <netinet/in.h>
89#include <netinet/in_systm.h>
90#include <netinet/ip.h>
91#include <netinet/tcp.h>
92#include <netinet/udp.h>
93
94#include <dev/mii/mii.h>
95#include <dev/mii/miivar.h>
96
97#include <machine/bus.h>
98
99#include <dev/hme/if_hmereg.h>
100#include <dev/hme/if_hmevar.h>
101
102static void hme_start(struct ifnet *);
103static void hme_start_locked(struct ifnet *);
104static void hme_stop(struct hme_softc *);
105static int hme_ioctl(struct ifnet *, u_long, caddr_t);
106static void hme_tick(void *);
107static void hme_watchdog(struct ifnet *);
107static int hme_watchdog(struct hme_softc *);
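/*
 * Note on the change above: as of this revision the driver no longer hooks the
 * generic ifnet watchdog (ifp->if_watchdog/ifp->if_timer). It keeps its own
 * sc_wdog_timer in the softc instead, which hme_tick() checks once per second
 * by calling hme_watchdog(); a return value of EJUSTRETURN appears to tell
 * hme_tick() not to rearm its callout, presumably because the watchdog just
 * reinitialized the chip and hme_init_locked() rearms it itself.
 */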
108static void hme_init(void *);
109static void hme_init_locked(struct hme_softc *);
110static int hme_add_rxbuf(struct hme_softc *, unsigned int, int);
111static int hme_meminit(struct hme_softc *);
112static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
113 u_int32_t, u_int32_t);
114static void hme_mifinit(struct hme_softc *);
115static void hme_setladrf(struct hme_softc *, int);
116
117static int hme_mediachange(struct ifnet *);
118static int hme_mediachange_locked(struct hme_softc *);
119static void hme_mediastatus(struct ifnet *, struct ifmediareq *);
120
121static int hme_load_txmbuf(struct hme_softc *, struct mbuf **);
122static void hme_read(struct hme_softc *, int, int, u_int32_t);
123static void hme_eint(struct hme_softc *, u_int);
124static void hme_rint(struct hme_softc *);
125static void hme_tint(struct hme_softc *);
126static void hme_txcksum(struct mbuf *, u_int32_t *);
127static void hme_rxcksum(struct mbuf *, u_int32_t);
128
129static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
130
131devclass_t hme_devclass;
132
133static int hme_nerr;
134
135DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
136MODULE_DEPEND(hme, miibus, 1, 1, 1);
137
138#define HME_SPC_READ_4(spc, sc, offs) \
139 bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
140 (offs))
141#define HME_SPC_WRITE_4(spc, sc, offs, v) \
142 bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
143 (offs), (v))
144
145#define HME_SEB_READ_4(sc, offs) HME_SPC_READ_4(seb, (sc), (offs))
146#define HME_SEB_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(seb, (sc), (offs), (v))
147#define HME_ERX_READ_4(sc, offs) HME_SPC_READ_4(erx, (sc), (offs))
148#define HME_ERX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(erx, (sc), (offs), (v))
149#define HME_ETX_READ_4(sc, offs) HME_SPC_READ_4(etx, (sc), (offs))
150#define HME_ETX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(etx, (sc), (offs), (v))
151#define HME_MAC_READ_4(sc, offs) HME_SPC_READ_4(mac, (sc), (offs))
152#define HME_MAC_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mac, (sc), (offs), (v))
153#define HME_MIF_READ_4(sc, offs) HME_SPC_READ_4(mif, (sc), (offs))
154#define HME_MIF_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mif, (sc), (offs), (v))
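/*
 * A minimal sketch of how the token pasting above expands: with spc == seb,
 * HME_SEB_READ_4(sc, HME_SEBI_RESET) becomes
 *
 *	bus_space_read_4((sc)->sc_sebt, (sc)->sc_sebh, HME_SEBI_RESET)
 *
 * i.e. each register group just selects its own bus space tag/handle pair
 * from the softc.
 */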
155
156#define HME_MAXERR 5
157#define HME_WHINE(dev, ...) do { \
158 if (hme_nerr++ < HME_MAXERR) \
159 device_printf(dev, __VA_ARGS__); \
160 if (hme_nerr == HME_MAXERR) { \
161 device_printf(dev, "too many errors; not reporting " \
162 "any more\n"); \
163 } \
164} while(0)
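/*
 * Usage sketch (the message text is made up for illustration):
 *
 *	HME_WHINE(sc->sc_dev, "RX error %d\n", err);
 *
 * prints at most HME_MAXERR (5) such messages driver-wide, since hme_nerr is
 * a single global counter, and then goes quiet.
 */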
165
166/* Support oversized VLAN frames. */
167#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
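/* That is, 1518 + 4 == 1522 bytes, enough for a maximum-sized VLAN-tagged frame. */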
168
169int
170hme_config(struct hme_softc *sc)
171{
172 struct ifnet *ifp;
173 struct mii_softc *child;
174 bus_size_t size;
175 int error, rdesc, tdesc, i;
176
177 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
178 if (ifp == NULL)
179 return (ENOSPC);
180
181 /*
182 * HME common initialization.
183 *
184 * hme_softc fields that must be initialized by the front-end:
185 *
186 * the DMA bus tag:
187 * sc_dmatag
188 *
 189 * the bus handles, tags and offsets (split for SBus compatibility):
190 * sc_seb{t,h,o} (Shared Ethernet Block registers)
191 * sc_erx{t,h,o} (Receiver Unit registers)
192 * sc_etx{t,h,o} (Transmitter Unit registers)
193 * sc_mac{t,h,o} (MAC registers)
194 * sc_mif{t,h,o} (Management Interface registers)
195 *
196 * the maximum bus burst size:
197 * sc_burst
198 *
199 */
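	/*
	 * Illustrative sketch only (the resource variables and values below
	 * are hypothetical): a bus front-end is expected to have done
	 * something along these lines before calling hme_config():
	 *
	 *	sc->sc_dev    = dev;
	 *	sc->sc_burst  = 64;			(maximum DMA burst size)
	 *	sc->sc_dmatag = bus_get_dma_tag(dev);
	 *	sc->sc_sebt   = rman_get_bustag(sres);
	 *	sc->sc_sebh   = rman_get_bushandle(sres);
	 *	... and likewise for the erx/etx/mac/mif register groups.
	 */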
200
201 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);
202
203 /* Make sure the chip is stopped. */
204 HME_LOCK(sc);
205 hme_stop(sc);
206 HME_UNLOCK(sc);
207
208 /*
209 * Allocate DMA capable memory
210 * Buffer descriptors must be aligned on a 2048 byte boundary;
211 * take this into account when calculating the size. Note that
212 * the maximum number of descriptors (256) occupies 2048 bytes,
213 * so we allocate that much regardless of HME_N*DESC.
214 */
215 size = 4096;
216
217 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
218 BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
219 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
217 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
218 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
219 HME_NTXDESC + HME_NRXDESC + 1, BUS_SPACE_MAXSIZE_32BIT, 0,
220 NULL, NULL, &sc->sc_pdmatag);
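	/*
	 * The change above means the parent DMA tag is now obtained from the
	 * parent bus via bus_get_dma_tag(9) instead of passing NULL (no
	 * parent), so any DMA restrictions the parent bus imposes are
	 * inherited by sc_pdmatag and every tag derived from it.
	 */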
220 if (error)
221 goto fail_ifnet;
222
223 error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
224 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
225 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
226 &sc->sc_lock, &sc->sc_cdmatag);
227 if (error)
228 goto fail_ptag;
229
230 error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
231 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
232 HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
233 NULL, NULL, &sc->sc_rdmatag);
234 if (error)
235 goto fail_ctag;
236
237 error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
238 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
239 HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
240 NULL, NULL, &sc->sc_tdmatag);
241 if (error)
242 goto fail_rtag;
243
244 /* Allocate control/TX DMA buffer */
245 error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
246 0, &sc->sc_cdmamap);
247 if (error != 0) {
248 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
249 goto fail_ttag;
250 }
251
252 /* Load the buffer */
253 sc->sc_rb.rb_dmabase = 0;
254 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
255 sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
256 sc->sc_rb.rb_dmabase == 0) {
257 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
258 error);
259 goto fail_free;
260 }
261 CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
262 sc->sc_rb.rb_dmabase);
263
264 /*
 265	 * Prepare the RX descriptors. rdesc serves as a marker for the last
266 * processed descriptor and may be used later on.
267 */
268 for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
269 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
270 error = bus_dmamap_create(sc->sc_rdmatag, 0,
271 &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
272 if (error != 0)
273 goto fail_rxdesc;
274 }
275 error = bus_dmamap_create(sc->sc_rdmatag, 0,
276 &sc->sc_rb.rb_spare_dmamap);
277 if (error != 0)
278 goto fail_rxdesc;
279 /* Same for the TX descs. */
280 for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
281 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
282 error = bus_dmamap_create(sc->sc_tdmatag, 0,
283 &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
284 if (error != 0)
285 goto fail_txdesc;
286 }
287
288 sc->sc_csum_features = HME_CSUM_FEATURES;
289 /* Initialize ifnet structure. */
290 ifp->if_softc = sc;
291 if_initname(ifp, device_get_name(sc->sc_dev),
292 device_get_unit(sc->sc_dev));
293 ifp->if_mtu = ETHERMTU;
294 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
295 ifp->if_start = hme_start;
296 ifp->if_ioctl = hme_ioctl;
297 ifp->if_init = hme_init;
298 ifp->if_watchdog = hme_watchdog;
299 IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
300 ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
301 IFQ_SET_READY(&ifp->if_snd);
302
303 hme_mifinit(sc);
304
305 if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
306 hme_mediastatus)) != 0) {
307 device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
308 goto fail_rxdesc;
309 }
310 sc->sc_mii = device_get_softc(sc->sc_miibus);
311
312 /*
313 * Walk along the list of attached MII devices and
314 * establish an `MII instance' to `PHY number'
315 * mapping. We'll use this mapping to enable the MII
316 * drivers of the external transceiver according to
317 * the currently selected media.
318 */
319 sc->sc_phys[0] = sc->sc_phys[1] = -1;
320 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
321 /*
322 * Note: we support just two PHYs: the built-in
323 * internal device and an external on the MII
324 * connector.
325 */
326 if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
327 child->mii_phy != HME_PHYAD_INTERNAL) ||
328 child->mii_inst > 1) {
329 device_printf(sc->sc_dev, "cannot accommodate "
330 "MII device %s at phy %d, instance %d\n",
331 device_get_name(child->mii_dev),
332 child->mii_phy, child->mii_inst);
333 continue;
334 }
335
336 sc->sc_phys[child->mii_inst] = child->mii_phy;
337 }
338
339 /* Attach the interface. */
340 ether_ifattach(ifp, sc->sc_enaddr);
341
342 /*
343 * Tell the upper layer(s) we support long frames/checksum offloads.
344 */
345 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
346 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
347 ifp->if_hwassist |= sc->sc_csum_features;
348 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
349 return (0);
350
351fail_txdesc:
352 for (i = 0; i < tdesc; i++) {
353 bus_dmamap_destroy(sc->sc_tdmatag,
354 sc->sc_rb.rb_txdesc[i].htx_dmamap);
355 }
356 bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
357fail_rxdesc:
358 for (i = 0; i < rdesc; i++) {
359 bus_dmamap_destroy(sc->sc_rdmatag,
360 sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
361 }
362 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
363fail_free:
364 bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
365fail_ttag:
366 bus_dma_tag_destroy(sc->sc_tdmatag);
367fail_rtag:
368 bus_dma_tag_destroy(sc->sc_rdmatag);
369fail_ctag:
370 bus_dma_tag_destroy(sc->sc_cdmatag);
371fail_ptag:
372 bus_dma_tag_destroy(sc->sc_pdmatag);
373fail_ifnet:
374 if_free(ifp);
375 return (error);
376}
377
378void
379hme_detach(struct hme_softc *sc)
380{
381 struct ifnet *ifp = sc->sc_ifp;
382 int i;
383
384 HME_LOCK(sc);
385 hme_stop(sc);
386 HME_UNLOCK(sc);
387 callout_drain(&sc->sc_tick_ch);
388 ether_ifdetach(ifp);
389 if_free(ifp);
390 device_delete_child(sc->sc_dev, sc->sc_miibus);
391
392 for (i = 0; i < HME_NTXQ; i++) {
393 bus_dmamap_destroy(sc->sc_tdmatag,
394 sc->sc_rb.rb_txdesc[i].htx_dmamap);
395 }
396 bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
397 for (i = 0; i < HME_NRXDESC; i++) {
398 bus_dmamap_destroy(sc->sc_rdmatag,
399 sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
400 }
401 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
402 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
403 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
404 bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
405 bus_dma_tag_destroy(sc->sc_tdmatag);
406 bus_dma_tag_destroy(sc->sc_rdmatag);
407 bus_dma_tag_destroy(sc->sc_cdmatag);
408 bus_dma_tag_destroy(sc->sc_pdmatag);
409}
410
411void
412hme_suspend(struct hme_softc *sc)
413{
414
415 HME_LOCK(sc);
416 hme_stop(sc);
417 HME_UNLOCK(sc);
418}
419
420void
421hme_resume(struct hme_softc *sc)
422{
423 struct ifnet *ifp = sc->sc_ifp;
424
425 HME_LOCK(sc);
426 if ((ifp->if_flags & IFF_UP) != 0)
427 hme_init_locked(sc);
428 HME_UNLOCK(sc);
429}
430
431static void
432hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
433{
434 struct hme_softc *sc = (struct hme_softc *)xsc;
435
436 if (error != 0)
437 return;
438 KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
439 sc->sc_rb.rb_dmabase = segs[0].ds_addr;
440}
441
442static void
443hme_tick(void *arg)
444{
445 struct hme_softc *sc = arg;
446 struct ifnet *ifp;
447
448 HME_LOCK_ASSERT(sc, MA_OWNED);
449
450 ifp = sc->sc_ifp;
451 /*
452 * Unload collision counters
453 */
454 ifp->if_collisions +=
455 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
456 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
457 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
458 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
459
460 /*
461 * then clear the hardware counters.
462 */
463 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
464 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
465 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
466 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
467
468 mii_tick(sc->sc_mii);
469
469 if (hme_watchdog(sc) == EJUSTRETURN)
470 return;
471
470 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
471}
472
473static void
474hme_stop(struct hme_softc *sc)
475{
476 u_int32_t v;
477 int n;
478
479 callout_stop(&sc->sc_tick_ch);
482 sc->sc_wdog_timer = 0;
480 sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
481
482 /* Mask all interrupts */
483 HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);
484
485 /* Reset transmitter and receiver */
486 HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
487 HME_SEB_RESET_ERX);
488
489 for (n = 0; n < 20; n++) {
490 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
491 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
492 return;
493 DELAY(20);
494 }
495
496 device_printf(sc->sc_dev, "hme_stop: reset failed\n");
497}
498
499/*
500 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
501 * ring for subsequent use.
502 */
503static __inline void
504hme_discard_rxbuf(struct hme_softc *sc, int ix)
505{
506
507 /*
508 * Dropped a packet, reinitialize the descriptor and turn the
509 * ownership back to the hardware.
510 */
511 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
512 HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
513}
514
515static int
516hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
517{
518 struct hme_rxdesc *rd;
519 struct mbuf *m;
520 bus_dma_segment_t segs[1];
521 bus_dmamap_t map;
522 uintptr_t b;
523 int a, unmap, nsegs;
524
525 rd = &sc->sc_rb.rb_rxdesc[ri];
526 unmap = rd->hrx_m != NULL;
527 if (unmap && keepold) {
528 /*
529 * Reinitialize the descriptor flags, as they may have been
530 * altered by the hardware.
531 */
532 hme_discard_rxbuf(sc, ri);
533 return (0);
534 }
535 if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
536 return (ENOBUFS);
537 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
538 b = mtod(m, uintptr_t);
539 /*
540 * Required alignment boundary. At least 16 is needed, but since
541 * the mapping must be done in a way that a burst can start on a
542 * natural boundary we might need to extend this.
543 */
544 a = imax(HME_MINRXALIGN, sc->sc_burst);
545 /*
 546	 * Make sure the buffer is suitably aligned. The 2 byte offset is removed
547 * when the mbuf is handed up. XXX: this ensures at least 16 byte
548 * alignment of the header adjacent to the ethernet header, which
549 * should be sufficient in all cases. Nevertheless, this second-guesses
550 * ALIGN().
551 */
552 m_adj(m, roundup2(b, a) - b);
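	/*
	 * Worked example (values assumed for illustration): with an effective
	 * alignment a == 64 (i.e. sc_burst == 64, which is at least
	 * HME_MINRXALIGN) and a cluster whose data starts 16 bytes past a
	 * 64-byte boundary, roundup2(b, a) - b == 48, so m_adj() advances the
	 * data pointer by 48 bytes to the next 64-byte boundary before the
	 * buffer is mapped.
	 */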
553 if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
554 m, segs, &nsegs, 0) != 0) {
555 m_freem(m);
556 return (ENOBUFS);
557 }
558 /* If nsegs is wrong then the stack is corrupt */
559 KASSERT(nsegs == 1, ("Too many segments returned!"));
560 if (unmap) {
561 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
562 BUS_DMASYNC_POSTREAD);
563 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
564 }
565 map = rd->hrx_dmamap;
566 rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
567 sc->sc_rb.rb_spare_dmamap = map;
568 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
569 HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, segs[0].ds_addr);
570 rd->hrx_m = m;
571 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
572 HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
573 return (0);
574}
575
576static int
577hme_meminit(struct hme_softc *sc)
578{
579 struct hme_ring *hr = &sc->sc_rb;
580 struct hme_txdesc *td;
581 bus_addr_t dma;
582 caddr_t p;
583 unsigned int i;
584 int error;
585
586 p = hr->rb_membase;
587 dma = hr->rb_dmabase;
588
589 /*
590 * Allocate transmit descriptors
591 */
592 hr->rb_txd = p;
593 hr->rb_txddma = dma;
594 p += HME_NTXDESC * HME_XD_SIZE;
595 dma += HME_NTXDESC * HME_XD_SIZE;
 596	/* We have reserved descriptor space until the next 2048 byte boundary. */
597 dma = (bus_addr_t)roundup((u_long)dma, 2048);
598 p = (caddr_t)roundup((u_long)p, 2048);
599
600 /*
601 * Allocate receive descriptors
602 */
603 hr->rb_rxd = p;
604 hr->rb_rxddma = dma;
605 p += HME_NRXDESC * HME_XD_SIZE;
606 dma += HME_NRXDESC * HME_XD_SIZE;
607 /* Again move forward to the next 2048 byte boundary.*/
608 dma = (bus_addr_t)roundup((u_long)dma, 2048);
609 p = (caddr_t)roundup((u_long)p, 2048);
610
611 /*
612 * Initialize transmit buffer descriptors
613 */
614 for (i = 0; i < HME_NTXDESC; i++) {
615 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
616 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
617 }
618
619 STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
620 STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
621 for (i = 0; i < HME_NTXQ; i++) {
622 td = &sc->sc_rb.rb_txdesc[i];
623 if (td->htx_m != NULL) {
624 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
625 BUS_DMASYNC_POSTWRITE);
626 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
627 m_freem(td->htx_m);
628 td->htx_m = NULL;
629 }
630 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
631 }
632
633 /*
634 * Initialize receive buffer descriptors
635 */
636 for (i = 0; i < HME_NRXDESC; i++) {
637 error = hme_add_rxbuf(sc, i, 1);
638 if (error != 0)
639 return (error);
640 }
641
642 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
643 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);
644
645 hr->rb_tdhead = hr->rb_tdtail = 0;
646 hr->rb_td_nbusy = 0;
647 hr->rb_rdtail = 0;
648 CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
649 hr->rb_txddma);
650 CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
651 hr->rb_rxddma);
652 CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
653 *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
654 CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
655 *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
656 return (0);
657}
658
659static int
660hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
661 u_int32_t clr, u_int32_t set)
662{
663 int i = 0;
664
665 val &= ~clr;
666 val |= set;
667 HME_MAC_WRITE_4(sc, reg, val);
668 if (clr == 0 && set == 0)
669 return (1); /* just write, no bits to wait for */
670 do {
671 DELAY(100);
672 i++;
673 val = HME_MAC_READ_4(sc, reg);
674 if (i > 40) {
675 /* After 3.5ms, we should have been done. */
676 device_printf(sc->sc_dev, "timeout while writing to "
677 "MAC configuration register\n");
678 return (0);
679 }
680 } while ((val & clr) != 0 && (val & set) != set);
681 return (1);
682}
683
684/*
685 * Initialization of interface; set up initialization block
686 * and transmit/receive descriptor rings.
687 */
688static void
689hme_init(void *xsc)
690{
691 struct hme_softc *sc = (struct hme_softc *)xsc;
692
693 HME_LOCK(sc);
694 hme_init_locked(sc);
695 HME_UNLOCK(sc);
696}
697
698static void
699hme_init_locked(struct hme_softc *sc)
700{
701 struct ifnet *ifp = sc->sc_ifp;
702 u_int8_t *ea;
703 u_int32_t n, v;
704
705 HME_LOCK_ASSERT(sc, MA_OWNED);
706 /*
707 * Initialization sequence. The numbered steps below correspond
708 * to the sequence outlined in section 6.3.5.1 in the Ethernet
709 * Channel Engine manual (part of the PCIO manual).
710 * See also the STP2002-STQ document from Sun Microsystems.
711 */
712
713 /* step 1 & 2. Reset the Ethernet Channel */
714 hme_stop(sc);
715
716 /* Re-initialize the MIF */
717 hme_mifinit(sc);
718
719#if 0
720 /* Mask all MIF interrupts, just in case */
721 HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
722#endif
723
724 /* step 3. Setup data structures in host memory */
725 if (hme_meminit(sc) != 0) {
726 device_printf(sc->sc_dev, "out of buffers; init aborted.");
727 return;
728 }
729
730 /* step 4. TX MAC registers & counters */
731 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
732 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
733 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
734 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
735 HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
736
737 /* Load station MAC address */
738 ea = IF_LLADDR(sc->sc_ifp);
739 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
740 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
741 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
742
743 /*
744 * Init seed for backoff
745 * (source suggested by manual: low 10 bits of MAC address)
746 */
747 v = ((ea[4] << 8) | ea[5]) & 0x3fff;
748 HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
749
750 /* Note: Accepting power-on default for other MAC registers here.. */
751
752 /* step 5. RX MAC registers & counters */
753 hme_setladrf(sc, 0);
754
755 /* step 6 & 7. Program Descriptor Ring Base Addresses */
756 HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
757 /* Transmit Descriptor ring size: in increments of 16 */
758 HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
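	/*
	 * For example, with HME_NTXDESC == 256 (the maximum mentioned
	 * earlier), the value written above is 256 / 16 - 1 == 15.
	 */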
759
760 HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
761 HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
762
763 /* step 8. Global Configuration & Interrupt Mask */
764 HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
765 ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
766 HME_SEB_STAT_HOSTTOTX |
767 HME_SEB_STAT_RXTOHOST |
768 HME_SEB_STAT_TXALL |
769 HME_SEB_STAT_TXPERR |
770 HME_SEB_STAT_RCNTEXP |
771 HME_SEB_STAT_ALL_ERRORS ));
772
773 switch (sc->sc_burst) {
774 default:
775 v = 0;
776 break;
777 case 16:
778 v = HME_SEB_CFG_BURST16;
779 break;
780 case 32:
781 v = HME_SEB_CFG_BURST32;
782 break;
783 case 64:
784 v = HME_SEB_CFG_BURST64;
785 break;
786 }
787 /*
 788	 * Blindly setting 64-bit transfers may hang PCI cards (Cheerio?).
 789	 * Allowing 64-bit transfers breaks TX checksum offload as well.
 790	 * It is not known whether this comes from a hardware bug or from the
 791	 * driver's DMA scheme.
792 *
793 * if (sc->sc_pci == 0)
794 * v |= HME_SEB_CFG_64BIT;
795 */
796 HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
797
798 /* step 9. ETX Configuration: use mostly default values */
799
800 /* Enable DMA */
801 v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
802 v |= HME_ETX_CFG_DMAENABLE;
803 HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
804
805 /* step 10. ERX Configuration */
806 v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
807
808 /* Encode Receive Descriptor ring size: four possible values */
809 v &= ~HME_ERX_CFG_RINGSIZEMSK;
810 switch (HME_NRXDESC) {
811 case 32:
812 v |= HME_ERX_CFG_RINGSIZE32;
813 break;
814 case 64:
815 v |= HME_ERX_CFG_RINGSIZE64;
816 break;
817 case 128:
818 v |= HME_ERX_CFG_RINGSIZE128;
819 break;
820 case 256:
821 v |= HME_ERX_CFG_RINGSIZE256;
822 break;
823 default:
824 printf("hme: invalid Receive Descriptor ring size\n");
825 break;
826 }
827
828 /* Enable DMA, fix RX first byte offset. */
829 v &= ~HME_ERX_CFG_FBO_MASK;
830 v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
831 /* RX TCP/UDP checksum offset */
832 n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
833 n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
834 v |= n;
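	/*
	 * That is, (14 + 20) / 2 == 17: checksumming starts right after a
	 * standard IPv4 header, with the start position apparently counted
	 * in 16-bit half-words from the beginning of the frame.
	 */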
835 CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
836 HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
837
838 /* step 11. XIF Configuration */
839 v = HME_MAC_READ_4(sc, HME_MACI_XIF);
840 v |= HME_MAC_XIF_OE;
841 CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
842 HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
843
844 /* step 12. RX_MAC Configuration Register */
845 v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
846 v |= HME_MAC_RXCFG_ENABLE;
847 v &= ~(HME_MAC_RXCFG_DCRCS);
848 CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
849 HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
850
851 /* step 13. TX_MAC Configuration Register */
852 v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
853 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
854 CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
855 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
856
857 /* step 14. Issue Transmit Pending command */
858
859#ifdef HMEDEBUG
860 /* Debug: double-check. */
861 CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
862 "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
863 HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
864 HME_ERX_READ_4(sc, HME_ERXI_RING),
865 HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
866 CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
867 HME_SEB_READ_4(sc, HME_SEBI_IMASK),
868 HME_ERX_READ_4(sc, HME_ERXI_CFG),
869 HME_ETX_READ_4(sc, HME_ETXI_CFG));
870 CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
871 HME_MAC_READ_4(sc, HME_MACI_RXCFG),
872 HME_MAC_READ_4(sc, HME_MACI_TXCFG));
873#endif
874
875 /* Set the current media. */
876 hme_mediachange_locked(sc);
877
878 /* Start the one second timer. */
882 sc->sc_wdog_timer = 0;
879 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
880
881 ifp->if_drv_flags |= IFF_DRV_RUNNING;
882 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

883 ifp->if_timer = 0;
884 hme_start_locked(ifp);
885}
886
887/* TX TCP/UDP checksum */
888static void
889hme_txcksum(struct mbuf *m, u_int32_t *cflags)
890{
891 struct ip *ip;
892 u_int32_t offset, offset2;
893 caddr_t p;
894
895 for(; m && m->m_len == 0; m = m->m_next)
896 ;
897 if (m == NULL || m->m_len < ETHER_HDR_LEN) {
898 printf("hme_txcksum: m_len < ETHER_HDR_LEN\n");
899 return; /* checksum will be corrupted */
900 }
901 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
902 if (m->m_len != ETHER_HDR_LEN) {
903 printf("hme_txcksum: m_len != ETHER_HDR_LEN\n");
904 return; /* checksum will be corrupted */
905 }
906 /* XXX */
907 for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
908 ;
909 if (m == NULL)
910 return; /* checksum will be corrupted */
911 ip = mtod(m, struct ip *);
912 } else {
913 p = mtod(m, caddr_t);
914 p += ETHER_HDR_LEN;
915 ip = (struct ip *)p;
916 }
917 offset2 = m->m_pkthdr.csum_data;
918 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
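	/*
	 * The descriptor gets two byte offsets: where the hardware sum starts
	 * (the first byte past the IP header) and where the result is stuffed
	 * (start + csum_data).  With a plain 20-byte IP header the start
	 * offset is 14 + 20 = 34; for TCP the stuff offset would then be
	 * 34 + 16 = 50.
	 */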
919 *cflags = offset << HME_XD_TXCKSUM_SSHIFT;
920 *cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT);
921 *cflags |= HME_XD_TXCKSUM;
922}
923
924 /*
925  * DMA-map an mbuf chain, set up the transmit descriptor ring accordingly
926  * and start the transmission.
927  * Returns 0 on success, -1 if no TX descriptor (DMA map) was available,
928  * -2 if the ring lacked enough free slots for the packet, or an errno otherwise.
929  *
930  * XXX: this relies on segments returned by bus_dmamap_load_mbuf_sg()
931  * being readable from the nearest burst boundary before ds_addr up to
932  * the first boundary beyond the end.  This is usually a safe assumption
933  * to make, but is not documented.
934  */
935static int
936hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
937{
938 struct hme_txdesc *htx;
939 caddr_t txd;
940 int i, pci, si, ri, nseg;
941 u_int32_t flags, cflags = 0;
942 int error = 0;
943
944 if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
945 return (-1);
946 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
947 *m0, sc->sc_rb.rb_txsegs, &nseg, 0);
948 if (error == EFBIG) {
949 struct mbuf *m;
950
951 m = m_defrag(*m0, M_DONTWAIT);
952 if (m == NULL) {
953 m_freem(*m0);
954 *m0 = NULL;
955 return (ENOMEM);
956 }
957 *m0 = m;
958 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
959 *m0, sc->sc_rb.rb_txsegs, &nseg, 0);
960 if (error != 0) {
961 m_freem(*m0);
962 *m0 = NULL;
963 return (error);
964 }
965 } else if (error != 0)
966 return (error);
967 if (nseg == 0) {
968 m_freem(*m0);
969 *m0 = NULL;
970 return (EIO);
971 }
972 if (sc->sc_rb.rb_td_nbusy + nseg >= HME_NTXDESC) {
973 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
974 /* retry with m_defrag(9)? */
975 return (-2);
976 }
977 if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
978 hme_txcksum(*m0, &cflags);
979 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
980
981 si = ri = sc->sc_rb.rb_tdhead;
982 txd = sc->sc_rb.rb_txd;
983 pci = sc->sc_pci;
984 CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
985 HME_XD_GETFLAGS(pci, txd, ri));
986 for (i = 0; i < nseg; i++) {
987 /* Fill the ring entry. */
988 flags = HME_XD_ENCODE_TSIZE(sc->sc_rb.rb_txsegs[i].ds_len);
989 if (i == 0)
990 flags |= HME_XD_SOP | cflags;
991 else
992 flags |= HME_XD_OWN | cflags;
993 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
994 ri, si, flags);
995 HME_XD_SETADDR(pci, txd, ri, sc->sc_rb.rb_txsegs[i].ds_addr);
996 HME_XD_SETFLAGS(pci, txd, ri, flags);
997 sc->sc_rb.rb_td_nbusy++;
998 htx->htx_lastdesc = ri;
999 ri = (ri + 1) % HME_NTXDESC;
1000 }
1001 sc->sc_rb.rb_tdhead = ri;
1002
1003 /* set EOP on the last descriptor */
1004 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
1005 flags = HME_XD_GETFLAGS(pci, txd, ri);
1006 flags |= HME_XD_EOP;
1007 CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
1008 flags);
1009 HME_XD_SETFLAGS(pci, txd, ri, flags);
1010
1011 /* Turn the first descriptor ownership to the hme */
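	/*
	 * This is done last on purpose: all other descriptors of the chain
	 * already carry HME_XD_OWN, so handing over the start-of-packet
	 * descriptor only now keeps the chip from chasing a half-built chain.
	 */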
1012 flags = HME_XD_GETFLAGS(pci, txd, si);
1013 flags |= HME_XD_OWN;
1014 CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
1015 ri, flags);
1016 HME_XD_SETFLAGS(pci, txd, si, flags);
1017
1018 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
1019 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
1020 htx->htx_m = *m0;
1021
1022 /* start the transmission. */
1023 HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
1024
1025 return (0);
1026}
1027
1028/*
1029 * Pass a packet to the higher levels.
1030 */
1031static void
1032hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
1033{
1034 struct ifnet *ifp = sc->sc_ifp;
1035 struct mbuf *m;
1036
1037 if (len <= sizeof(struct ether_header) ||
1038 len > HME_MAX_FRAMESIZE) {
1039#ifdef HMEDEBUG
1040 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
1041 len);
1042#endif
1043 ifp->if_ierrors++;
1044 hme_discard_rxbuf(sc, ix);
1045 return;
1046 }
1047
1048 m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
1049 CTR1(KTR_HME, "hme_read: len %d", len);
1050
1051 if (hme_add_rxbuf(sc, ix, 0) != 0) {
1052 /*
1053 * hme_add_rxbuf will leave the old buffer in the ring until
1054 * it is sure that a new buffer can be mapped. If it can not,
1055 * drop the packet, but leave the interface up.
1056 */
1057 ifp->if_iqdrops++;
1058 hme_discard_rxbuf(sc, ix);
1059 return;
1060 }
1061
1062 ifp->if_ipackets++;
1063
1064 m->m_pkthdr.rcvif = ifp;
1065 m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
1066 m_adj(m, HME_RXOFFS);
1067 /* RX TCP/UDP checksum */
1068 if (ifp->if_capenable & IFCAP_RXCSUM)
1069 hme_rxcksum(m, flags);
1070 /* Pass the packet up. */
1071 HME_UNLOCK(sc);
1072 (*ifp->if_input)(ifp, m);
1073 HME_LOCK(sc);
1074}
1075
1076static void
1077hme_start(struct ifnet *ifp)
1078{
1079 struct hme_softc *sc = ifp->if_softc;
1080
1081 HME_LOCK(sc);
1082 hme_start_locked(ifp);
1083 HME_UNLOCK(sc);
1084}
1085
1086static void
1087hme_start_locked(struct ifnet *ifp)
1088{
1089 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
1090 struct mbuf *m;
1091 int error, enq = 0;
1092
1093 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1094 IFF_DRV_RUNNING)
1095 return;
1096
1097 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1098 sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
1099 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1100 if (m == NULL)
1101 break;
1102
1103 error = hme_load_txmbuf(sc, &m);
1104 if (error != 0) {
1105 if (m == NULL)
1106 break;
1107 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1108 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1109 break;
1110 }
1111 enq++;
1112 BPF_MTAP(ifp, m);
1113 }
1114
1115 /* Set watchdog timer if a packet was queued */
1116 if (enq > 0) {
1117 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1118 BUS_DMASYNC_PREWRITE);
1119 ifp->if_timer = 5;
1121 sc->sc_wdog_timer = 5;
1120 }
1121}
1122
1123/*
1124 * Transmit interrupt.
1125 */
1126static void
1127hme_tint(struct hme_softc *sc)
1128{
1129 caddr_t txd;
1130 struct ifnet *ifp = sc->sc_ifp;
1131 struct hme_txdesc *htx;
1132 unsigned int ri, txflags;
1133
1134 txd = sc->sc_rb.rb_txd;
1135 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1136 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1137 /* Fetch current position in the transmit ring */
1138 for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
1139 if (sc->sc_rb.rb_td_nbusy <= 0) {
1140 CTR0(KTR_HME, "hme_tint: not busy!");
1141 break;
1142 }
1143
1144 txflags = HME_XD_GETFLAGS(sc->sc_pci, txd, ri);
1145 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
1146
1147 if ((txflags & HME_XD_OWN) != 0)
1148 break;
1149
1150 CTR0(KTR_HME, "hme_tint: not owned");
1151 --sc->sc_rb.rb_td_nbusy;
1152 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1153
1154 /* Complete packet transmitted? */
1155 if ((txflags & HME_XD_EOP) == 0)
1156 continue;
1157
1158 KASSERT(htx->htx_lastdesc == ri,
1159 ("hme_tint: ring indices skewed: %d != %d!",
1160 htx->htx_lastdesc, ri));
1161 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
1162 BUS_DMASYNC_POSTWRITE);
1163 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1164
1165 ifp->if_opackets++;
1166 m_freem(htx->htx_m);
1167 htx->htx_m = NULL;
1168 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
1169 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
1170 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1171 }
1172 /* Turn off watchdog if hme(4) transmitted queued packet */
1173 ifp->if_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
1174 sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
1174
1175 /* Update ring */
1176 sc->sc_rb.rb_tdtail = ri;
1177
1178 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1179 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1180 hme_start_locked(ifp);
1181}
1182
1183/*
1184 * RX TCP/UDP checksum
1185 */
1186static void
1187hme_rxcksum(struct mbuf *m, u_int32_t flags)
1188{
1189 struct ether_header *eh;
1190 struct ip *ip;
1191 struct udphdr *uh;
1192 int32_t hlen, len, pktlen;
1193 u_int16_t cksum, *opts;
1194 u_int32_t temp32;
1195
1196 pktlen = m->m_pkthdr.len;
1197 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
1198 return;
1199 eh = mtod(m, struct ether_header *);
1200 if (eh->ether_type != htons(ETHERTYPE_IP))
1201 return;
1202 ip = (struct ip *)(eh + 1);
1203 if (ip->ip_v != IPVERSION)
1204 return;
1205
1206 hlen = ip->ip_hl << 2;
1207 pktlen -= sizeof(struct ether_header);
1208 if (hlen < sizeof(struct ip))
1209 return;
1210 if (ntohs(ip->ip_len) < hlen)
1211 return;
1212 if (ntohs(ip->ip_len) != pktlen)
1213 return;
1214 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1215 return; /* can't handle fragmented packet */
1216
1217 switch (ip->ip_p) {
1218 case IPPROTO_TCP:
1219 if (pktlen < (hlen + sizeof(struct tcphdr)))
1220 return;
1221 break;
1222 case IPPROTO_UDP:
1223 if (pktlen < (hlen + sizeof(struct udphdr)))
1224 return;
1225 uh = (struct udphdr *)((caddr_t)ip + hlen);
1226 if (uh->uh_sum == 0)
1227 return; /* no checksum */
1228 break;
1229 default:
1230 return;
1231 }
1232
1233 cksum = ~(flags & HME_XD_RXCKSUM);
1234 /* checksum fixup for IP options */
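	/*
	 * The hardware sum starts right after a minimal 20-byte IP header
	 * (see the ERX_CFG CSUMSTART setup during initialization), so any
	 * IP option words it swallowed are subtracted back out here in
	 * one's-complement arithmetic, folding the borrow back in.
	 */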
1235 len = hlen - sizeof(struct ip);
1236 if (len > 0) {
1237 opts = (u_int16_t *)(ip + 1);
1238 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
1239 temp32 = cksum - *opts;
1240 temp32 = (temp32 >> 16) + (temp32 & 65535);
1241 cksum = temp32 & 65535;
1242 }
1243 }
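	/*
	 * Only CSUM_DATA_VALID (without CSUM_PSEUDO_HDR) is set, so
	 * csum_data is handed up as a partial one's-complement sum and the
	 * upper layer folds in the pseudo-header itself.
	 */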
1244 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
1245 m->m_pkthdr.csum_data = cksum;
1246}
1247
1248/*
1249 * Receive interrupt.
1250 */
1251static void
1252hme_rint(struct hme_softc *sc)
1253{
1254 caddr_t xdr = sc->sc_rb.rb_rxd;
1255 struct ifnet *ifp = sc->sc_ifp;
1256 unsigned int ri, len;
1257 int progress = 0;
1258 u_int32_t flags;
1259
1260 /*
1261 * Process all buffers with valid data.
1262 */
1263 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1264 for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
1265 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
1266 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
1267 if ((flags & HME_XD_OWN) != 0)
1268 break;
1269
1270 progress++;
1271 if ((flags & HME_XD_OFL) != 0) {
1272 device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
1273 "flags=0x%x\n", ri, flags);
1274 ifp->if_ierrors++;
1275 hme_discard_rxbuf(sc, ri);
1276 } else {
1277 len = HME_XD_DECODE_RSIZE(flags);
1278 hme_read(sc, ri, len, flags);
1279 }
1280 }
1281 if (progress) {
1282 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1283 BUS_DMASYNC_PREWRITE);
1284 }
1285 sc->sc_rb.rb_rdtail = ri;
1286}
1287
1288static void
1289hme_eint(struct hme_softc *sc, u_int status)
1290{
1291
1292 if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
1293 device_printf(sc->sc_dev, "XXXlink status changed: "
1294 "cfg=%#x, stat=%#x, sm=%#x\n",
1295 HME_MIF_READ_4(sc, HME_MIFI_CFG),
1296 HME_MIF_READ_4(sc, HME_MIFI_STAT),
1297 HME_MIF_READ_4(sc, HME_MIFI_SM));
1298 return;
1299 }
1300
1301	/* Check for fatal errors that need a reset to unfreeze the DMA engine. */
1302 if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
1303 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
1304 hme_init_locked(sc);
1305 }
1306}
1307
1308void
1309hme_intr(void *v)
1310{
1311 struct hme_softc *sc = (struct hme_softc *)v;
1312 u_int32_t status;
1313
1314 HME_LOCK(sc);
1315 status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1316 CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
1317
1318 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
1319 hme_eint(sc, status);
1320
1321 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
1322 hme_tint(sc);
1323
1324 if ((status & HME_SEB_STAT_RXTOHOST) != 0)
1325 hme_rint(sc);
1326 HME_UNLOCK(sc);
1327}
1328
1329static void
1330hme_watchdog(struct ifnet *ifp)
1330static int
1331hme_watchdog(struct hme_softc *sc)
1331{
1332 struct hme_softc *sc = ifp->if_softc;
1333#ifdef HMEDEBUG
1334 u_int32_t status;
1335#endif
1336
1337 HME_LOCK(sc);
1337 HME_LOCK_ASSERT(sc, MA_OWNED);
1338#ifdef HMEDEBUG
1339 status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1340 CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
1341#endif
1342
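	/*
	 * sc_wdog_timer is armed to 5 when packets are queued in
	 * hme_start_locked(), re-armed or cleared in hme_tint(), and
	 * presumably counted down here once per second from the hme_tick()
	 * callout, so a timeout fires roughly five seconds after the last
	 * unacknowledged transmission.
	 */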
1343 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1344 return (0);
1345
1342 device_printf(sc->sc_dev, "device timeout\n");
1343 ++ifp->if_oerrors;
1347 ++sc->sc_ifp->if_oerrors;
1344
1345 hme_init_locked(sc);
1346 HME_UNLOCK(sc);
1350 return (EJUSTRETURN);
1347}
1348
1349/*
1350 * Initialize the MII Management Interface
1351 */
1352static void
1353hme_mifinit(struct hme_softc *sc)
1354{
1355 u_int32_t v;
1356
1357 /*
1358 * Configure the MIF in frame mode, polling disabled, internal PHY
1359 * selected.
1360 */
1361 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
1362
1363 /*
1364 * If the currently selected media uses the external transceiver,
1365 * enable its MII drivers (which basically isolates the internal
1366	 * one and vice versa). In case the current media hasn't been set
1367	 * yet, we default to the internal transceiver.
1368 */
1369 v = HME_MAC_READ_4(sc, HME_MACI_XIF);
1370 if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
1371 sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
1372 HME_PHYAD_EXTERNAL)
1373 v |= HME_MAC_XIF_MIIENABLE;
1374 else
1375 v &= ~HME_MAC_XIF_MIIENABLE;
1376 HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
1377}
1378
1379/*
1380 * MII interface
1381 */
1382int
1383hme_mii_readreg(device_t dev, int phy, int reg)
1384{
1385 struct hme_softc *sc;
1386 int n;
1387 u_int32_t v;
1388
1389 /* We can at most have two PHYs. */
1390 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
1391 return (0);
1392
1393 sc = device_get_softc(dev);
1394 /* Select the desired PHY in the MIF configuration register */
1395 v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1396 if (phy == HME_PHYAD_EXTERNAL)
1397 v |= HME_MIF_CFG_PHY;
1398 else
1399 v &= ~HME_MIF_CFG_PHY;
1400 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1401
1402 /* Construct the frame command */
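	/*
	 * This builds an IEEE 802.3 clause 22 MII management frame: start
	 * bits, read opcode, PHY address and register address.  Completion
	 * is detected below by polling for HME_MIF_FO_TALSB in the frame
	 * register.
	 */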
1403 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1404 HME_MIF_FO_TAMSB |
1405 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
1406 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1407 (reg << HME_MIF_FO_REGAD_SHIFT);
1408
1409 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1410 for (n = 0; n < 100; n++) {
1411 DELAY(1);
1412 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1413 if (v & HME_MIF_FO_TALSB)
1414 return (v & HME_MIF_FO_DATA);
1415 }
1416
1417 device_printf(sc->sc_dev, "mii_read timeout\n");
1418 return (0);
1419}
1420
1421int
1422hme_mii_writereg(device_t dev, int phy, int reg, int val)
1423{
1424 struct hme_softc *sc;
1425 int n;
1426 u_int32_t v;
1427
1428 /* We can at most have two PHYs. */
1429 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
1430 return (0);
1431
1432 sc = device_get_softc(dev);
1433 /* Select the desired PHY in the MIF configuration register */
1434 v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1435 if (phy == HME_PHYAD_EXTERNAL)
1436 v |= HME_MIF_CFG_PHY;
1437 else
1438 v &= ~HME_MIF_CFG_PHY;
1439 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1440
1441 /* Construct the frame command */
1442 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1443 HME_MIF_FO_TAMSB |
1444 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
1445 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1446 (reg << HME_MIF_FO_REGAD_SHIFT) |
1447 (val & HME_MIF_FO_DATA);
1448
1449 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1450 for (n = 0; n < 100; n++) {
1451 DELAY(1);
1452 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1453 if (v & HME_MIF_FO_TALSB)
1454 return (1);
1455 }
1456
1457 device_printf(sc->sc_dev, "mii_write timeout\n");
1458 return (0);
1459}
1460
1461void
1462hme_mii_statchg(device_t dev)
1463{
1464 struct hme_softc *sc;
1465 u_int32_t v;
1466
1467 sc = device_get_softc(dev);
1468
1469#ifdef HMEDEBUG
1470 if (sc->sc_debug)
1471 device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
1472#endif
1473
1474 /* Set the MAC Full Duplex bit appropriately */
1475 v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
1476 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
1477 return;
1478 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1479 v |= HME_MAC_TXCFG_FULLDPLX;
1480 else
1481 v &= ~HME_MAC_TXCFG_FULLDPLX;
1482 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
1483 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
1484 return;
1485}
1486
1487static int
1488hme_mediachange(struct ifnet *ifp)
1489{
1490 struct hme_softc *sc = ifp->if_softc;
1491 int error;
1492
1493 HME_LOCK(sc);
1494 error = hme_mediachange_locked(sc);
1495 HME_UNLOCK(sc);
1496 return (error);
1497}
1498
1499static int
1500hme_mediachange_locked(struct hme_softc *sc)
1501{
1502 struct mii_softc *child;
1503
1504 HME_LOCK_ASSERT(sc, MA_OWNED);
1505#ifdef HMEDEBUG
1506 if (sc->sc_debug)
1507 device_printf(sc->sc_dev, "hme_mediachange_locked");
1508#endif
1509
1510 hme_mifinit(sc);
1511
1512 /*
1513	 * If both PHYs are present, reset them. This is required for
1514	 * unisolating the previously isolated PHY when switching PHYs.
1515	 * As the above hme_mifinit() call will set the MII drivers in
1516	 * the XIF configuration register according to the currently
1517 * selected media, there should be no window during which the
1518 * data paths of both transceivers are open at the same time,
1519 * even if the PHY device drivers use MIIF_NOISOLATE.
1520 */
1521 if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
1522 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
1523 mii_phy_reset(child);
1524 return (mii_mediachg(sc->sc_mii));
1525}
1526
1527static void
1528hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1529{
1530 struct hme_softc *sc = ifp->if_softc;
1531
1532 HME_LOCK(sc);
1533 if ((ifp->if_flags & IFF_UP) == 0) {
1534 HME_UNLOCK(sc);
1535 return;
1536 }
1537
1538 mii_pollstat(sc->sc_mii);
1539 ifmr->ifm_active = sc->sc_mii->mii_media_active;
1540 ifmr->ifm_status = sc->sc_mii->mii_media_status;
1541 HME_UNLOCK(sc);
1542}
1543
1544/*
1545 * Process an ioctl request.
1546 */
1547static int
1548hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1549{
1550 struct hme_softc *sc = ifp->if_softc;
1551 struct ifreq *ifr = (struct ifreq *)data;
1552 int error = 0;
1553
1554 switch (cmd) {
1555 case SIOCSIFFLAGS:
1556 HME_LOCK(sc);
1557 if ((ifp->if_flags & IFF_UP) == 0 &&
1558 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1559 /*
1560 * If interface is marked down and it is running, then
1561 * stop it.
1562 */
1563 hme_stop(sc);
1564 } else if ((ifp->if_flags & IFF_UP) != 0 &&
1565 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1566 /*
1567 * If interface is marked up and it is stopped, then
1568 * start it.
1569 */
1570 hme_init_locked(sc);
1571 } else if ((ifp->if_flags & IFF_UP) != 0) {
1572 /*
1573 * Reset the interface to pick up changes in any other
1574 * flags that affect hardware registers.
1575 */
1576 hme_init_locked(sc);
1577 }
1578 if ((ifp->if_flags & IFF_LINK0) != 0)
1579 sc->sc_csum_features |= CSUM_UDP;
1580 else
1581 sc->sc_csum_features &= ~CSUM_UDP;
1582 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1583 ifp->if_hwassist = sc->sc_csum_features;
1584#ifdef HMEDEBUG
1585 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1586#endif
1587 HME_UNLOCK(sc);
1588 break;
1589
1590 case SIOCADDMULTI:
1591 case SIOCDELMULTI:
1592 HME_LOCK(sc);
1593 hme_setladrf(sc, 1);
1594 HME_UNLOCK(sc);
1595 error = 0;
1596 break;
1597 case SIOCGIFMEDIA:
1598 case SIOCSIFMEDIA:
1599 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
1600 break;
1601 case SIOCSIFCAP:
1602 HME_LOCK(sc);
1603 ifp->if_capenable = ifr->ifr_reqcap;
1604 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1605 ifp->if_hwassist = sc->sc_csum_features;
1606 else
1607 ifp->if_hwassist = 0;
1608 HME_UNLOCK(sc);
1609 break;
1610 default:
1611 error = ether_ioctl(ifp, cmd, data);
1612 break;
1613 }
1614
1615 return (error);
1616}
1617
1618/*
1619 * Set up the logical address filter.
1620 */
1621static void
1622hme_setladrf(struct hme_softc *sc, int reenable)
1623{
1624 struct ifnet *ifp = sc->sc_ifp;
1625 struct ifmultiaddr *inm;
1626 u_int32_t crc;
1627 u_int32_t hash[4];
1628 u_int32_t macc;
1629
1630 HME_LOCK_ASSERT(sc, MA_OWNED);
1631 /* Clear hash table */
1632 hash[3] = hash[2] = hash[1] = hash[0] = 0;
1633
1634 /* Get current RX configuration */
1635 macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1636
1637 /*
1638 * Disable the receiver while changing it's state as the documentation
1639 * mandates.
1640 * We then must wait until the bit clears in the register. This should
1641 * take at most 3.5ms.
1642 */
1643 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
1644 return;
1645 /* Disable the hash filter before writing to the filter registers. */
1646 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1647 HME_MAC_RXCFG_HENABLE, 0))
1648 return;
1649
1650 /* make RXMAC really SIMPLEX */
1651 macc |= HME_MAC_RXCFG_ME;
1652 if (reenable)
1653 macc |= HME_MAC_RXCFG_ENABLE;
1654 else
1655 macc &= ~HME_MAC_RXCFG_ENABLE;
1656
1657 if ((ifp->if_flags & IFF_PROMISC) != 0) {
1658 /* Turn on promiscuous mode; turn off the hash filter */
1659 macc |= HME_MAC_RXCFG_PMISC;
1660 macc &= ~HME_MAC_RXCFG_HENABLE;
1661 ifp->if_flags |= IFF_ALLMULTI;
1662 goto chipit;
1663 }
1664
1665 /* Turn off promiscuous mode; turn on the hash filter */
1666 macc &= ~HME_MAC_RXCFG_PMISC;
1667 macc |= HME_MAC_RXCFG_HENABLE;
1668
1669 /*
1670 * Set up multicast address filter by passing all multicast addresses
1671 * through a crc generator, and then using the high order 6 bits as an
1672 * index into the 64 bit logical address filter. The high order bit
1673 * selects the word, while the rest of the bits select the bit within
1674 * the word.
1675 */
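	/*
	 * Concretely: after crc >>= 26 a 6-bit value 0..63 remains; its upper
	 * two bits (crc >> 4) select one of the four 16-bit HASHTAB registers
	 * and its lower four bits (crc & 0xf) the bit within that register.
	 */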
1676
1677 IF_ADDR_LOCK(sc->sc_ifp);
1678 TAILQ_FOREACH(inm, &sc->sc_ifp->if_multiaddrs, ifma_link) {
1679 if (inm->ifma_addr->sa_family != AF_LINK)
1680 continue;
1681 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1682 inm->ifma_addr), ETHER_ADDR_LEN);
1683
1684 /* Just want the 6 most significant bits. */
1685 crc >>= 26;
1686
1687 /* Set the corresponding bit in the filter. */
1688 hash[crc >> 4] |= 1 << (crc & 0xf);
1689 }
1690 IF_ADDR_UNLOCK(sc->sc_ifp);
1691
1692 ifp->if_flags &= ~IFF_ALLMULTI;
1693
1694chipit:
1695 /* Now load the hash table into the chip */
1696 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
1697 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
1698 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
1699 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
1700 hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
1701 macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
1702 HME_MAC_RXCFG_ME));
1703}