if_hme.c revision 130270
1/*-
2 * Copyright (c) 1999 The NetBSD Foundation, Inc.
3 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Paul Kranenburg.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *        This product includes software developed by the NetBSD
20 *        Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 *    contributors may be used to endorse or promote products derived
23 *    from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 *
37 *	from: NetBSD: hme.c,v 1.29 2002/05/05 03:02:38 thorpej Exp
38 */
39
40#include <sys/cdefs.h>
41__FBSDID("$FreeBSD: head/sys/dev/hme/if_hme.c 130270 2004-06-09 14:34:04Z naddy $");
42
43/*
44 * HME Ethernet module driver.
45 *
46 * The HME is found, e.g., as part of the PCIO PCI multi-function device.
47 * It supports TX gathering and TX and RX checksum offloading.
48 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
49 * for this offset: mbuf clusters usually start on 2^11 byte boundaries, so
50 * skipping 2 bytes aligns the header that follows the ethernet header on a
51 * natural boundary, which ensures minimal wastage in the most common case.
52 *
53 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
54 * maximum packet size (this is not verified). Buffers starting on odd
55 * boundaries must be mapped so that the burst can start on a natural boundary.
56 *
57 * Checksumming is not yet supported.
58 */
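/*
 * A quick illustration of the alignment scheme above (the offset is the
 * HME_RXOFFS constant used below, presumably 2): a cluster whose data area
 * starts at 0x...800 has its start bumped to 0x...802, so the 14 byte
 * ethernet header ends at 0x...810 and the header that follows it (e.g. an
 * IP header) lands on a naturally aligned address.
 */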
59
60#define HMEDEBUG
61#define	KTR_HME		KTR_CT2		/* XXX */
62
63#include <sys/param.h>
64#include <sys/systm.h>
65#include <sys/bus.h>
66#include <sys/endian.h>
67#include <sys/kernel.h>
68#include <sys/module.h>
69#include <sys/ktr.h>
70#include <sys/mbuf.h>
71#include <sys/malloc.h>
72#include <sys/socket.h>
73#include <sys/sockio.h>
74
75#include <net/bpf.h>
76#include <net/ethernet.h>
77#include <net/if.h>
78#include <net/if_arp.h>
79#include <net/if_dl.h>
80#include <net/if_media.h>
81#include <net/if_vlan_var.h>
82
83#include <dev/mii/mii.h>
84#include <dev/mii/miivar.h>
85
86#include <machine/bus.h>
87
88#include <dev/hme/if_hmereg.h>
89#include <dev/hme/if_hmevar.h>
90
91static void	hme_start(struct ifnet *);
92static void	hme_stop(struct hme_softc *);
93static int	hme_ioctl(struct ifnet *, u_long, caddr_t);
94static void	hme_tick(void *);
95static void	hme_watchdog(struct ifnet *);
96static void	hme_init(void *);
97static int	hme_add_rxbuf(struct hme_softc *, unsigned int, int);
98static int	hme_meminit(struct hme_softc *);
99static int	hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
100    u_int32_t, u_int32_t);
101static void	hme_mifinit(struct hme_softc *);
102static void	hme_reset(struct hme_softc *);
103static void	hme_setladrf(struct hme_softc *, int);
104
105static int	hme_mediachange(struct ifnet *);
106static void	hme_mediastatus(struct ifnet *, struct ifmediareq *);
107
108static int	hme_load_txmbuf(struct hme_softc *, struct mbuf *);
109static void	hme_read(struct hme_softc *, int, int);
110static void	hme_eint(struct hme_softc *, u_int);
111static void	hme_rint(struct hme_softc *);
112static void	hme_tint(struct hme_softc *);
113
114static void	hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
115static void	hme_rxdma_callback(void *, bus_dma_segment_t *, int,
116    bus_size_t, int);
117static void	hme_txdma_callback(void *, bus_dma_segment_t *, int,
118    bus_size_t, int);
119
120devclass_t hme_devclass;
121
122static int hme_nerr;
123
124DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
125MODULE_DEPEND(hme, miibus, 1, 1, 1);
126
127#define	HME_SPC_READ_4(spc, sc, offs) \
128	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
129	    (sc)->sc_ ## spc ## o + (offs))
130#define	HME_SPC_WRITE_4(spc, sc, offs, v) \
131	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
132	    (sc)->sc_ ## spc ## o + (offs), (v))
133
134#define	HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
135#define	HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
136#define	HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
137#define	HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
138#define	HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
139#define	HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
140#define	HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
141#define	HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
142#define	HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
143#define	HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))
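/*
 * Each wrapper above merely pastes the register-space token into the softc
 * field names, e.g. HME_SEB_READ_4(sc, off) expands to
 * bus_space_read_4(sc->sc_sebt, sc->sc_sebh, sc->sc_sebo + off).
 */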
144
145#define	HME_MAXERR	5
146#define	HME_WHINE(dev, ...) do {					\
147	if (hme_nerr++ < HME_MAXERR)					\
148		device_printf(dev, __VA_ARGS__);			\
149	if (hme_nerr == HME_MAXERR) {					\
150		device_printf(dev, "too many errors; not reporting any "	\
151		    "more\n");						\
152	}								\
153} while(0)
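/*
 * HME_WHINE() rate-limits diagnostics: messages are printed until hme_nerr
 * reaches HME_MAXERR, then a single "too many errors" notice is emitted and
 * further reports are suppressed. Since hme_nerr is a file-scope counter,
 * the limit is shared by all hme instances.
 */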
154
155/* Support oversized VLAN frames. */
156#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
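/*
 * With the standard ETHER_MAX_LEN (1518) and ETHER_VLAN_ENCAP_LEN (4) this
 * allows frames of up to 1522 bytes, i.e. a maximum-sized frame carrying an
 * 802.1Q tag.
 */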
157
158int
159hme_config(struct hme_softc *sc)
160{
161	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
162	struct mii_softc *child;
163	bus_size_t size;
164	int error, rdesc, tdesc, i;
165
166	/*
167	 * HME common initialization.
168	 *
169	 * hme_softc fields that must be initialized by the front-end:
170	 *
171	 * the DMA bus tag:
172	 *	sc_dmatag
173	 *
174	 * the bus handles, tags and offsets (split for SBus compatibility):
175	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
176	 *	sc_erx{t,h,o}	(Receiver Unit registers)
177	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
178	 *	sc_mac{t,h,o}	(MAC registers)
179	 *	sc_mif{t,h,o}	(Management Interface registers)
180	 *
181	 * the maximum bus burst size:
182	 *	sc_burst
183	 *
184	 */
185
186	/* Make sure the chip is stopped. */
187	hme_stop(sc);
188
189	/*
190	 * Allocate DMA capable memory
191	 * Buffer descriptors must be aligned on a 2048 byte boundary;
192	 * take this into account when calculating the size. Note that
193	 * the maximum number of descriptors (256) occupies 2048 bytes,
194	 * so we allocate that much regardless of HME_N*DESC.
195	 */
196	size =	4096;
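	/*
	 * The layout set up by hme_meminit() below is: the TX descriptor ring
	 * at offset 0 and the RX descriptor ring at the next 2048 byte
	 * boundary, each descriptor taking HME_XD_SIZE (8) bytes.
	 */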
197
198	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
199	    BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
200	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
201	if (error)
202		return (error);
203
204	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
205	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
206	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
207	    &Giant, &sc->sc_cdmatag);
208	if (error)
209		goto fail_ptag;
210
211	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
212	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
213	    HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
214	    NULL, NULL, &sc->sc_rdmatag);
215	if (error)
216		goto fail_ctag;
217
218	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
219	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
220	    HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
221	    NULL, NULL, &sc->sc_tdmatag);
222	if (error)
223		goto fail_rtag;
224
225	/* Allocate control/TX DMA buffer */
226	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
227	    0, &sc->sc_cdmamap);
228	if (error != 0) {
229		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
230		goto fail_ttag;
231	}
232
233	/* Load the buffer */
234	sc->sc_rb.rb_dmabase = 0;
235	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
236	     sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
237	    sc->sc_rb.rb_dmabase == 0) {
238		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
239		    error);
240		goto fail_free;
241	}
242	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
243	    sc->sc_rb.rb_dmabase);
244
245	/*
246	 * Prepare the RX descriptors. rdesc serves as a marker for the last
247	 * processed descriptor and may be used later on.
248	 */
249	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
250		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
251		error = bus_dmamap_create(sc->sc_rdmatag, 0,
252		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
253		if (error != 0)
254			goto fail_rxdesc;
255	}
256	error = bus_dmamap_create(sc->sc_rdmatag, 0,
257	    &sc->sc_rb.rb_spare_dmamap);
258	if (error != 0)
259		goto fail_rxdesc;
260	/* Same for the TX descs. */
261	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
262		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
263		error = bus_dmamap_create(sc->sc_tdmatag, 0,
264		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
265		if (error != 0)
266			goto fail_txdesc;
267	}
268
269	/* Initialize ifnet structure. */
270	ifp->if_softc = sc;
271	if_initname(ifp, device_get_name(sc->sc_dev),
272	    device_get_unit(sc->sc_dev));
273	ifp->if_mtu = ETHERMTU;
274	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
275	ifp->if_start = hme_start;
276	ifp->if_ioctl = hme_ioctl;
277	ifp->if_init = hme_init;
278	ifp->if_watchdog = hme_watchdog;
279	ifp->if_snd.ifq_maxlen = HME_NTXQ;
280
281	hme_mifinit(sc);
282
283	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
284	    hme_mediastatus)) != 0) {
285		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
286		goto fail_rxdesc;
287	}
288	sc->sc_mii = device_get_softc(sc->sc_miibus);
289
290	/*
291	 * Walk along the list of attached MII devices and
292	 * establish an `MII instance' to `phy number'
293	 * mapping. We'll use this mapping in media change
294	 * requests to determine which phy to use to program
295	 * the MIF configuration register.
296	 */
297	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
298	     child = LIST_NEXT(child, mii_list)) {
299		/*
300		 * Note: we support just two PHYs: the built-in
301		 * internal device and an external one on the MII
302		 * connector.
303		 */
304		if (child->mii_phy > 1 || child->mii_inst > 1) {
305			device_printf(sc->sc_dev, "cannot accommodate "
306			    "MII device %s at phy %d, instance %d\n",
307			    device_get_name(child->mii_dev),
308			    child->mii_phy, child->mii_inst);
309			continue;
310		}
311
312		sc->sc_phys[child->mii_inst] = child->mii_phy;
313	}
314
315	/* Attach the interface. */
316	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);
317
318	/*
319	 * Tell the upper layer(s) we support long frames.
320	 */
321	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
322	ifp->if_capabilities |= IFCAP_VLAN_MTU;
323	ifp->if_capenable |= IFCAP_VLAN_MTU;
324
325	callout_init(&sc->sc_tick_ch, 0);
326	return (0);
327
328fail_txdesc:
329	for (i = 0; i < tdesc; i++) {
330		bus_dmamap_destroy(sc->sc_tdmatag,
331		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
332	}
333	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
334fail_rxdesc:
335	for (i = 0; i < rdesc; i++) {
336		bus_dmamap_destroy(sc->sc_rdmatag,
337		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
338	}
339	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
340fail_free:
341	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
342fail_ttag:
343	bus_dma_tag_destroy(sc->sc_tdmatag);
344fail_rtag:
345	bus_dma_tag_destroy(sc->sc_rdmatag);
346fail_ctag:
347	bus_dma_tag_destroy(sc->sc_cdmatag);
348fail_ptag:
349	bus_dma_tag_destroy(sc->sc_pdmatag);
350	return (error);
351}
352
353void
354hme_detach(struct hme_softc *sc)
355{
356	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
357	int i;
358
359	ether_ifdetach(ifp);
360	hme_stop(sc);
361	device_delete_child(sc->sc_dev, sc->sc_miibus);
362
363	for (i = 0; i < HME_NTXQ; i++) {
364		bus_dmamap_destroy(sc->sc_tdmatag,
365		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
366	}
367	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
368	for (i = 0; i < HME_NRXDESC; i++) {
369		bus_dmamap_destroy(sc->sc_rdmatag,
370		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
371	}
372	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
373	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
374	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
375	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
376	bus_dma_tag_destroy(sc->sc_tdmatag);
377	bus_dma_tag_destroy(sc->sc_rdmatag);
378	bus_dma_tag_destroy(sc->sc_cdmatag);
379	bus_dma_tag_destroy(sc->sc_pdmatag);
380}
381
382void
383hme_suspend(struct hme_softc *sc)
384{
385
386	hme_stop(sc);
387}
388
389void
390hme_resume(struct hme_softc *sc)
391{
392	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
393
394	if ((ifp->if_flags & IFF_UP) != 0)
395		hme_init(ifp);
396}
397
398static void
399hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
400{
401	struct hme_softc *sc = (struct hme_softc *)xsc;
402
403	if (error != 0)
404		return;
405	KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
406	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
407}
408
409static void
410hme_tick(void *arg)
411{
412	struct hme_softc *sc = arg;
413	int s;
414
415	s = splnet();
416	mii_tick(sc->sc_mii);
417	splx(s);
418
419	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
420}
421
422static void
423hme_reset(struct hme_softc *sc)
424{
425	int s;
426
427	s = splnet();
428	hme_init(sc);
429	splx(s);
430}
431
432static void
433hme_stop(struct hme_softc *sc)
434{
435	u_int32_t v;
436	int n;
437
438	callout_stop(&sc->sc_tick_ch);
439
440	/* Reset transmitter and receiver */
441	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
442	    HME_SEB_RESET_ERX);
443
444	for (n = 0; n < 20; n++) {
445		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
446		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
447			return;
448		DELAY(20);
449	}
450
451	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
452}
453
454static void
455hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
456    bus_size_t totsize, int error)
457{
458	bus_addr_t *a = xsc;
459
460	KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
461	if (error != 0)
462		return;
463	*a = segs[0].ds_addr;
464}
465
466/*
467 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
468 * ring for subsequent use.
469 */
470static __inline void
471hme_discard_rxbuf(struct hme_softc *sc, int ix)
472{
473
474	/*
475	 * Dropped a packet, reinitialize the descriptor and turn the
476	 * ownership back to the hardware.
477	 */
478	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
479	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
480}
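/*
 * Note on hme_discard_rxbuf(): setting HME_XD_OWN hands the descriptor back
 * to the chip, and the size field must be re-encoded because the chip
 * overwrites it with the length of the received frame (see hme_rint()).
 */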
481
482static int
483hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
484{
485	struct hme_rxdesc *rd;
486	struct mbuf *m;
487	bus_addr_t ba;
488	bus_dmamap_t map;
489	uintptr_t b;
490	int a, unmap;
491
492	rd = &sc->sc_rb.rb_rxdesc[ri];
493	unmap = rd->hrx_m != NULL;
494	if (unmap && keepold) {
495		/*
496		 * Reinitialize the descriptor flags, as they may have been
497		 * altered by the hardware.
498		 */
499		hme_discard_rxbuf(sc, ri);
500		return (0);
501	}
502	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
503		return (ENOBUFS);
504	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
505	b = mtod(m, uintptr_t);
506	/*
507	 * Required alignment boundary. At least 16 is needed, but since
508	 * the mapping must be done in a way that a burst can start on a
509	 * natural boundary we might need to extend this.
510	 */
511	a = max(HME_MINRXALIGN, sc->sc_burst);
512	/*
513	 * Make sure the buffer is suitably aligned. The 2 byte offset is removed
514	 * when the mbuf is handed up. XXX: this ensures at least 16 byte
515	 * alignment of the header adjacent to the ethernet header, which
516	 * should be sufficient in all cases. Nevertheless, this second-guesses
517	 * ALIGN().
518	 */
519	m_adj(m, roundup2(b, a) - b);
520	if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
521	    m, hme_rxdma_callback, &ba, 0) != 0) {
522		m_freem(m);
523		return (ENOBUFS);
524	}
525	if (unmap) {
526		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
527		    BUS_DMASYNC_POSTREAD);
528		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
529	}
530	map = rd->hrx_dmamap;
531	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
532	sc->sc_rb.rb_spare_dmamap = map;
533	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
534	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
535	rd->hrx_m = m;
536	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
537	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
538	return (0);
539}
540
541static int
542hme_meminit(struct hme_softc *sc)
543{
544	struct hme_ring *hr = &sc->sc_rb;
545	struct hme_txdesc *td;
546	bus_addr_t dma;
547	caddr_t p;
548	unsigned int i;
549	int error;
550
551	p = hr->rb_membase;
552	dma = hr->rb_dmabase;
553
554	/*
555	 * Allocate transmit descriptors
556	 */
557	hr->rb_txd = p;
558	hr->rb_txddma = dma;
559	p += HME_NTXDESC * HME_XD_SIZE;
560	dma += HME_NTXDESC * HME_XD_SIZE;
561	/* We have reserved descriptor space until the next 2048 byte boundary. */
562	dma = (bus_addr_t)roundup((u_long)dma, 2048);
563	p = (caddr_t)roundup((u_long)p, 2048);
564
565	/*
566	 * Allocate receive descriptors
567	 */
568	hr->rb_rxd = p;
569	hr->rb_rxddma = dma;
570	p += HME_NRXDESC * HME_XD_SIZE;
571	dma += HME_NRXDESC * HME_XD_SIZE;
572	/* Again move forward to the next 2048 byte boundary. */
573	dma = (bus_addr_t)roundup((u_long)dma, 2048);
574	p = (caddr_t)roundup((u_long)p, 2048);
575
576	/*
577	 * Initialize transmit buffer descriptors
578	 */
579	for (i = 0; i < HME_NTXDESC; i++) {
580		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
581		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
582	}
583
584	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
585	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
586	for (i = 0; i < HME_NTXQ; i++) {
587		td = &sc->sc_rb.rb_txdesc[i];
588		if (td->htx_m != NULL) {
589			m_freem(td->htx_m);
590			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
591			    BUS_DMASYNC_POSTWRITE);
592			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
593			td->htx_m = NULL;
594		}
595		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
596	}
597
598	/*
599	 * Initialize receive buffer descriptors
600	 */
601	for (i = 0; i < HME_NRXDESC; i++) {
602		error = hme_add_rxbuf(sc, i, 1);
603		if (error != 0)
604			return (error);
605	}
606
607	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
608	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);
609
610	hr->rb_tdhead = hr->rb_tdtail = 0;
611	hr->rb_td_nbusy = 0;
612	hr->rb_rdtail = 0;
613	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
614	    hr->rb_txddma);
615	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
616	    hr->rb_rxddma);
617	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
618	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
619	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
620	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
621	return (0);
622}
623
624static int
625hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
626    u_int32_t clr, u_int32_t set)
627{
628	int i = 0;
629
630	val &= ~clr;
631	val |= set;
632	HME_MAC_WRITE_4(sc, reg, val);
633	if (clr == 0 && set == 0)
634		return (1);	/* just write, no bits to wait for */
635	do {
636		DELAY(100);
637		i++;
638		val = HME_MAC_READ_4(sc, reg);
639		if (i > 40) {
640			/* After 3.5ms, we should have been done. */
641			device_printf(sc->sc_dev, "timeout while writing to "
642			    "MAC configuration register\n");
643			return (0);
644		}
645	} while ((val & clr) != 0 && (val & set) != set);
646	return (1);
647}
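/*
 * hme_mac_bitflip() is typically used in pairs where the documentation
 * requires a configuration bit to be toggled around an update: e.g.
 * hme_setladrf() below clears HME_MAC_RXCFG_ENABLE, rewrites the hash table
 * and sets it again, polling in 100us steps (about 4ms total) until the chip
 * acknowledges each change.
 */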
648
649/*
650 * Initialization of interface; set up initialization block
651 * and transmit/receive descriptor rings.
652 */
653static void
654hme_init(void *xsc)
655{
656	struct hme_softc *sc = (struct hme_softc *)xsc;
657	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
658	u_int8_t *ea;
659	u_int32_t v;
660
661	/*
662	 * Initialization sequence. The numbered steps below correspond
663	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
664	 * Channel Engine manual (part of the PCIO manual).
665	 * See also the STP2002-STQ document from Sun Microsystems.
666	 */
667
668	/* step 1 & 2. Reset the Ethernet Channel */
669	hme_stop(sc);
670
671	/* Re-initialize the MIF */
672	hme_mifinit(sc);
673
674#if 0
675	/* Mask all MIF interrupts, just in case */
676	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
677#endif
678
679	/* step 3. Setup data structures in host memory */
680	if (hme_meminit(sc) != 0) {
681		device_printf(sc->sc_dev, "out of buffers; init aborted\n");
682		return;
683	}
684
685	/* step 4. TX MAC registers & counters */
686	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
687	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
688	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
689	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
690	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
691
692	/* Load station MAC address */
693	ea = sc->sc_arpcom.ac_enaddr;
694	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
695	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
696	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
697
698	/*
699	 * Init seed for backoff
700	 * (source suggested by manual: low 10 bits of MAC address)
701	 */
702	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
703	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
704
705
706	/* Note: Accepting power-on default for other MAC registers here. */
707
708	/* step 5. RX MAC registers & counters */
709	hme_setladrf(sc, 0);
710
711	/* step 6 & 7. Program Descriptor Ring Base Addresses */
712	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
713	/* Transmit Descriptor ring size: in increments of 16 */
714	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
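	/* E.g. for HME_NTXDESC == 256, the value written is 256 / 16 - 1 = 15. */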
715
716	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
717	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
718
719	/* step 8. Global Configuration & Interrupt Mask */
720	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
721	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
722		HME_SEB_STAT_HOSTTOTX |
723		HME_SEB_STAT_RXTOHOST |
724		HME_SEB_STAT_TXALL |
725		HME_SEB_STAT_TXPERR |
726		HME_SEB_STAT_RCNTEXP |
727		HME_SEB_STAT_ALL_ERRORS ));
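	/*
	 * HME_SEBI_IMASK is an interrupt mask, i.e. a set bit disables the
	 * corresponding source, so the complement written above enables
	 * exactly the listed interrupts.
	 */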
728
729	switch (sc->sc_burst) {
730	default:
731		v = 0;
732		break;
733	case 16:
734		v = HME_SEB_CFG_BURST16;
735		break;
736	case 32:
737		v = HME_SEB_CFG_BURST32;
738		break;
739	case 64:
740		v = HME_SEB_CFG_BURST64;
741		break;
742	}
743	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
744
745	/* step 9. ETX Configuration: use mostly default values */
746
747	/* Enable DMA */
748	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
749	v |= HME_ETX_CFG_DMAENABLE;
750	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
751
752	/* step 10. ERX Configuration */
753	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
754
755	/* Encode Receive Descriptor ring size: four possible values */
756	v &= ~HME_ERX_CFG_RINGSIZEMSK;
757	switch (HME_NRXDESC) {
758	case 32:
759		v |= HME_ERX_CFG_RINGSIZE32;
760		break;
761	case 64:
762		v |= HME_ERX_CFG_RINGSIZE64;
763		break;
764	case 128:
765		v |= HME_ERX_CFG_RINGSIZE128;
766		break;
767	case 256:
768		v |= HME_ERX_CFG_RINGSIZE256;
769		break;
770	default:
771		printf("hme: invalid Receive Descriptor ring size\n");
772		break;
773	}
774
775	/* Enable DMA, fix RX first byte offset. */
776	v &= ~HME_ERX_CFG_FBO_MASK;
777	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
778	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
779	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
780
781	/* step 11. XIF Configuration */
782	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
783	v |= HME_MAC_XIF_OE;
784	/* If an external transceiver is connected, enable its MII drivers */
785	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
786		v |= HME_MAC_XIF_MIIENABLE;
787	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
788	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
789
790	/* step 12. RX_MAC Configuration Register */
791	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
792	v |= HME_MAC_RXCFG_ENABLE;
793	v &= ~(HME_MAC_RXCFG_DCRCS);
794	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
795	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
796
797	/* step 13. TX_MAC Configuration Register */
798	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
799	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
800	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
801	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
802
803	/* step 14. Issue Transmit Pending command */
804
805#ifdef HMEDEBUG
806	/* Debug: double-check. */
807	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
808	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
809	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
810	    HME_ERX_READ_4(sc, HME_ERXI_RING),
811	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
812	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
813	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
814	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
815	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
816	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
817	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
818	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));
819#endif
820
821	/* Set the current media. */
822	mii_mediachg(sc->sc_mii);
823
824	/* Start the one second timer. */
825	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
826
827	ifp->if_flags |= IFF_RUNNING;
828	ifp->if_flags &= ~IFF_OACTIVE;
829	ifp->if_timer = 0;
830	hme_start(ifp);
831}
832
833struct hme_txdma_arg {
834	struct hme_softc	*hta_sc;
835	struct hme_txdesc	*hta_htx;
836	int			hta_ndescs;
837};
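/*
 * hta_ndescs doubles as the callback's return channel: hme_txdma_callback()
 * stores the number of descriptors it used there, or -1 if the ring does not
 * have enough free slots for the mapped segments.
 */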
838
839/*
840 * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf()
841 * are readable from the nearest burst boundary on (i.e. potentially before
842 * ds_addr) to the first boundary beyond the end. This is usually a safe
843 * assumption to make, but is not documented.
844 */
845static void
846hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
847    bus_size_t totsz, int error)
848{
849	struct hme_txdma_arg *ta = xsc;
850	struct hme_txdesc *htx;
851	bus_size_t len = 0;
852	caddr_t txd;
853	u_int32_t flags = 0;
854	int i, tdhead, pci;
855
856	if (error != 0)
857		return;
858
859	tdhead = ta->hta_sc->sc_rb.rb_tdhead;
860	pci = ta->hta_sc->sc_pci;
861	txd = ta->hta_sc->sc_rb.rb_txd;
862	htx = ta->hta_htx;
863
864	if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
865		ta->hta_ndescs = -1;
866		return;
867	}
868	ta->hta_ndescs = nsegs;
869
870	for (i = 0; i < nsegs; i++) {
871		if (segs[i].ds_len == 0)
872			continue;
873
874		/* Fill the ring entry. */
875		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
876		if (len == 0)
877			flags |= HME_XD_SOP;
878		if (len + segs[i].ds_len == totsz)
879			flags |= HME_XD_EOP;
880		CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
881		    "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
882		    (u_int)segs[i].ds_addr);
883		HME_XD_SETFLAGS(pci, txd, tdhead, flags);
884		HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);
885
886		ta->hta_sc->sc_rb.rb_td_nbusy++;
887		htx->htx_lastdesc = tdhead;
888		tdhead = (tdhead + 1) % HME_NTXDESC;
889		len += segs[i].ds_len;
890	}
891	ta->hta_sc->sc_rb.rb_tdhead = tdhead;
892	KASSERT((flags & HME_XD_EOP) != 0,
893	    ("hme_txdma_callback: missed end of packet!"));
894}
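/*
 * Note that the callback fills in the descriptors with HME_XD_OWN still
 * clear; ownership is granted afterwards by hme_load_txmbuf(), walking from
 * the last descriptor back to the first, so the chip never sees a partially
 * built chain.
 */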
895
896/*
897 * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and
898 * start the transmission.
899 * Returns 0 on success, -1 if there were not enough free descriptors to map
900 * the packet, or an errno otherwise.
901 */
902static int
903hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
904{
905	struct hme_txdma_arg cba;
906	struct hme_txdesc *td;
907	int error, si, ri;
908	u_int32_t flags;
909
910	si = sc->sc_rb.rb_tdhead;
911	if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
912		return (-1);
913	td->htx_m = m0;
914	cba.hta_sc = sc;
915	cba.hta_htx = td;
916	if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
917	     m0, hme_txdma_callback, &cba, 0)) != 0)
918		goto fail;
919	if (cba.hta_ndescs == -1) {
920		error = -1;
921		goto fail;
922	}
923	bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
924	    BUS_DMASYNC_PREWRITE);
925
926	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
927	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);
928
929	/* Turn descriptor ownership over to the hme, back to front. */
930	ri = sc->sc_rb.rb_tdhead;
931	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
932	    ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
933	do {
934		ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
935		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
936		    HME_XD_OWN;
937		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
938		    ri, si, flags);
939		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
940	} while (ri != si);
941
942	/* start the transmission. */
943	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
944	return (0);
945fail:
946	bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
947	return (error);
948}
949
950/*
951 * Pass a packet to the higher levels.
952 */
953static void
954hme_read(struct hme_softc *sc, int ix, int len)
955{
956	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
957	struct mbuf *m;
958
959	if (len <= sizeof(struct ether_header) ||
960	    len > HME_MAX_FRAMESIZE) {
961#ifdef HMEDEBUG
962		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
963		    len);
964#endif
965		ifp->if_ierrors++;
966		hme_discard_rxbuf(sc, ix);
967		return;
968	}
969
970	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
971	CTR1(KTR_HME, "hme_read: len %d", len);
972
973	if (hme_add_rxbuf(sc, ix, 0) != 0) {
974		/*
975		 * hme_add_rxbuf will leave the old buffer in the ring until
976		 * it is sure that a new buffer can be mapped. If it can not,
977		 * drop the packet, but leave the interface up.
978		 */
979		ifp->if_iqdrops++;
980		hme_discard_rxbuf(sc, ix);
981		return;
982	}
983
984	ifp->if_ipackets++;
985
986	m->m_pkthdr.rcvif = ifp;
987	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
988	m_adj(m, HME_RXOFFS);
989	/* Pass the packet up. */
990	(*ifp->if_input)(ifp, m);
991}
992
993static void
994hme_start(struct ifnet *ifp)
995{
996	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
997	struct mbuf *m;
998	int error, enq = 0;
999
1000	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1001		return;
1002
1003	error = 0;
1004	for (;;) {
1005		IF_DEQUEUE(&ifp->if_snd, m);
1006		if (m == NULL)
1007			break;
1008
1009		error = hme_load_txmbuf(sc, m);
1010		if (error == -1) {
1011			ifp->if_flags |= IFF_OACTIVE;
1012			IF_PREPEND(&ifp->if_snd, m);
1013			break;
1014		} else if (error > 0) {
1015			printf("hme_start: error %d while loading mbuf\n",
1016			    error);
1017		} else {
1018			enq = 1;
1019			BPF_MTAP(ifp, m);
1020		}
1021	}
1022
1023	if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
1024		ifp->if_flags |= IFF_OACTIVE;
1025	/* Set watchdog timer if a packet was queued */
1026	if (enq) {
1027		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1028		    BUS_DMASYNC_PREWRITE);
1029		ifp->if_timer = 5;
1030	}
1031}
1032
1033/*
1034 * Transmit interrupt.
1035 */
1036static void
1037hme_tint(struct hme_softc *sc)
1038{
1039	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1040	struct hme_txdesc *htx;
1041	unsigned int ri, txflags;
1042
1043	/*
1044	 * Unload collision counters
1045	 */
1046	ifp->if_collisions +=
1047		HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
1048		HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
1049		HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
1050		HME_MAC_READ_4(sc, HME_MACI_LTCNT);
1051
1052	/*
1053	 * then clear the hardware counters.
1054	 */
1055	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
1056	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
1057	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
1058	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
1059
1060	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1061	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1062	/* Fetch current position in the transmit ring */
1063	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
1064		if (sc->sc_rb.rb_td_nbusy <= 0) {
1065			CTR0(KTR_HME, "hme_tint: not busy!");
1066			break;
1067		}
1068
1069		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
1070		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
1071
1072		if ((txflags & HME_XD_OWN) != 0)
1073			break;
1074
1075		CTR0(KTR_HME, "hme_tint: not owned");
1076		--sc->sc_rb.rb_td_nbusy;
1077		ifp->if_flags &= ~IFF_OACTIVE;
1078
1079		/* Complete packet transmitted? */
1080		if ((txflags & HME_XD_EOP) == 0)
1081			continue;
1082
1083		KASSERT(htx->htx_lastdesc == ri,
1084		    ("hme_tint: ring indices skewed: %d != %d!",
1085		     htx->htx_lastdesc, ri));
1086		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
1087		    BUS_DMASYNC_POSTWRITE);
1088		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1089
1090		ifp->if_opackets++;
1091		m_freem(htx->htx_m);
1092		htx->htx_m = NULL;
1093		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
1094		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
1095		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1096	}
1097	/* Turn off watchdog */
1098	if (sc->sc_rb.rb_td_nbusy == 0)
1099		ifp->if_timer = 0;
1100
1101	/* Update ring */
1102	sc->sc_rb.rb_tdtail = ri;
1103
1104	hme_start(ifp);
1105
1106	if (sc->sc_rb.rb_td_nbusy == 0)
1107		ifp->if_timer = 0;
1108}
1109
1110/*
1111 * Receive interrupt.
1112 */
1113static void
1114hme_rint(struct hme_softc *sc)
1115{
1116	caddr_t xdr = sc->sc_rb.rb_rxd;
1117	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1118	unsigned int ri, len;
1119	int progress = 0;
1120	u_int32_t flags;
1121
1122	/*
1123	 * Process all buffers with valid data.
1124	 */
1125	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1126	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
1127		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
1128		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
1129		if ((flags & HME_XD_OWN) != 0)
1130			break;
1131
1132		progress++;
1133		if ((flags & HME_XD_OFL) != 0) {
1134			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
1135			    "flags=0x%x\n", ri, flags);
1136			ifp->if_ierrors++;
1137			hme_discard_rxbuf(sc, ri);
1138		} else {
1139			len = HME_XD_DECODE_RSIZE(flags);
1140			hme_read(sc, ri, len);
1141		}
1142	}
1143	if (progress) {
1144		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1145		    BUS_DMASYNC_PREWRITE);
1146	}
1147	sc->sc_rb.rb_rdtail = ri;
1148}
1149
1150static void
1151hme_eint(struct hme_softc *sc, u_int status)
1152{
1153
1154	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
1155		device_printf(sc->sc_dev, "XXXlink status changed\n");
1156		return;
1157	}
1158
1159	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
1160}
1161
1162void
1163hme_intr(void *v)
1164{
1165	struct hme_softc *sc = (struct hme_softc *)v;
1166	u_int32_t status;
1167
1168	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1169	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
1170
1171	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
1172		hme_eint(sc, status);
1173
1174	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
1175		hme_tint(sc);
1176
1177	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
1178		hme_rint(sc);
1179}
1180
1181
1182static void
1183hme_watchdog(struct ifnet *ifp)
1184{
1185	struct hme_softc *sc = ifp->if_softc;
1186#ifdef HMEDEBUG
1187	u_int32_t status;
1188
1189	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1190	CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
1191#endif
1192	device_printf(sc->sc_dev, "device timeout\n");
1193	++ifp->if_oerrors;
1194
1195	hme_reset(sc);
1196}
1197
1198/*
1199 * Initialize the MII Management Interface
1200 */
1201static void
1202hme_mifinit(struct hme_softc *sc)
1203{
1204	u_int32_t v;
1205
1206	/* Configure the MIF in frame mode */
1207	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1208	v &= ~HME_MIF_CFG_BBMODE;
1209	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1210}
1211
1212/*
1213 * MII interface
1214 */
1215int
1216hme_mii_readreg(device_t dev, int phy, int reg)
1217{
1218	struct hme_softc *sc = device_get_softc(dev);
1219	int n;
1220	u_int32_t v;
1221
1222	/* Select the desired PHY in the MIF configuration register */
1223	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1224	/* Clear PHY select bit */
1225	v &= ~HME_MIF_CFG_PHY;
1226	if (phy == HME_PHYAD_EXTERNAL)
1227		/* Set PHY select bit to get at external device */
1228		v |= HME_MIF_CFG_PHY;
1229	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1230
1231	/* Construct the frame command */
1232	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1233	    HME_MIF_FO_TAMSB |
1234	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
1235	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
1236	    (reg << HME_MIF_FO_REGAD_SHIFT);
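	/*
	 * This is a standard IEEE 802.3 clause 22 management frame: start
	 * bits, read opcode, 5-bit PHY and register addresses, followed by
	 * the turnaround and 16 data bits; HME_MIF_FO_TALSB going high below
	 * indicates that the frame has completed.
	 */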
1237
1238	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1239	for (n = 0; n < 100; n++) {
1240		DELAY(1);
1241		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1242		if (v & HME_MIF_FO_TALSB)
1243			return (v & HME_MIF_FO_DATA);
1244	}
1245
1246	device_printf(sc->sc_dev, "mii_read timeout\n");
1247	return (0);
1248}
1249
1250int
1251hme_mii_writereg(device_t dev, int phy, int reg, int val)
1252{
1253	struct hme_softc *sc = device_get_softc(dev);
1254	int n;
1255	u_int32_t v;
1256
1257	/* Select the desired PHY in the MIF configuration register */
1258	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1259	/* Clear PHY select bit */
1260	v &= ~HME_MIF_CFG_PHY;
1261	if (phy == HME_PHYAD_EXTERNAL)
1262		/* Set PHY select bit to get at external device */
1263		v |= HME_MIF_CFG_PHY;
1264	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1265
1266	/* Construct the frame command */
1267	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
1268	    HME_MIF_FO_TAMSB				|
1269	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
1270	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
1271	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
1272	    (val & HME_MIF_FO_DATA);
1273
1274	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1275	for (n = 0; n < 100; n++) {
1276		DELAY(1);
1277		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1278		if (v & HME_MIF_FO_TALSB)
1279			return (1);
1280	}
1281
1282	device_printf(sc->sc_dev, "mii_write timeout\n");
1283	return (0);
1284}
1285
1286void
1287hme_mii_statchg(device_t dev)
1288{
1289	struct hme_softc *sc = device_get_softc(dev);
1290	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
1291	int phy = sc->sc_phys[instance];
1292	u_int32_t v;
1293
1294#ifdef HMEDEBUG
1295	if (sc->sc_debug)
1296		printf("hme_mii_statchg: status change: phy = %d\n", phy);
1297#endif
1298
1299	/* Select the current PHY in the MIF configuration register */
1300	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1301	v &= ~HME_MIF_CFG_PHY;
1302	if (phy == HME_PHYAD_EXTERNAL)
1303		v |= HME_MIF_CFG_PHY;
1304	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1305
1306	/* Set the MAC Full Duplex bit appropriately */
1307	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
1308	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
1309		return;
1310	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1311		v |= HME_MAC_TXCFG_FULLDPLX;
1312	else
1313		v &= ~HME_MAC_TXCFG_FULLDPLX;
1314	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
1315	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
1316		return;
1317}
1318
1319static int
1320hme_mediachange(struct ifnet *ifp)
1321{
1322	struct hme_softc *sc = ifp->if_softc;
1323
1324	return (mii_mediachg(sc->sc_mii));
1325}
1326
1327static void
1328hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1329{
1330	struct hme_softc *sc = ifp->if_softc;
1331
1332	if ((ifp->if_flags & IFF_UP) == 0)
1333		return;
1334
1335	mii_pollstat(sc->sc_mii);
1336	ifmr->ifm_active = sc->sc_mii->mii_media_active;
1337	ifmr->ifm_status = sc->sc_mii->mii_media_status;
1338}
1339
1340/*
1341 * Process an ioctl request.
1342 */
1343static int
1344hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1345{
1346	struct hme_softc *sc = ifp->if_softc;
1347	struct ifreq *ifr = (struct ifreq *)data;
1348	int s, error = 0;
1349
1350	s = splnet();
1351
1352	switch (cmd) {
1353	case SIOCSIFFLAGS:
1354		if ((ifp->if_flags & IFF_UP) == 0 &&
1355		    (ifp->if_flags & IFF_RUNNING) != 0) {
1356			/*
1357			 * If interface is marked down and it is running, then
1358			 * stop it.
1359			 */
1360			hme_stop(sc);
1361			ifp->if_flags &= ~IFF_RUNNING;
1362		} else if ((ifp->if_flags & IFF_UP) != 0 &&
1363		    	   (ifp->if_flags & IFF_RUNNING) == 0) {
1364			/*
1365			 * If interface is marked up and it is stopped, then
1366			 * start it.
1367			 */
1368			hme_init(sc);
1369		} else if ((ifp->if_flags & IFF_UP) != 0) {
1370			/*
1371			 * Reset the interface to pick up changes in any other
1372			 * flags that affect hardware registers.
1373			 */
1374			hme_init(sc);
1375		}
1376#ifdef HMEDEBUG
1377		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1378#endif
1379		break;
1380
1381	case SIOCADDMULTI:
1382	case SIOCDELMULTI:
1383		hme_setladrf(sc, 1);
1384		error = 0;
1385		break;
1386	case SIOCGIFMEDIA:
1387	case SIOCSIFMEDIA:
1388		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
1389		break;
1390	default:
1391		error = ether_ioctl(ifp, cmd, data);
1392		break;
1393	}
1394
1395	splx(s);
1396	return (error);
1397}
1398
1399/*
1400 * Set up the logical address filter.
1401 */
1402static void
1403hme_setladrf(struct hme_softc *sc, int reenable)
1404{
1405	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1406	struct ifmultiaddr *inm;
1407	struct sockaddr_dl *sdl;
1408	u_int32_t crc;
1409	u_int32_t hash[4];
1410	u_int32_t macc;
1411	int len;
1412
1413	/* Clear hash table */
1414	hash[3] = hash[2] = hash[1] = hash[0] = 0;
1415
1416	/* Get current RX configuration */
1417	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1418
1419	/*
1420	 * Disable the receiver while changing its state, as the documentation
1421	 * mandates.
1422	 * We must then wait until the bit clears in the register. This should
1423	 * take at most 3.5ms.
1424	 */
1425	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
1426		return;
1427	/* Disable the hash filter before writing to the filter registers. */
1428	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1429	    HME_MAC_RXCFG_HENABLE, 0))
1430		return;
1431
1432	if (reenable)
1433		macc |= HME_MAC_RXCFG_ENABLE;
1434	else
1435		macc &= ~HME_MAC_RXCFG_ENABLE;
1436
1437	if ((ifp->if_flags & IFF_PROMISC) != 0) {
1438		/* Turn on promiscuous mode; turn off the hash filter */
1439		macc |= HME_MAC_RXCFG_PMISC;
1440		macc &= ~HME_MAC_RXCFG_HENABLE;
1441		ifp->if_flags |= IFF_ALLMULTI;
1442		goto chipit;
1443	}
1444
1445	/* Turn off promiscuous mode; turn on the hash filter */
1446	macc &= ~HME_MAC_RXCFG_PMISC;
1447	macc |= HME_MAC_RXCFG_HENABLE;
1448
1449	/*
1450	 * Set up multicast address filter by passing all multicast addresses
1451	 * through a crc generator, and then using the high order 6 bits as an
1452	 * index into the 64 bit logical address filter.  The two high order
1453	 * bits select the word, while the rest of the bits select the bit within
1454	 * the word.
1455	 */
1456
1457	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
1458		if (inm->ifma_addr->sa_family != AF_LINK)
1459			continue;
1460		sdl = (struct sockaddr_dl *)inm->ifma_addr;
1461		crc = ether_crc32_le(sdl, ETHER_ADDR_LEN);
1462
1463		/* Just want the 6 most significant bits. */
1464		crc >>= 26;
1465
1466		/* Set the corresponding bit in the filter. */
1467		hash[crc >> 4] |= 1 << (crc & 0xf);
1468	}
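	/*
	 * Example: a CRC whose top 6 bits are 0x25 (100101b) sets bit 5 in
	 * hash[2], which is loaded into HME_MACI_HASHTAB2 below.
	 */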
1469
1470	ifp->if_flags &= ~IFF_ALLMULTI;
1471
1472chipit:
1473	/* Now load the hash table into the chip */
1474	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
1475	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
1476	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
1477	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
1478	hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
1479	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
1480}
1481