if_hme.c revision 117126
1/*-
2 * Copyright (c) 1999 The NetBSD Foundation, Inc.
3 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Paul Kranenburg.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *        This product includes software developed by the NetBSD
20 *        Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 *    contributors may be used to endorse or promote products derived
23 *    from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 *
37 *	from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
38 *
39 * $FreeBSD: head/sys/dev/hme/if_hme.c 117126 2003-07-01 15:52:06Z scottl $
40 */
41
42/*
43 * HME Ethernet module driver.
44 *
45 * The HME is e.g. part of the PCIO PCI multi function device.
46 * It supports TX gathering and TX and RX checksum offloading.
47 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
48 * for this offset: mbuf clusters usually start on 2^11 byte boundaries, and
49 * skipping 2 bytes aligns the header following the ethernet header on a
50 * natural boundary, which ensures minimal wastage in the most common case.
51 *
52 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
53 * maximum packet size (this is not verified). Buffers starting on odd
54 * boundaries must be mapped so that the burst can start on a natural boundary.
55 *
56 * Checksumming is not yet supported.
57 */
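/*
 * Worked example of the offset choice above: with the 2 byte offset
 * (HME_RXOFFS) the received frame starts 2 bytes into the (at least 16 byte
 * aligned) buffer, so the 14 byte ethernet header ends at offset 16 and the
 * following header (e.g. IP) starts on a naturally aligned boundary.
 */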
58
59#define HMEDEBUG
60#define	KTR_HME		KTR_CT2		/* XXX */
61
62#include <sys/param.h>
63#include <sys/systm.h>
64#include <sys/bus.h>
65#include <sys/endian.h>
66#include <sys/kernel.h>
67#include <sys/ktr.h>
68#include <sys/mbuf.h>
69#include <sys/malloc.h>
70#include <sys/socket.h>
71#include <sys/sockio.h>
72
73#include <net/bpf.h>
74#include <net/ethernet.h>
75#include <net/if.h>
76#include <net/if_arp.h>
77#include <net/if_dl.h>
78#include <net/if_media.h>
79
80#include <dev/mii/mii.h>
81#include <dev/mii/miivar.h>
82
83#include <machine/bus.h>
84
85#include <hme/if_hmereg.h>
86#include <hme/if_hmevar.h>
87
88static void	hme_start(struct ifnet *);
89static void	hme_stop(struct hme_softc *);
90static int	hme_ioctl(struct ifnet *, u_long, caddr_t);
91static void	hme_tick(void *);
92static void	hme_watchdog(struct ifnet *);
93static void	hme_init(void *);
94static int	hme_add_rxbuf(struct hme_softc *, unsigned int, int);
95static int	hme_meminit(struct hme_softc *);
96static int	hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
97    u_int32_t, u_int32_t);
98static void	hme_mifinit(struct hme_softc *);
99static void	hme_reset(struct hme_softc *);
100static void	hme_setladrf(struct hme_softc *, int);
101
102static int	hme_mediachange(struct ifnet *);
103static void	hme_mediastatus(struct ifnet *, struct ifmediareq *);
104
105static int	hme_load_txmbuf(struct hme_softc *, struct mbuf *);
106static void	hme_read(struct hme_softc *, int, int);
107static void	hme_eint(struct hme_softc *, u_int);
108static void	hme_rint(struct hme_softc *);
109static void	hme_tint(struct hme_softc *);
110
111static void	hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
112static void	hme_rxdma_callback(void *, bus_dma_segment_t *, int,
113    bus_size_t, int);
114static void	hme_txdma_callback(void *, bus_dma_segment_t *, int,
115    bus_size_t, int);
116
117devclass_t hme_devclass;
118
119static int hme_nerr;
120
121DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
122MODULE_DEPEND(hme, miibus, 1, 1, 1);
123
124#define	HME_SPC_READ_4(spc, sc, offs) \
125	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
126	    (sc)->sc_ ## spc ## o + (offs))
127#define	HME_SPC_WRITE_4(spc, sc, offs, v) \
128	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
129	    (sc)->sc_ ## spc ## o + (offs), (v))
130
131#define	HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
132#define	HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
133#define	HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
134#define	HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
135#define	HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
136#define	HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
137#define	HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
138#define	HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
139#define	HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
140#define	HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))
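/*
 * For example, HME_SEB_READ_4(sc, HME_SEBI_STAT) expands (modulo extra
 * parentheses) to
 *	bus_space_read_4(sc->sc_sebt, sc->sc_sebh, sc->sc_sebo + HME_SEBI_STAT)
 * so each register group is addressed relative to its own tag, handle and
 * offset as set up by the bus front-end.
 */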
141
142#define	HME_MAXERR	5
143#define	HME_WHINE(dev, ...) do {					\
144	if (hme_nerr++ < HME_MAXERR)					\
145		device_printf(dev, __VA_ARGS__);			\
146	if (hme_nerr == HME_MAXERR) {					\
147		device_printf(dev, "too many errors; not reporting any "	\
148		    "more\n");						\
149	}								\
150} while(0)
151
152int
153hme_config(struct hme_softc *sc)
154{
155	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
156	struct mii_softc *child;
157	bus_size_t size;
158	int error, rdesc, tdesc, i;
159
160	/*
161	 * HME common initialization.
162	 *
163	 * hme_softc fields that must be initialized by the front-end:
164	 *
165	 * the dma bus tag:
166	 *	sc_dmatag
167	 *
168	 * the bus handles, tags and offsets (split for SBus compatibility):
169	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
170	 *	sc_erx{t,h,o}	(Receiver Unit registers)
171	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
172	 *	sc_mac{t,h,o}	(MAC registers)
173	 *	sc_mif{t,h,o}	(Management Interface registers)
174	 *
175	 * the maximum bus burst size:
176	 *	sc_burst
177	 *
178	 */
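	/*
	 * As an illustration only (the actual tags, handles and offsets are
	 * filled in by the bus front-end, e.g. if_hme_pci.c or if_hme_sbus.c;
	 * the values shown here are hypothetical), a front-end does roughly:
	 *
	 *	sc->sc_sebt = tag;  sc->sc_sebh = handle;  sc->sc_sebo = 0x0;
	 *	(likewise for the erx, etx, mac and mif register groups)
	 *	sc->sc_burst = 64;
	 *	if ((error = hme_config(sc)) != 0)
	 *		goto fail;
	 */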
179
180	/* Make sure the chip is stopped. */
181	hme_stop(sc);
182
183	/*
184	 * Allocate DMA capable memory
185	 * Buffer descriptors must be aligned on a 2048 byte boundary;
186	 * take this into account when calculating the size. Note that
187	 * the maximum number of descriptors (256) occupies 2048 bytes,
188	 * so we allocate that much regardless of HME_N*DESC.
189	 */
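	/*
	 * Each descriptor is two 32 bit words (flags and buffer address),
	 * i.e. 8 bytes, so 256 descriptors occupy 256 * 8 = 2048 bytes per
	 * ring; one TX and one RX ring, each starting on a 2048 byte
	 * boundary, gives the 4096 bytes allocated below.
	 */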
190	size =	4096;
191
192	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
193	    BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
194	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
195	if (error)
196		return (error);
197
198	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
199	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
200	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
201	    &Giant, &sc->sc_cdmatag);
202	if (error)
203		goto fail_ptag;
204
205	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
206	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
207	    HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
208	    NULL, NULL, &sc->sc_rdmatag);
209	if (error)
210		goto fail_ctag;
211
212	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
213	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
214	    HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
215	    NULL, NULL, &sc->sc_tdmatag);
216	if (error)
217		goto fail_rtag;
218
219	/* Allocate control/TX DMA buffer */
220	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
221	    0, &sc->sc_cdmamap);
222	if (error != 0) {
223		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
224		goto fail_ttag;
225	}
226
227	/* Load the buffer */
228	sc->sc_rb.rb_dmabase = 0;
229	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
230	     sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
231	    sc->sc_rb.rb_dmabase == 0) {
232		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
233		    error);
234		goto fail_free;
235	}
236	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
237	    sc->sc_rb.rb_dmabase);
238
239	/*
240	 * Prepare the RX descriptors. rdesc serves as marker for the last
241	 * processed descriptor and may be used later on.
242	 */
243	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
244		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
245		error = bus_dmamap_create(sc->sc_rdmatag, 0,
246		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
247		if (error != 0)
248			goto fail_rxdesc;
249	}
250	error = bus_dmamap_create(sc->sc_rdmatag, 0,
251	    &sc->sc_rb.rb_spare_dmamap);
252	if (error != 0)
253		goto fail_rxdesc;
254	/* Same for the TX descs. */
255	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
256		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
257		error = bus_dmamap_create(sc->sc_tdmatag, 0,
258		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
259		if (error != 0)
260			goto fail_txdesc;
261	}
262
263	device_printf(sc->sc_dev, "Ethernet address:");
264	for (i = 0; i < 6; i++)
265		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);
266	printf("\n");
267
268	/* Initialize ifnet structure. */
269	ifp->if_softc = sc;
270	ifp->if_unit = device_get_unit(sc->sc_dev);
271	ifp->if_name = "hme";
272	ifp->if_mtu = ETHERMTU;
273	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
274	ifp->if_start = hme_start;
275	ifp->if_ioctl = hme_ioctl;
276	ifp->if_init = hme_init;
277	ifp->if_output = ether_output;
278	ifp->if_watchdog = hme_watchdog;
279	ifp->if_snd.ifq_maxlen = HME_NTXQ;
280
281	hme_mifinit(sc);
282
283	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
284	    hme_mediastatus)) != 0) {
285		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
286		goto fail_rxdesc;
287	}
288	sc->sc_mii = device_get_softc(sc->sc_miibus);
289
290	/*
291	 * Walk along the list of attached MII devices and
292	 * establish an `MII instance' to `phy number'
293	 * mapping. We'll use this mapping in media change
294	 * requests to determine which phy to use to program
295	 * the MIF configuration register.
296	 */
297	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
298	     child = LIST_NEXT(child, mii_list)) {
299		/*
300		 * Note: we support just two PHYs: the built-in
301		 * internal device and an external on the MII
302		 * connector.
303		 */
304		if (child->mii_phy > 1 || child->mii_inst > 1) {
305			device_printf(sc->sc_dev, "cannot accommodate "
306			    "MII device %s at phy %d, instance %d\n",
307			    device_get_name(child->mii_dev),
308			    child->mii_phy, child->mii_inst);
309			continue;
310		}
311
312		sc->sc_phys[child->mii_inst] = child->mii_phy;
313	}
314
315	/* Attach the interface. */
316	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);
317
318	callout_init(&sc->sc_tick_ch, 0);
319	return (0);
320
321fail_txdesc:
322	for (i = 0; i < tdesc; i++) {
323		bus_dmamap_destroy(sc->sc_tdmatag,
324		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
325	}
326	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
327fail_rxdesc:
328	for (i = 0; i < rdesc; i++) {
329		bus_dmamap_destroy(sc->sc_rdmatag,
330		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
331	}
332	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
333fail_free:
334	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
335fail_ttag:
336	bus_dma_tag_destroy(sc->sc_tdmatag);
337fail_rtag:
338	bus_dma_tag_destroy(sc->sc_rdmatag);
339fail_ctag:
340	bus_dma_tag_destroy(sc->sc_cdmatag);
341fail_ptag:
342	bus_dma_tag_destroy(sc->sc_pdmatag);
343	return (error);
344}
345
346void
347hme_detach(struct hme_softc *sc)
348{
349	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
350	int i;
351
352	ether_ifdetach(ifp);
353	hme_stop(sc);
354	device_delete_child(sc->sc_dev, sc->sc_miibus);
355
356	for (i = 0; i < HME_NTXQ; i++) {
357		bus_dmamap_destroy(sc->sc_tdmatag,
358		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
359	}
360	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
361	for (i = 0; i < HME_NRXDESC; i++) {
362		bus_dmamap_destroy(sc->sc_rdmatag,
363		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
364	}
365	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
366	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
367	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
368	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
369	bus_dma_tag_destroy(sc->sc_tdmatag);
370	bus_dma_tag_destroy(sc->sc_rdmatag);
371	bus_dma_tag_destroy(sc->sc_cdmatag);
372	bus_dma_tag_destroy(sc->sc_pdmatag);
373}
374
375void
376hme_suspend(struct hme_softc *sc)
377{
378
379	hme_stop(sc);
380}
381
382void
383hme_resume(struct hme_softc *sc)
384{
385	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
386
387	if ((ifp->if_flags & IFF_UP) != 0)
388		hme_init(ifp);
389}
390
391static void
392hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
393{
394	struct hme_softc *sc = (struct hme_softc *)xsc;
395
396	if (error != 0)
397		return;
398	KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
399	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
400}
401
402static void
403hme_tick(void *arg)
404{
405	struct hme_softc *sc = arg;
406	int s;
407
408	s = splnet();
409	mii_tick(sc->sc_mii);
410	splx(s);
411
412	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
413}
414
415static void
416hme_reset(struct hme_softc *sc)
417{
418	int s;
419
420	s = splnet();
421	hme_init(sc);
422	splx(s);
423}
424
425static void
426hme_stop(struct hme_softc *sc)
427{
428	u_int32_t v;
429	int n;
430
431	callout_stop(&sc->sc_tick_ch);
432
433	/* Reset transmitter and receiver */
434	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
435	    HME_SEB_RESET_ERX);
436
437	for (n = 0; n < 20; n++) {
438		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
439		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
440			return;
441		DELAY(20);
442	}
443
444	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
445}
446
447static void
448hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
449    bus_size_t totsize, int error)
450{
451	bus_addr_t *a = xsc;
452
453	KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
454	if (error != 0)
455		return;
456	*a = segs[0].ds_addr;
457}
458
459/*
460 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
461 * ring for subsequent use.
462 */
463static __inline void
464hme_discard_rxbuf(struct hme_softc *sc, int ix)
465{
466
467	/*
468	 * Dropped a packet, reinitialize the descriptor and turn the
469	 * ownership back to the hardware.
470	 */
471	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
472	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
473}
474
475static int
476hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
477{
478	struct hme_rxdesc *rd;
479	struct mbuf *m;
480	bus_addr_t ba;
481	bus_dmamap_t map;
482	uintptr_t b;
483	int a, unmap;
484
485	rd = &sc->sc_rb.rb_rxdesc[ri];
486	unmap = rd->hrx_m != NULL;
487	if (unmap && keepold) {
488		/*
489		 * Reinitialize the descriptor flags, as they may have been
490		 * altered by the hardware.
491		 */
492		hme_discard_rxbuf(sc, ri);
493		return (0);
494	}
495	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
496		return (ENOBUFS);
497	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
498	b = mtod(m, uintptr_t);
499	/*
500	 * Required alignment boundary. At least 16 is needed, but since
501	 * the mapping must be done in a way that a burst can start on a
502	 * natural boundary we might need to extend this.
503	 */
504	a = max(HME_MINRXALIGN, sc->sc_burst);
505	/*
506	 * Make sure the buffer is suitably aligned. The 2 byte offset is removed
507	 * when the mbuf is handed up. XXX: this ensures at least 16 byte
508	 * alignment of the header adjacent to the ethernet header, which
509	 * should be sufficient in all cases. Nevertheless, this second-guesses
510	 * ALIGN().
511	 */
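	/*
	 * Note that cluster-backed mbufs from m_getcl() normally start on a
	 * 2048 byte boundary already, so the adjustment below is usually 0;
	 * the roundup2() handles the general case.
	 */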
512	m_adj(m, roundup2(b, a) - b);
513	if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
514	    m, hme_rxdma_callback, &ba, 0) != 0) {
515		m_freem(m);
516		return (ENOBUFS);
517	}
518	if (unmap) {
519		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
520		    BUS_DMASYNC_POSTREAD);
521		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
522	}
523	map = rd->hrx_dmamap;
524	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
525	sc->sc_rb.rb_spare_dmamap = map;
526	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
527	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
528	rd->hrx_m = m;
529	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
530	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
531	return (0);
532}
533
534static int
535hme_meminit(struct hme_softc *sc)
536{
537	struct hme_ring *hr = &sc->sc_rb;
538	struct hme_txdesc *td;
539	bus_addr_t dma;
540	caddr_t p;
541	unsigned int i;
542	int error;
543
544	p = hr->rb_membase;
545	dma = hr->rb_dmabase;
546
547	/*
548	 * Allocate transmit descriptors
549	 */
550	hr->rb_txd = p;
551	hr->rb_txddma = dma;
552	p += HME_NTXDESC * HME_XD_SIZE;
553	dma += HME_NTXDESC * HME_XD_SIZE;
554	/* We have reserved descriptor space until the next 2048 byte boundary. */
555	dma = (bus_addr_t)roundup((u_long)dma, 2048);
556	p = (caddr_t)roundup((u_long)p, 2048);
557
558	/*
559	 * Allocate receive descriptors
560	 */
561	hr->rb_rxd = p;
562	hr->rb_rxddma = dma;
563	p += HME_NRXDESC * HME_XD_SIZE;
564	dma += HME_NRXDESC * HME_XD_SIZE;
565	/* Again move forward to the next 2048 byte boundary.*/
566	dma = (bus_addr_t)roundup((u_long)dma, 2048);
567	p = (caddr_t)roundup((u_long)p, 2048);
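	/*
	 * The resulting layout of the control buffer is thus (with the ring
	 * sizes fitting in 2048 bytes each, as noted in hme_config()):
	 *
	 *	offset 0	transmit descriptor ring
	 *	offset 2048	receive descriptor ring
	 *
	 * The RX buffers themselves are separate mbuf clusters mapped via
	 * sc_rdmatag.
	 */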
568
569	/*
570	 * Initialize transmit buffer descriptors
571	 */
572	for (i = 0; i < HME_NTXDESC; i++) {
573		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
574		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
575	}
576
577	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
578	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
579	for (i = 0; i < HME_NTXQ; i++) {
580		td = &sc->sc_rb.rb_txdesc[i];
581		if (td->htx_m != NULL) {
582			m_freem(td->htx_m);
583			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
584			    BUS_DMASYNC_POSTWRITE);
585			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
586			td->htx_m = NULL;
587		}
588		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
589	}
590
591	/*
592	 * Initialize receive buffer descriptors
593	 */
594	for (i = 0; i < HME_NRXDESC; i++) {
595		error = hme_add_rxbuf(sc, i, 1);
596		if (error != 0)
597			return (error);
598	}
599
600	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
601	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);
602
603	hr->rb_tdhead = hr->rb_tdtail = 0;
604	hr->rb_td_nbusy = 0;
605	hr->rb_rdtail = 0;
606	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
607	    hr->rb_txddma);
608	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
609	    hr->rb_rxddma);
610	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
611	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
612	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
613	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
614	return (0);
615}
616
617static int
618hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
619    u_int32_t clr, u_int32_t set)
620{
621	int i = 0;
622
623	val &= ~clr;
624	val |= set;
625	HME_MAC_WRITE_4(sc, reg, val);
626	if (clr == 0 && set == 0)
627		return (1);	/* just write, no bits to wait for */
628	do {
629		DELAY(100);
630		i++;
631		val = HME_MAC_READ_4(sc, reg);
632		if (i > 40) {
633			/* After 3.5ms, we should have been done. */
634			device_printf(sc->sc_dev, "timeout while writing to "
635			    "MAC configuration register\n");
636			return (0);
637		}
638	} while ((val & clr) != 0 && (val & set) != set);
639	return (1);
640}
641
642/*
643 * Initialization of interface; set up initialization block
644 * and transmit/receive descriptor rings.
645 */
646static void
647hme_init(void *xsc)
648{
649	struct hme_softc *sc = (struct hme_softc *)xsc;
650	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
651	u_int8_t *ea;
652	u_int32_t v;
653
654	/*
655	 * Initialization sequence. The numbered steps below correspond
656	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
657	 * Channel Engine manual (part of the PCIO manual).
658	 * See also the STP2002-STQ document from Sun Microsystems.
659	 */
660
661	/* step 1 & 2. Reset the Ethernet Channel */
662	hme_stop(sc);
663
664	/* Re-initialize the MIF */
665	hme_mifinit(sc);
666
667#if 0
668	/* Mask all MIF interrupts, just in case */
669	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
670#endif
671
672	/* step 3. Setup data structures in host memory */
673	if (hme_meminit(sc) != 0) {
674		device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
675		return;
676	}
677
678	/* step 4. TX MAC registers & counters */
679	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
680	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
681	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
682	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
683	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, ETHER_MAX_LEN);
684
685	/* Load station MAC address */
686	ea = sc->sc_arpcom.ac_enaddr;
687	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
688	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
689	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
690
691	/*
692	 * Init seed for backoff
693	 * (source suggested by manual: low 10 bits of MAC address)
694	 */
695	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
696	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
697
698
699	/* Note: Accepting power-on default for other MAC registers here.. */
700
701	/* step 5. RX MAC registers & counters */
702	hme_setladrf(sc, 0);
703
704	/* step 6 & 7. Program Descriptor Ring Base Addresses */
705	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
706	/* Transmit Descriptor ring size: in increments of 16 */
707	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
708
709	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
710	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, ETHER_MAX_LEN);
711
712	/* step 8. Global Configuration & Interrupt Mask */
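	/*
	 * Set bits in the interrupt mask disable the corresponding status
	 * interrupt, so writing the complement of the wanted status bits
	 * below leaves exactly those sources enabled.
	 */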
713	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
714	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
715		HME_SEB_STAT_HOSTTOTX |
716		HME_SEB_STAT_RXTOHOST |
717		HME_SEB_STAT_TXALL |
718		HME_SEB_STAT_TXPERR |
719		HME_SEB_STAT_RCNTEXP |
720		HME_SEB_STAT_ALL_ERRORS ));
721
722	switch (sc->sc_burst) {
723	default:
724		v = 0;
725		break;
726	case 16:
727		v = HME_SEB_CFG_BURST16;
728		break;
729	case 32:
730		v = HME_SEB_CFG_BURST32;
731		break;
732	case 64:
733		v = HME_SEB_CFG_BURST64;
734		break;
735	}
736	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
737
738	/* step 9. ETX Configuration: use mostly default values */
739
740	/* Enable DMA */
741	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
742	v |= HME_ETX_CFG_DMAENABLE;
743	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
744
745	/* step 10. ERX Configuration */
746	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
747
748	/* Encode Receive Descriptor ring size: four possible values */
749	v &= ~HME_ERX_CFG_RINGSIZEMSK;
750	switch (HME_NRXDESC) {
751	case 32:
752		v |= HME_ERX_CFG_RINGSIZE32;
753		break;
754	case 64:
755		v |= HME_ERX_CFG_RINGSIZE64;
756		break;
757	case 128:
758		v |= HME_ERX_CFG_RINGSIZE128;
759		break;
760	case 256:
761		v |= HME_ERX_CFG_RINGSIZE256;
762		break;
763	default:
764		printf("hme: invalid Receive Descriptor ring size\n");
765		break;
766	}
767
768	/* Enable DMA, fix RX first byte offset. */
769	v &= ~HME_ERX_CFG_FBO_MASK;
770	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
771	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
772	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
773
774	/* step 11. XIF Configuration */
775	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
776	v |= HME_MAC_XIF_OE;
777	/* If an external transceiver is connected, enable its MII drivers */
778	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
779		v |= HME_MAC_XIF_MIIENABLE;
780	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
781	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
782
783	/* step 12. RX_MAC Configuration Register */
784	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
785	v |= HME_MAC_RXCFG_ENABLE;
786	v &= ~(HME_MAC_RXCFG_DCRCS);
787	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
788	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
789
790	/* step 13. TX_MAC Configuration Register */
791	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
792	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
793	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
794	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
795
796	/* step 14. Issue Transmit Pending command */
797
798#ifdef HMEDEBUG
799	/* Debug: double-check. */
800	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
801	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
802	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
803	    HME_ERX_READ_4(sc, HME_ERXI_RING),
804	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
805	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
806	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
807	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
808	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
809	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
810	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
811	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));
812#endif
813
814	/* Start the one second timer. */
815	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
816
817	ifp->if_flags |= IFF_RUNNING;
818	ifp->if_flags &= ~IFF_OACTIVE;
819	ifp->if_timer = 0;
820	hme_start(ifp);
821}
822
823struct hme_txdma_arg {
824	struct hme_softc	*hta_sc;
825	struct hme_txdesc	*hta_htx;
826	int			hta_ndescs;
827};
828
829/*
830 * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf()
831 * are readable from the nearest burst boundary onward (i.e. potentially before
832 * ds_addr) to the first boundary beyond the end. This is usually a safe
833 * assumption to make, but is not documented.
834 */
835static void
836hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
837    bus_size_t totsz, int error)
838{
839	struct hme_txdma_arg *ta = xsc;
840	struct hme_txdesc *htx;
841	bus_size_t len = 0;
842	caddr_t txd;
843	u_int32_t flags = 0;
844	int i, tdhead, pci;
845
846	if (error != 0)
847		return;
848
849	tdhead = ta->hta_sc->sc_rb.rb_tdhead;
850	pci = ta->hta_sc->sc_pci;
851	txd = ta->hta_sc->sc_rb.rb_txd;
852	htx = ta->hta_htx;
853
854	if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
855		ta->hta_ndescs = -1;
856		return;
857	}
858	ta->hta_ndescs = nsegs;
859
860	for (i = 0; i < nsegs; i++) {
861		if (segs[i].ds_len == 0)
862			continue;
863
864		/* Fill the ring entry. */
865		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
866		if (len == 0)
867			flags |= HME_XD_SOP;
868		if (len + segs[i].ds_len == totsz)
869			flags |= HME_XD_EOP;
870		CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
871		    "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
872		    (u_int)segs[i].ds_addr);
873		HME_XD_SETFLAGS(pci, txd, tdhead, flags);
874		HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);
875
876		ta->hta_sc->sc_rb.rb_td_nbusy++;
877		htx->htx_lastdesc = tdhead;
878		tdhead = (tdhead + 1) % HME_NTXDESC;
879		len += segs[i].ds_len;
880	}
881	ta->hta_sc->sc_rb.rb_tdhead = tdhead;
882	KASSERT((flags & HME_XD_EOP) != 0,
883	    ("hme_txdma_callback: missed end of packet!"));
884}
885
886/*
887 * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and
888 * start the transmission.
889 * Returns 0 on success, -1 if there were not enough free descriptors to map
890 * the packet, or an errno otherwise.
891 */
892static int
893hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
894{
895	struct hme_txdma_arg cba;
896	struct hme_txdesc *td;
897	int error, si, ri;
898	u_int32_t flags;
899
900	si = sc->sc_rb.rb_tdhead;
901	if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
902		return (-1);
903	td->htx_m = m0;
904	cba.hta_sc = sc;
905	cba.hta_htx = td;
906	if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
907	     m0, hme_txdma_callback, &cba, 0)) != 0)
908		goto fail;
909	if (cba.hta_ndescs == -1) {
910		error = -1;
911		goto fail;
912	}
913	bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
914	    BUS_DMASYNC_PREWRITE);
915
916	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
917	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);
918
919	/* Turn descriptor ownership over to the hme, back to front. */
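	/*
	 * The ownership bits are handed over starting with the last
	 * descriptor of the chain and working back to the first, so the chip
	 * cannot start on the SOP descriptor before the rest of the chain is
	 * valid.
	 */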
920	ri = sc->sc_rb.rb_tdhead;
921	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
922	    ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
923	do {
924		ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
925		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
926		    HME_XD_OWN;
927		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
928		    ri, si, flags);
929		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
930	} while (ri != si);
931
932	/* start the transmission. */
933	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
934	return (0);
935fail:
936	bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
937	return (error);
938}
939
940/*
941 * Pass a packet to the higher levels.
942 */
943static void
944hme_read(struct hme_softc *sc, int ix, int len)
945{
946	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
947	struct mbuf *m;
948
949	if (len <= sizeof(struct ether_header) ||
950	    len > ETHERMTU + sizeof(struct ether_header)) {
951#ifdef HMEDEBUG
952		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
953		    len);
954#endif
955		ifp->if_ierrors++;
956		hme_discard_rxbuf(sc, ix);
957		return;
958	}
959
960	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
961	CTR1(KTR_HME, "hme_read: len %d", len);
962
963	if (hme_add_rxbuf(sc, ix, 0) != 0) {
964		/*
965		 * hme_add_rxbuf will leave the old buffer in the ring until
966		 * it is sure that a new buffer can be mapped. If it can not,
967		 * drop the packet, but leave the interface up.
968		 */
969		ifp->if_iqdrops++;
970		hme_discard_rxbuf(sc, ix);
971		return;
972	}
973
974	ifp->if_ipackets++;
975
976	m->m_pkthdr.rcvif = ifp;
977	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
978	m_adj(m, HME_RXOFFS);
979	/* Pass the packet up. */
980	(*ifp->if_input)(ifp, m);
981}
982
983static void
984hme_start(struct ifnet *ifp)
985{
986	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
987	struct mbuf *m;
988	int error, enq = 0;
989
990	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
991		return;
992
993	error = 0;
994	for (;;) {
995		IF_DEQUEUE(&ifp->if_snd, m);
996		if (m == NULL)
997			break;
998
999		error = hme_load_txmbuf(sc, m);
1000		if (error == -1) {
1001			ifp->if_flags |= IFF_OACTIVE;
1002			IF_PREPEND(&ifp->if_snd, m);
1003			break;
1004		} else if (error > 0) {
1005			printf("hme_start: error %d while loading mbuf\n",
1006			    error);
1007		} else {
1008			enq = 1;
1009			BPF_MTAP(ifp, m);
1010		}
1011	}
1012
1013	if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
1014		ifp->if_flags |= IFF_OACTIVE;
1015	/* Set watchdog timer if a packet was queued */
1016	if (enq) {
1017		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1018		    BUS_DMASYNC_PREWRITE);
1019		ifp->if_timer = 5;
1020	}
1021}
1022
1023/*
1024 * Transmit interrupt.
1025 */
1026static void
1027hme_tint(struct hme_softc *sc)
1028{
1029	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1030	struct hme_txdesc *htx;
1031	unsigned int ri, txflags;
1032
1033	/*
1034	 * Unload collision counters
1035	 */
1036	ifp->if_collisions +=
1037		HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
1038		HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
1039		HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
1040		HME_MAC_READ_4(sc, HME_MACI_LTCNT);
1041
1042	/*
1043	 * then clear the hardware counters.
1044	 */
1045	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
1046	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
1047	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
1048	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
1049
1050	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1051	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1052	/* Fetch current position in the transmit ring */
1053	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
1054		if (sc->sc_rb.rb_td_nbusy <= 0) {
1055			CTR0(KTR_HME, "hme_tint: not busy!");
1056			break;
1057		}
1058
1059		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
1060		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
1061
1062		if ((txflags & HME_XD_OWN) != 0)
1063			break;
1064
1065		CTR0(KTR_HME, "hme_tint: not owned");
1066		--sc->sc_rb.rb_td_nbusy;
1067		ifp->if_flags &= ~IFF_OACTIVE;
1068
1069		/* Complete packet transmitted? */
1070		if ((txflags & HME_XD_EOP) == 0)
1071			continue;
1072
1073		KASSERT(htx->htx_lastdesc == ri,
1074		    ("hme_tint: ring indices skewed: %d != %d!",
1075		     htx->htx_lastdesc, ri));
1076		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
1077		    BUS_DMASYNC_POSTWRITE);
1078		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1079
1080		ifp->if_opackets++;
1081		m_freem(htx->htx_m);
1082		htx->htx_m = NULL;
1083		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
1084		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
1085		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1086	}
1087	/* Turn off watchdog */
1088	if (sc->sc_rb.rb_td_nbusy == 0)
1089		ifp->if_timer = 0;
1090
1091	/* Update ring */
1092	sc->sc_rb.rb_tdtail = ri;
1093
1094	hme_start(ifp);
1095
1096	if (sc->sc_rb.rb_td_nbusy == 0)
1097		ifp->if_timer = 0;
1098}
1099
1100/*
1101 * Receive interrupt.
1102 */
1103static void
1104hme_rint(struct hme_softc *sc)
1105{
1106	caddr_t xdr = sc->sc_rb.rb_rxd;
1107	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1108	unsigned int ri, len;
1109	int progress = 0;
1110	u_int32_t flags;
1111
1112	/*
1113	 * Process all buffers with valid data.
1114	 */
1115	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1116	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
1117		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
1118		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
1119		if ((flags & HME_XD_OWN) != 0)
1120			break;
1121
1122		progress++;
1123		if ((flags & HME_XD_OFL) != 0) {
1124			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
1125			    "flags=0x%x\n", ri, flags);
1126			ifp->if_ierrors++;
1127			hme_discard_rxbuf(sc, ri);
1128		} else {
1129			len = HME_XD_DECODE_RSIZE(flags);
1130			hme_read(sc, ri, len);
1131		}
1132	}
1133	if (progress) {
1134		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1135		    BUS_DMASYNC_PREWRITE);
1136	}
1137	sc->sc_rb.rb_rdtail = ri;
1138}
1139
1140static void
1141hme_eint(struct hme_softc *sc, u_int status)
1142{
1143
1144	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
1145		device_printf(sc->sc_dev, "XXXlink status changed\n");
1146		return;
1147	}
1148
1149	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
1150}
1151
1152void
1153hme_intr(void *v)
1154{
1155	struct hme_softc *sc = (struct hme_softc *)v;
1156	u_int32_t status;
1157
1158	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1159	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
1160
1161	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
1162		hme_eint(sc, status);
1163
1164	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
1165		hme_tint(sc);
1166
1167	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
1168		hme_rint(sc);
1169}
1170
1171
1172static void
1173hme_watchdog(struct ifnet *ifp)
1174{
1175	struct hme_softc *sc = ifp->if_softc;
1176#ifdef HMEDEBUG
1177	u_int32_t status;
1178
1179	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1180	CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
1181#endif
1182	device_printf(sc->sc_dev, "device timeout\n");
1183	++ifp->if_oerrors;
1184
1185	hme_reset(sc);
1186}
1187
1188/*
1189 * Initialize the MII Management Interface
1190 */
1191static void
1192hme_mifinit(struct hme_softc *sc)
1193{
1194	u_int32_t v;
1195
1196	/* Configure the MIF in frame mode */
1197	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1198	v &= ~HME_MIF_CFG_BBMODE;
1199	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1200}
1201
1202/*
1203 * MII interface
1204 */
1205int
1206hme_mii_readreg(device_t dev, int phy, int reg)
1207{
1208	struct hme_softc *sc = device_get_softc(dev);
1209	int n;
1210	u_int32_t v;
1211
1212	/* Select the desired PHY in the MIF configuration register */
1213	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1214	/* Clear PHY select bit */
1215	v &= ~HME_MIF_CFG_PHY;
1216	if (phy == HME_PHYAD_EXTERNAL)
1217		/* Set PHY select bit to get at external device */
1218		v |= HME_MIF_CFG_PHY;
1219	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1220
1221	/* Construct the frame command */
1222	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1223	    HME_MIF_FO_TAMSB |
1224	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
1225	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
1226	    (reg << HME_MIF_FO_REGAD_SHIFT);
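	/*
	 * The value just built is a standard IEEE 802.3 clause 22 management
	 * frame: start bits, read opcode, PHY address and register address.
	 * Completion is signaled by the turnaround bit (HME_MIF_FO_TALSB) and
	 * the result is returned in the low 16 bits (HME_MIF_FO_DATA), as
	 * polled below.
	 */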
1227
1228	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1229	for (n = 0; n < 100; n++) {
1230		DELAY(1);
1231		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1232		if (v & HME_MIF_FO_TALSB)
1233			return (v & HME_MIF_FO_DATA);
1234	}
1235
1236	device_printf(sc->sc_dev, "mii_read timeout\n");
1237	return (0);
1238}
1239
1240int
1241hme_mii_writereg(device_t dev, int phy, int reg, int val)
1242{
1243	struct hme_softc *sc = device_get_softc(dev);
1244	int n;
1245	u_int32_t v;
1246
1247	/* Select the desired PHY in the MIF configuration register */
1248	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1249	/* Clear PHY select bit */
1250	v &= ~HME_MIF_CFG_PHY;
1251	if (phy == HME_PHYAD_EXTERNAL)
1252		/* Set PHY select bit to get at external device */
1253		v |= HME_MIF_CFG_PHY;
1254	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1255
1256	/* Construct the frame command */
1257	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
1258	    HME_MIF_FO_TAMSB				|
1259	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
1260	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
1261	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
1262	    (val & HME_MIF_FO_DATA);
1263
1264	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1265	for (n = 0; n < 100; n++) {
1266		DELAY(1);
1267		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1268		if (v & HME_MIF_FO_TALSB)
1269			return (1);
1270	}
1271
1272	device_printf(sc->sc_dev, "mii_write timeout\n");
1273	return (0);
1274}
1275
1276void
1277hme_mii_statchg(device_t dev)
1278{
1279	struct hme_softc *sc = device_get_softc(dev);
1280	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
1281	int phy = sc->sc_phys[instance];
1282	u_int32_t v;
1283
1284#ifdef HMEDEBUG
1285	if (sc->sc_debug)
1286		printf("hme_mii_statchg: status change: phy = %d\n", phy);
1287#endif
1288
1289	/* Select the current PHY in the MIF configuration register */
1290	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1291	v &= ~HME_MIF_CFG_PHY;
1292	if (phy == HME_PHYAD_EXTERNAL)
1293		v |= HME_MIF_CFG_PHY;
1294	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1295
1296	/* Set the MAC Full Duplex bit appropriately */
1297	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
1298	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
1299		return;
1300	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1301		v |= HME_MAC_TXCFG_FULLDPLX;
1302	else
1303		v &= ~HME_MAC_TXCFG_FULLDPLX;
1304	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
1305	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
1306		return;
1307}
1308
1309static int
1310hme_mediachange(struct ifnet *ifp)
1311{
1312	struct hme_softc *sc = ifp->if_softc;
1313
1314	return (mii_mediachg(sc->sc_mii));
1315}
1316
1317static void
1318hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1319{
1320	struct hme_softc *sc = ifp->if_softc;
1321
1322	if ((ifp->if_flags & IFF_UP) == 0)
1323		return;
1324
1325	mii_pollstat(sc->sc_mii);
1326	ifmr->ifm_active = sc->sc_mii->mii_media_active;
1327	ifmr->ifm_status = sc->sc_mii->mii_media_status;
1328}
1329
1330/*
1331 * Process an ioctl request.
1332 */
1333static int
1334hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1335{
1336	struct hme_softc *sc = ifp->if_softc;
1337	struct ifreq *ifr = (struct ifreq *)data;
1338	int s, error = 0;
1339
1340	s = splnet();
1341
1342	switch (cmd) {
1343	case SIOCSIFFLAGS:
1344		if ((ifp->if_flags & IFF_UP) == 0 &&
1345		    (ifp->if_flags & IFF_RUNNING) != 0) {
1346			/*
1347			 * If interface is marked down and it is running, then
1348			 * stop it.
1349			 */
1350			hme_stop(sc);
1351			ifp->if_flags &= ~IFF_RUNNING;
1352		} else if ((ifp->if_flags & IFF_UP) != 0 &&
1353		    	   (ifp->if_flags & IFF_RUNNING) == 0) {
1354			/*
1355			 * If interface is marked up and it is stopped, then
1356			 * start it.
1357			 */
1358			hme_init(sc);
1359		} else if ((ifp->if_flags & IFF_UP) != 0) {
1360			/*
1361			 * Reset the interface to pick up changes in any other
1362			 * flags that affect hardware registers.
1363			 */
1364			hme_init(sc);
1365		}
1366#ifdef HMEDEBUG
1367		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1368#endif
1369		break;
1370
1371	case SIOCADDMULTI:
1372	case SIOCDELMULTI:
1373		hme_setladrf(sc, 1);
1374		error = 0;
1375		break;
1376	case SIOCGIFMEDIA:
1377	case SIOCSIFMEDIA:
1378		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
1379		break;
1380	default:
1381		error = ether_ioctl(ifp, cmd, data);
1382		break;
1383	}
1384
1385	splx(s);
1386	return (error);
1387}
1388
1389/*
1390 * Set up the logical address filter.
1391 */
1392static void
1393hme_setladrf(struct hme_softc *sc, int reenable)
1394{
1395	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1396	struct ifmultiaddr *inm;
1397	struct sockaddr_dl *sdl;
1398	u_char *cp;
1399	u_int32_t crc;
1400	u_int32_t hash[4];
1401	u_int32_t macc;
1402	int len;
1403
1404	/* Clear hash table */
1405	hash[3] = hash[2] = hash[1] = hash[0] = 0;
1406
1407	/* Get current RX configuration */
1408	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1409
1410	/*
1411	 * Disable the receiver while changing its state, as the documentation
1412	 * mandates.
1413	 * We then must wait until the bit clears in the register. This should
1414	 * take at most 3.5ms.
1415	 */
1416	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
1417		return;
1418	/* Disable the hash filter before writing to the filter registers. */
1419	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1420	    HME_MAC_RXCFG_HENABLE, 0))
1421		return;
1422
1423	if (reenable)
1424		macc |= HME_MAC_RXCFG_ENABLE;
1425	else
1426		macc &= ~HME_MAC_RXCFG_ENABLE;
1427
1428	if ((ifp->if_flags & IFF_PROMISC) != 0) {
1429		/* Turn on promiscuous mode; turn off the hash filter */
1430		macc |= HME_MAC_RXCFG_PMISC;
1431		macc &= ~HME_MAC_RXCFG_HENABLE;
1432		ifp->if_flags |= IFF_ALLMULTI;
1433		goto chipit;
1434	}
1435
1436	/* Turn off promiscuous mode; turn on the hash filter */
1437	macc &= ~HME_MAC_RXCFG_PMISC;
1438	macc |= HME_MAC_RXCFG_HENABLE;
1439
1440	/*
1441	 * Set up multicast address filter by passing all multicast addresses
1442	 * through a crc generator, and then using the high order 6 bits as an
1443	 * index into the 64 bit logical address filter.  The high order two bits
1444	 * select the word, while the remaining four bits select the bit within
1445	 * the word.
1446	 */
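	/*
	 * For example, a CRC whose top 6 bits are 42 (0x2a) sets bit 10 of
	 * hash[2], which is loaded into HME_MACI_HASHTAB2 below.
	 */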
1447
1448	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
1449		if (inm->ifma_addr->sa_family != AF_LINK)
1450			continue;
1451		sdl = (struct sockaddr_dl *)inm->ifma_addr;
1452		cp = LLADDR(sdl);
1453		crc = 0xffffffff;
1454		for (len = sdl->sdl_alen; --len >= 0;) {
1455			int octet = *cp++;
1456			int i;
1457
1458#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
1459			for (i = 0; i < 8; i++) {
1460				if ((crc & 1) ^ (octet & 1)) {
1461					crc >>= 1;
1462					crc ^= MC_POLY_LE;
1463				} else {
1464					crc >>= 1;
1465				}
1466				octet >>= 1;
1467			}
1468		}
1469		/* Just want the 6 most significant bits. */
1470		crc >>= 26;
1471
1472		/* Set the corresponding bit in the filter. */
1473		hash[crc >> 4] |= 1 << (crc & 0xf);
1474	}
1475
1476	ifp->if_flags &= ~IFF_ALLMULTI;
1477
1478chipit:
1479	/* Now load the hash table into the chip */
1480	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
1481	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
1482	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
1483	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
1484	hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
1485	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
1486}
1487