1/*-
2 * Copyright (c) 1999 The NetBSD Foundation, Inc.
3 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Paul Kranenburg.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *        This product includes software developed by the NetBSD
20 *        Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 *    contributors may be used to endorse or promote products derived
23 *    from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 *
37 *	from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
38 */
39
40#include <sys/cdefs.h>
41__FBSDID("$FreeBSD: head/sys/dev/hme/if_hme.c 129006 2004-05-06 13:38:19Z joerg $");
42
43/*
44 * HME Ethernet module driver.
45 *
46 * The HME is, e.g., part of the PCIO PCI multi-function device.
47 * It supports TX gathering and TX and RX checksum offloading.
48 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
49 * for this offset: mbuf clusters usually sit on 2^11 byte boundaries, and
50 * skipping 2 bytes aligns the header following the ethernet header on a
51 * natural boundary, so this ensures minimal wastage in the most common case.
52 *
53 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
54 * maximum packet size (this is not verified). Buffers starting on odd
55 * boundaries must be mapped so that the burst can start on a natural boundary.
56 *
57 * Checksumming is not yet supported.
58 */
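
/*
 * Sketch of the resulting RX buffer layout, assuming the 2 byte offset
 * (HME_RXOFFS) chosen above:
 *
 *	buffer start (aligned to at least 16 bytes)
 *	+0	2 bytes skipped by the chip (programmable first byte offset)
 *	+2	14 byte ethernet header
 *	+16	following (e.g. IP) header, naturally aligned
 *
 * hme_read() strips the 2 byte offset again with m_adj() before handing
 * the mbuf up the stack.
 */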
59
60#define HMEDEBUG
61#define	KTR_HME		KTR_CT2		/* XXX */
62
63#include <sys/param.h>
64#include <sys/systm.h>
65#include <sys/bus.h>
66#include <sys/endian.h>
67#include <sys/kernel.h>
68#include <sys/ktr.h>
69#include <sys/mbuf.h>
70#include <sys/malloc.h>
71#include <sys/socket.h>
72#include <sys/sockio.h>
73
74#include <net/bpf.h>
75#include <net/ethernet.h>
76#include <net/if.h>
77#include <net/if_arp.h>
78#include <net/if_dl.h>
79#include <net/if_media.h>
80#include <net/if_vlan_var.h>
81
82#include <dev/mii/mii.h>
83#include <dev/mii/miivar.h>
84
85#include <machine/bus.h>
86
87#include <dev/hme/if_hmereg.h>
88#include <dev/hme/if_hmevar.h>
89
90static void	hme_start(struct ifnet *);
91static void	hme_stop(struct hme_softc *);
92static int	hme_ioctl(struct ifnet *, u_long, caddr_t);
93static void	hme_tick(void *);
94static void	hme_watchdog(struct ifnet *);
95static void	hme_init(void *);
96static int	hme_add_rxbuf(struct hme_softc *, unsigned int, int);
97static int	hme_meminit(struct hme_softc *);
98static int	hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
99    u_int32_t, u_int32_t);
100static void	hme_mifinit(struct hme_softc *);
101static void	hme_reset(struct hme_softc *);
102static void	hme_setladrf(struct hme_softc *, int);
103
104static int	hme_mediachange(struct ifnet *);
105static void	hme_mediastatus(struct ifnet *, struct ifmediareq *);
106
107static int	hme_load_txmbuf(struct hme_softc *, struct mbuf *);
108static void	hme_read(struct hme_softc *, int, int);
109static void	hme_eint(struct hme_softc *, u_int);
110static void	hme_rint(struct hme_softc *);
111static void	hme_tint(struct hme_softc *);
112
113static void	hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
114static void	hme_rxdma_callback(void *, bus_dma_segment_t *, int,
115    bus_size_t, int);
116static void	hme_txdma_callback(void *, bus_dma_segment_t *, int,
117    bus_size_t, int);
118
119devclass_t hme_devclass;
120
121static int hme_nerr;
122
123DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
124MODULE_DEPEND(hme, miibus, 1, 1, 1);
125
126#define	HME_SPC_READ_4(spc, sc, offs) \
127	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
128	    (sc)->sc_ ## spc ## o + (offs))
129#define	HME_SPC_WRITE_4(spc, sc, offs, v) \
130	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
131	    (sc)->sc_ ## spc ## o + (offs), (v))
132
133#define	HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
134#define	HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
135#define	HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
136#define	HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
137#define	HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
138#define	HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
139#define	HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
140#define	HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
141#define	HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
142#define	HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))
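
/*
 * For illustration, HME_SEB_READ_4(sc, HME_SEBI_STAT) expands to
 *	bus_space_read_4(sc->sc_sebt, sc->sc_sebh, sc->sc_sebo + HME_SEBI_STAT)
 * so each register bank is addressed through its own tag/handle/offset
 * triplet as supplied by the bus front-end.
 */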
143
144#define	HME_MAXERR	5
145#define	HME_WHINE(dev, ...) do {					\
146	if (hme_nerr++ < HME_MAXERR)					\
147		device_printf(dev, __VA_ARGS__);			\
148	if (hme_nerr == HME_MAXERR) {					\
149		device_printf(dev, "too many errors; not reporting any "	\
150		    "more\n");						\
151	}								\
152} while(0)
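
/*
 * HME_WHINE() is a rate limited device_printf(): only the first HME_MAXERR
 * messages are printed.  E.g. (taken from hme_eint() below):
 *	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
 */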
153
154/* Support oversized VLAN frames. */
155#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
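/* That is 1522 bytes: ETHER_MAX_LEN (1518) + ETHER_VLAN_ENCAP_LEN (4). */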
156
157int
158hme_config(struct hme_softc *sc)
159{
160	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
161	struct mii_softc *child;
162	bus_size_t size;
163	int error, rdesc, tdesc, i;
164
165	/*
166	 * HME common initialization.
167	 *
168	 * hme_softc fields that must be initialized by the front-end:
169	 *
170	 * the dma bus tag:
171	 *	sc_dmatag
172	 *
173	 * the bus handles, tags and offsets (split for SBus compatibility):
174	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
175	 *	sc_erx{t,h,o}	(Receiver Unit registers)
176	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
177	 *	sc_mac{t,h,o}	(MAC registers)
178	 *	sc_mif{t,h,o}	(Management Interface registers)
179	 *
180	 * the maximum bus burst size:
181	 *	sc_burst
182	 *
183	 */
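
	/*
	 * Roughly, a front-end attach routine (e.g. the PCI or SBus glue)
	 * would map the register banks, fill in the fields above along with
	 * sc_dev, sc_pci, sc_burst and the station address in
	 * sc_arpcom.ac_enaddr, and only then call hme_config().  This is a
	 * sketch of the expected setup, not an exhaustive list.
	 */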
184
185	/* Make sure the chip is stopped. */
186	hme_stop(sc);
187
188	/*
189	 * Allocate DMA capable memory
190	 * Buffer descriptors must be aligned on a 2048 byte boundary;
191	 * take this into account when calculating the size. Note that
192	 * the maximum number of descriptors (256) occupies 2048 bytes,
193	 * so we allocate that much regardless of HME_N*DESC.
194	 */
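	/*
	 * Worked out: per the note above, 256 descriptors occupy 2048 bytes
	 * (i.e. 8 bytes per descriptor), so the TX ring and the RX ring each
	 * fit in one 2048 byte, 2048 byte aligned block; hme_meminit() lays
	 * the two rings out back to back, hence the 4096 byte allocation.
	 */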
195	size =	4096;
196
197	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
198	    BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
199	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
200	if (error)
201		return (error);
202
203	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
204	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
205	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
206	    &Giant, &sc->sc_cdmatag);
207	if (error)
208		goto fail_ptag;
209
210	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
211	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
212	    HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
213	    NULL, NULL, &sc->sc_rdmatag);
214	if (error)
215		goto fail_ctag;
216
217	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
218	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
219	    HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
220	    NULL, NULL, &sc->sc_tdmatag);
221	if (error)
222		goto fail_rtag;
223
224	/* Allocate control/TX DMA buffer */
225	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
226	    0, &sc->sc_cdmamap);
227	if (error != 0) {
228		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
229		goto fail_ttag;
230	}
231
232	/* Load the buffer */
233	sc->sc_rb.rb_dmabase = 0;
234	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
235	     sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
236	    sc->sc_rb.rb_dmabase == 0) {
237		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
238		    error);
239		goto fail_free;
240	}
241	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
242	    sc->sc_rb.rb_dmabase);
243
244	/*
245	 * Prepare the RX descriptors. rdesc serves as a marker for the last
246	 * initialized descriptor and is used for cleanup if the setup fails.
247	 */
248	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
249		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
250		error = bus_dmamap_create(sc->sc_rdmatag, 0,
251		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
252		if (error != 0)
253			goto fail_rxdesc;
254	}
255	error = bus_dmamap_create(sc->sc_rdmatag, 0,
256	    &sc->sc_rb.rb_spare_dmamap);
257	if (error != 0)
258		goto fail_rxdesc;
259	/* Same for the TX descs. */
260	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
261		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
262		error = bus_dmamap_create(sc->sc_tdmatag, 0,
263		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
264		if (error != 0)
265			goto fail_txdesc;
266	}
267
268	/* Initialize ifnet structure. */
269	ifp->if_softc = sc;
270	if_initname(ifp, device_get_name(sc->sc_dev),
271	    device_get_unit(sc->sc_dev));
272	ifp->if_mtu = ETHERMTU;
273	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX |IFF_MULTICAST;
274	ifp->if_start = hme_start;
275	ifp->if_ioctl = hme_ioctl;
276	ifp->if_init = hme_init;
277	ifp->if_output = ether_output;
278	ifp->if_watchdog = hme_watchdog;
279	ifp->if_snd.ifq_maxlen = HME_NTXQ;
280
281	hme_mifinit(sc);
282
283	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
284	    hme_mediastatus)) != 0) {
285		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
286		goto fail_rxdesc;
287	}
288	sc->sc_mii = device_get_softc(sc->sc_miibus);
289
290	/*
291	 * Walk along the list of attached MII devices and
292	 * establish an `MII instance' to `phy number'
293	 * mapping. We'll use this mapping in media change
294	 * requests to determine which phy to use to program
295	 * the MIF configuration register.
296	 */
297	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
298	     child = LIST_NEXT(child, mii_list)) {
299		/*
300		 * Note: we support just two PHYs: the built-in
301		 * internal device and an external one on the MII
302		 * connector.
303		 */
304		if (child->mii_phy > 1 || child->mii_inst > 1) {
305			device_printf(sc->sc_dev, "cannot accommodate "
306			    "MII device %s at phy %d, instance %d\n",
307			    device_get_name(child->mii_dev),
308			    child->mii_phy, child->mii_inst);
309			continue;
310		}
311
312		sc->sc_phys[child->mii_inst] = child->mii_phy;
313	}
314
315	/* Attach the interface. */
316	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);
317
318	/*
319	 * Tell the upper layer(s) we support long frames.
320	 */
321	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
322	ifp->if_capabilities |= IFCAP_VLAN_MTU;
323
324	callout_init(&sc->sc_tick_ch, 0);
325	return (0);
326
327fail_txdesc:
328	for (i = 0; i < tdesc; i++) {
329		bus_dmamap_destroy(sc->sc_tdmatag,
330		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
331	}
332	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
333fail_rxdesc:
334	for (i = 0; i < rdesc; i++) {
335		bus_dmamap_destroy(sc->sc_rdmatag,
336		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
337	}
338	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
339fail_free:
340	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
341fail_ttag:
342	bus_dma_tag_destroy(sc->sc_tdmatag);
343fail_rtag:
344	bus_dma_tag_destroy(sc->sc_rdmatag);
345fail_ctag:
346	bus_dma_tag_destroy(sc->sc_cdmatag);
347fail_ptag:
348	bus_dma_tag_destroy(sc->sc_pdmatag);
349	return (error);
350}
351
352void
353hme_detach(struct hme_softc *sc)
354{
355	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
356	int i;
357
358	ether_ifdetach(ifp);
359	hme_stop(sc);
360	device_delete_child(sc->sc_dev, sc->sc_miibus);
361
362	for (i = 0; i < HME_NTXQ; i++) {
363		bus_dmamap_destroy(sc->sc_tdmatag,
364		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
365	}
366	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
367	for (i = 0; i < HME_NRXDESC; i++) {
368		bus_dmamap_destroy(sc->sc_rdmatag,
369		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
370	}
371	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
372	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
373	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
374	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
375	bus_dma_tag_destroy(sc->sc_tdmatag);
376	bus_dma_tag_destroy(sc->sc_rdmatag);
377	bus_dma_tag_destroy(sc->sc_cdmatag);
378	bus_dma_tag_destroy(sc->sc_pdmatag);
379}
380
381void
382hme_suspend(struct hme_softc *sc)
383{
384
385	hme_stop(sc);
386}
387
388void
389hme_resume(struct hme_softc *sc)
390{
391	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
392
393	if ((ifp->if_flags & IFF_UP) != 0)
394		hme_init(ifp);
395}
396
397static void
398hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
399{
400	struct hme_softc *sc = (struct hme_softc *)xsc;
401
402	if (error != 0)
403		return;
404	KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
405	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
406}
407
408static void
409hme_tick(void *arg)
410{
411	struct hme_softc *sc = arg;
412	int s;
413
414	s = splnet();
415	mii_tick(sc->sc_mii);
416	splx(s);
417
418	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
419}
420
421static void
422hme_reset(struct hme_softc *sc)
423{
424	int s;
425
426	s = splnet();
427	hme_init(sc);
428	splx(s);
429}
430
431static void
432hme_stop(struct hme_softc *sc)
433{
434	u_int32_t v;
435	int n;
436
437	callout_stop(&sc->sc_tick_ch);
438
439	/* Reset transmitter and receiver */
440	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
441	    HME_SEB_RESET_ERX);
442
443	for (n = 0; n < 20; n++) {
444		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
445		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
446			return;
447		DELAY(20);
448	}
449
450	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
451}
452
453static void
454hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
455    bus_size_t totsize, int error)
456{
457	bus_addr_t *a = xsc;
458
459	KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
460	if (error != 0)
461		return;
462	*a = segs[0].ds_addr;
463}
464
465/*
466 * Discard the contents of an mbuf in the RX ring, making the buffer in the
467 * ring available for subsequent reuse.
468 */
469static __inline void
470hme_discard_rxbuf(struct hme_softc *sc, int ix)
471{
472
473	/*
474	 * Dropped a packet, reinitialize the descriptor and turn the
475	 * ownership back to the hardware.
476	 */
477	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
478	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
479}
480
481static int
482hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
483{
484	struct hme_rxdesc *rd;
485	struct mbuf *m;
486	bus_addr_t ba;
487	bus_dmamap_t map;
488	uintptr_t b;
489	int a, unmap;
490
491	rd = &sc->sc_rb.rb_rxdesc[ri];
492	unmap = rd->hrx_m != NULL;
493	if (unmap && keepold) {
494		/*
495		 * Reinitialize the descriptor flags, as they may have been
496		 * altered by the hardware.
497		 */
498		hme_discard_rxbuf(sc, ri);
499		return (0);
500	}
501	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
502		return (ENOBUFS);
503	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
504	b = mtod(m, uintptr_t);
505	/*
506	 * Required alignment boundary. At least 16 is needed, but since
507	 * the mapping must be done in a way that a burst can start on a
508	 * natural boundary we might need to extend this.
509	 */
510	a = max(HME_MINRXALIGN, sc->sc_burst);
511	/*
512	 * Make sure the buffer is suitably aligned. The 2 byte offset is removed
513	 * when the mbuf is handed up. XXX: this ensures at least 16 byte
514	 * alignment of the header adjacent to the ethernet header, which
515	 * should be sufficient in all cases. Nevertheless, this second-guesses
516	 * ALIGN().
517	 */
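	/*
	 * Example with illustrative numbers: for a = 64 and a buffer that
	 * happens to start at ...0x1810, roundup2() yields ...0x1840, so
	 * 0x30 bytes are trimmed and the buffer handed to the chip starts
	 * on a burst boundary.
	 */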
518	m_adj(m, roundup2(b, a) - b);
519	if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
520	    m, hme_rxdma_callback, &ba, 0) != 0) {
521		m_freem(m);
522		return (ENOBUFS);
523	}
524	if (unmap) {
525		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
526		    BUS_DMASYNC_POSTREAD);
527		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
528	}
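	/*
	 * The new mbuf was loaded into the spare map above; swap that map
	 * with the descriptor's map so the spare is available for the next
	 * replacement.  Loading into the spare first means the old buffer
	 * stays mapped (and usable) if the load fails.
	 */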
529	map = rd->hrx_dmamap;
530	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
531	sc->sc_rb.rb_spare_dmamap = map;
532	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
533	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
534	rd->hrx_m = m;
535	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
536	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
537	return (0);
538}
539
540static int
541hme_meminit(struct hme_softc *sc)
542{
543	struct hme_ring *hr = &sc->sc_rb;
544	struct hme_txdesc *td;
545	bus_addr_t dma;
546	caddr_t p;
547	unsigned int i;
548	int error;
549
550	p = hr->rb_membase;
551	dma = hr->rb_dmabase;
552
553	/*
554	 * Allocate transmit descriptors
555	 */
556	hr->rb_txd = p;
557	hr->rb_txddma = dma;
558	p += HME_NTXDESC * HME_XD_SIZE;
559	dma += HME_NTXDESC * HME_XD_SIZE;
560	/* We have reserved descriptor space until the next 2048 byte boundary.*/
561	dma = (bus_addr_t)roundup((u_long)dma, 2048);
562	p = (caddr_t)roundup((u_long)p, 2048);
563
564	/*
565	 * Allocate receive descriptors
566	 */
567	hr->rb_rxd = p;
568	hr->rb_rxddma = dma;
569	p += HME_NRXDESC * HME_XD_SIZE;
570	dma += HME_NRXDESC * HME_XD_SIZE;
571	/* Again move forward to the next 2048 byte boundary.*/
572	dma = (bus_addr_t)roundup((u_long)dma, 2048);
573	p = (caddr_t)roundup((u_long)p, 2048);
574
575	/*
576	 * Initialize transmit buffer descriptors
577	 */
578	for (i = 0; i < HME_NTXDESC; i++) {
579		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
580		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
581	}
582
583	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
584	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
585	for (i = 0; i < HME_NTXQ; i++) {
586		td = &sc->sc_rb.rb_txdesc[i];
587		if (td->htx_m != NULL) {
588			m_freem(td->htx_m);
589			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
590			    BUS_DMASYNC_POSTWRITE);
591			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
592			td->htx_m = NULL;
593		}
594		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
595	}
596
597	/*
598	 * Initialize receive buffer descriptors
599	 */
600	for (i = 0; i < HME_NRXDESC; i++) {
601		error = hme_add_rxbuf(sc, i, 1);
602		if (error != 0)
603			return (error);
604	}
605
606	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
607	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);
608
609	hr->rb_tdhead = hr->rb_tdtail = 0;
610	hr->rb_td_nbusy = 0;
611	hr->rb_rdtail = 0;
612	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
613	    hr->rb_txddma);
614	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
615	    hr->rb_rxddma);
616	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
617	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
618	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
619	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
620	return (0);
621}
622
623static int
624hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
625    u_int32_t clr, u_int32_t set)
626{
627	int i = 0;
628
629	val &= ~clr;
630	val |= set;
631	HME_MAC_WRITE_4(sc, reg, val);
632	if (clr == 0 && set == 0)
633		return (1);	/* just write, no bits to wait for */
634	do {
635		DELAY(100);
636		i++;
637		val = HME_MAC_READ_4(sc, reg);
638		if (i > 40) {
639			/* After 3.5ms, we should have been done. */
640			device_printf(sc->sc_dev, "timeout while writing to "
641			    "MAC configuration register\n");
642			return (0);
643		}
644	} while ((val & clr) != 0 && (val & set) != set);
645	return (1);
646}
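
/*
 * Typical hme_mac_bitflip() usage (a sketch based on hme_setladrf() and
 * hme_mii_statchg() below): clear the enable bit and wait for the hardware
 * to acknowledge, reprogram the register, then set the bit again:
 *
 *	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
 *	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, v, HME_MAC_RXCFG_ENABLE, 0))
 *		return;
 *	... change the RX MAC configuration ...
 *	hme_mac_bitflip(sc, HME_MACI_RXCFG, v, 0, HME_MAC_RXCFG_ENABLE);
 */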
647
648/*
649 * Initialization of interface; set up initialization block
650 * and transmit/receive descriptor rings.
651 */
652static void
653hme_init(void *xsc)
654{
655	struct hme_softc *sc = (struct hme_softc *)xsc;
656	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
657	u_int8_t *ea;
658	u_int32_t v;
659
660	/*
661	 * Initialization sequence. The numbered steps below correspond
662	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
663	 * Channel Engine manual (part of the PCIO manual).
664	 * See also the STP2002-STQ document from Sun Microsystems.
665	 */
666
667	/* step 1 & 2. Reset the Ethernet Channel */
668	hme_stop(sc);
669
670	/* Re-initialize the MIF */
671	hme_mifinit(sc);
672
673#if 0
674	/* Mask all MIF interrupts, just in case */
675	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
676#endif
677
678	/* step 3. Setup data structures in host memory */
679	if (hme_meminit(sc) != 0) {
680		device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
681		return;
682	}
683
684	/* step 4. TX MAC registers & counters */
685	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
686	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
687	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
688	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
689	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
690
691	/* Load station MAC address */
692	ea = sc->sc_arpcom.ac_enaddr;
693	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
694	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
695	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
696
697	/*
698	 * Init seed for backoff
699	 * (source suggested by manual: low 10 bits of MAC address)
700	 */
701	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
702	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
703
704
705	/* Note: Accepting power-on default for other MAC registers here. */
706
707	/* step 5. RX MAC registers & counters */
708	hme_setladrf(sc, 0);
709
710	/* step 6 & 7. Program Descriptor Ring Base Addresses */
711	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
712	/* Transmit Descriptor ring size: in increments of 16 */
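	/* E.g. a 256 entry ring is encoded as 256 / 16 - 1 = 15. */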
713	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
714
715	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
716	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
717
718	/* step 8. Global Configuration & Interrupt Mask */
719	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
720	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
721		HME_SEB_STAT_HOSTTOTX |
722		HME_SEB_STAT_RXTOHOST |
723		HME_SEB_STAT_TXALL |
724		HME_SEB_STAT_TXPERR |
725		HME_SEB_STAT_RCNTEXP |
726		HME_SEB_STAT_ALL_ERRORS ));
727
728	switch (sc->sc_burst) {
729	default:
730		v = 0;
731		break;
732	case 16:
733		v = HME_SEB_CFG_BURST16;
734		break;
735	case 32:
736		v = HME_SEB_CFG_BURST32;
737		break;
738	case 64:
739		v = HME_SEB_CFG_BURST64;
740		break;
741	}
742	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
743
744	/* step 9. ETX Configuration: use mostly default values */
745
746	/* Enable DMA */
747	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
748	v |= HME_ETX_CFG_DMAENABLE;
749	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
750
751	/* step 10. ERX Configuration */
752	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
753
754	/* Encode Receive Descriptor ring size: four possible values */
755	v &= ~HME_ERX_CFG_RINGSIZEMSK;
756	switch (HME_NRXDESC) {
757	case 32:
758		v |= HME_ERX_CFG_RINGSIZE32;
759		break;
760	case 64:
761		v |= HME_ERX_CFG_RINGSIZE64;
762		break;
763	case 128:
764		v |= HME_ERX_CFG_RINGSIZE128;
765		break;
766	case 256:
767		v |= HME_ERX_CFG_RINGSIZE256;
768		break;
769	default:
770		printf("hme: invalid Receive Descriptor ring size\n");
771		break;
772	}
773
774	/* Enable DMA, fix RX first byte offset. */
775	v &= ~HME_ERX_CFG_FBO_MASK;
776	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
777	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
778	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
779
780	/* step 11. XIF Configuration */
781	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
782	v |= HME_MAC_XIF_OE;
783	/* If an external transceiver is connected, enable its MII drivers */
784	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
785		v |= HME_MAC_XIF_MIIENABLE;
786	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
787	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
788
789	/* step 12. RX_MAC Configuration Register */
790	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
791	v |= HME_MAC_RXCFG_ENABLE;
792	v &= ~(HME_MAC_RXCFG_DCRCS);
793	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
794	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
795
796	/* step 13. TX_MAC Configuration Register */
797	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
798	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
799	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
800	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
801
802	/* step 14. Issue Transmit Pending command */
803
804#ifdef HMEDEBUG
805	/* Debug: double-check. */
806	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
807	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
808	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
809	    HME_ERX_READ_4(sc, HME_ERXI_RING),
810	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
811	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
812	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
813	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
814	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
815	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
816	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
817	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));
818#endif
819
820	/* Start the one second timer. */
821	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
822
823	ifp->if_flags |= IFF_RUNNING;
824	ifp->if_flags &= ~IFF_OACTIVE;
825	ifp->if_timer = 0;
826	hme_start(ifp);
827}
828
829struct hme_txdma_arg {
830	struct hme_softc	*hta_sc;
831	struct hme_txdesc	*hta_htx;
832	int			hta_ndescs;
833};
834
835/*
836 * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf()
837 * are readable from the nearest burst boundary on (i.e. potentially before
838 * ds_addr) to the first boundary beyond the end. This is usually a safe
839 * assumption to make, but is not documented.
840 */
841static void
842hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
843    bus_size_t totsz, int error)
844{
845	struct hme_txdma_arg *ta = xsc;
846	struct hme_txdesc *htx;
847	bus_size_t len = 0;
848	caddr_t txd;
849	u_int32_t flags = 0;
850	int i, tdhead, pci;
851
852	if (error != 0)
853		return;
854
855	tdhead = ta->hta_sc->sc_rb.rb_tdhead;
856	pci = ta->hta_sc->sc_pci;
857	txd = ta->hta_sc->sc_rb.rb_txd;
858	htx = ta->hta_htx;
859
860	if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
861		ta->hta_ndescs = -1;
862		return;
863	}
864	ta->hta_ndescs = nsegs;
865
866	for (i = 0; i < nsegs; i++) {
867		if (segs[i].ds_len == 0)
868			continue;
869
870		/* Fill the ring entry. */
871		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
872		if (len == 0)
873			flags |= HME_XD_SOP;
874		if (len + segs[i].ds_len == totsz)
875			flags |= HME_XD_EOP;
876		CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
877		    "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
878		    (u_int)segs[i].ds_addr);
879		HME_XD_SETFLAGS(pci, txd, tdhead, flags);
880		HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);
881
882		ta->hta_sc->sc_rb.rb_td_nbusy++;
883		htx->htx_lastdesc = tdhead;
884		tdhead = (tdhead + 1) % HME_NTXDESC;
885		len += segs[i].ds_len;
886	}
887	ta->hta_sc->sc_rb.rb_tdhead = tdhead;
888	KASSERT((flags & HME_XD_EOP) != 0,
889	    ("hme_txdma_callback: missed end of packet!"));
890}
891
892/*
893 * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and
894 * start the transmission.
895 * Returns 0 on success, -1 if there were not enough free descriptors to map
896 * the packet, or an errno otherwise.
897 */
898static int
899hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
900{
901	struct hme_txdma_arg cba;
902	struct hme_txdesc *td;
903	int error, si, ri;
904	u_int32_t flags;
905
906	si = sc->sc_rb.rb_tdhead;
907	if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
908		return (-1);
909	td->htx_m = m0;
910	cba.hta_sc = sc;
911	cba.hta_htx = td;
912	if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
913	     m0, hme_txdma_callback, &cba, 0)) != 0)
914		goto fail;
915	if (cba.hta_ndescs == -1) {
916		error = -1;
917		goto fail;
918	}
919	bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
920	    BUS_DMASYNC_PREWRITE);
921
922	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
923	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);
924
925	/* Turn descriptor ownership over to the hme, back to front. */
926	ri = sc->sc_rb.rb_tdhead;
927	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
928	    ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
929	do {
930		ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
931		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
932		    HME_XD_OWN;
933		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
934		    ri, si, flags);
935		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
936	} while (ri != si);
937
938	/* start the transmission. */
939	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
940	return (0);
941fail:
942	bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
943	return (error);
944}
945
946/*
947 * Pass a packet to the higher levels.
948 */
949static void
950hme_read(struct hme_softc *sc, int ix, int len)
951{
952	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
953	struct mbuf *m;
954
955	if (len <= sizeof(struct ether_header) ||
956	    len > HME_MAX_FRAMESIZE) {
957#ifdef HMEDEBUG
958		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
959		    len);
960#endif
961		ifp->if_ierrors++;
962		hme_discard_rxbuf(sc, ix);
963		return;
964	}
965
966	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
967	CTR1(KTR_HME, "hme_read: len %d", len);
968
969	if (hme_add_rxbuf(sc, ix, 0) != 0) {
970		/*
971		 * hme_add_rxbuf will leave the old buffer in the ring until
972		 * it is sure that a new buffer can be mapped. If it can not,
973		 * drop the packet, but leave the interface up.
974		 */
975		ifp->if_iqdrops++;
976		hme_discard_rxbuf(sc, ix);
977		return;
978	}
979
980	ifp->if_ipackets++;
981
982	m->m_pkthdr.rcvif = ifp;
983	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
984	m_adj(m, HME_RXOFFS);
985	/* Pass the packet up. */
986	(*ifp->if_input)(ifp, m);
987}
988
989static void
990hme_start(struct ifnet *ifp)
991{
992	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
993	struct mbuf *m;
994	int error, enq = 0;
995
996	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
997		return;
998
999	error = 0;
1000	for (;;) {
1001		IF_DEQUEUE(&ifp->if_snd, m);
1002		if (m == NULL)
1003			break;
1004
1005		error = hme_load_txmbuf(sc, m);
1006		if (error == -1) {
1007			ifp->if_flags |= IFF_OACTIVE;
1008			IF_PREPEND(&ifp->if_snd, m);
1009			break;
1010		} else if (error > 0) {
1011			printf("hme_start: error %d while loading mbuf\n",
1012			    error);
1013		} else {
1014			enq = 1;
1015			BPF_MTAP(ifp, m);
1016		}
1017	}
1018
1019	if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
1020		ifp->if_flags |= IFF_OACTIVE;
1021	/* Set watchdog timer if a packet was queued */
1022	if (enq) {
1023		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1024		    BUS_DMASYNC_PREWRITE);
1025		ifp->if_timer = 5;
1026	}
1027}
1028
1029/*
1030 * Transmit interrupt.
1031 */
1032static void
1033hme_tint(struct hme_softc *sc)
1034{
1035	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1036	struct hme_txdesc *htx;
1037	unsigned int ri, txflags;
1038
1039	/*
1040	 * Unload collision counters
1041	 */
1042	ifp->if_collisions +=
1043		HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
1044		HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
1045		HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
1046		HME_MAC_READ_4(sc, HME_MACI_LTCNT);
1047
1048	/*
1049	 * then clear the hardware counters.
1050	 */
1051	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
1052	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
1053	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
1054	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
1055
1056	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1057	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1058	/* Fetch current position in the transmit ring */
1059	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
1060		if (sc->sc_rb.rb_td_nbusy <= 0) {
1061			CTR0(KTR_HME, "hme_tint: not busy!");
1062			break;
1063		}
1064
1065		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
1066		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
1067
1068		if ((txflags & HME_XD_OWN) != 0)
1069			break;
1070
1071		CTR0(KTR_HME, "hme_tint: not owned");
1072		--sc->sc_rb.rb_td_nbusy;
1073		ifp->if_flags &= ~IFF_OACTIVE;
1074
1075		/* Complete packet transmitted? */
1076		if ((txflags & HME_XD_EOP) == 0)
1077			continue;
1078
1079		KASSERT(htx->htx_lastdesc == ri,
1080		    ("hme_tint: ring indices skewed: %d != %d!",
1081		     htx->htx_lastdesc, ri));
1082		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
1083		    BUS_DMASYNC_POSTWRITE);
1084		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1085
1086		ifp->if_opackets++;
1087		m_freem(htx->htx_m);
1088		htx->htx_m = NULL;
1089		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
1090		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
1091		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1092	}
1093	/* Turn off watchdog */
1094	if (sc->sc_rb.rb_td_nbusy == 0)
1095		ifp->if_timer = 0;
1096
1097	/* Update ring */
1098	sc->sc_rb.rb_tdtail = ri;
1099
1100	hme_start(ifp);
1101
1102	if (sc->sc_rb.rb_td_nbusy == 0)
1103		ifp->if_timer = 0;
1104}
1105
1106/*
1107 * Receive interrupt.
1108 */
1109static void
1110hme_rint(struct hme_softc *sc)
1111{
1112	caddr_t xdr = sc->sc_rb.rb_rxd;
1113	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1114	unsigned int ri, len;
1115	int progress = 0;
1116	u_int32_t flags;
1117
1118	/*
1119	 * Process all buffers with valid data.
1120	 */
1121	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1122	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
1123		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
1124		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
1125		if ((flags & HME_XD_OWN) != 0)
1126			break;
1127
1128		progress++;
1129		if ((flags & HME_XD_OFL) != 0) {
1130			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
1131			    "flags=0x%x\n", ri, flags);
1132			ifp->if_ierrors++;
1133			hme_discard_rxbuf(sc, ri);
1134		} else {
1135			len = HME_XD_DECODE_RSIZE(flags);
1136			hme_read(sc, ri, len);
1137		}
1138	}
1139	if (progress) {
1140		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1141		    BUS_DMASYNC_PREWRITE);
1142	}
1143	sc->sc_rb.rb_rdtail = ri;
1144}
1145
1146static void
1147hme_eint(struct hme_softc *sc, u_int status)
1148{
1149
1150	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
1151		device_printf(sc->sc_dev, "XXXlink status changed\n");
1152		return;
1153	}
1154
1155	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
1156}
1157
1158void
1159hme_intr(void *v)
1160{
1161	struct hme_softc *sc = (struct hme_softc *)v;
1162	u_int32_t status;
1163
1164	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1165	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
1166
1167	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
1168		hme_eint(sc, status);
1169
1170	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
1171		hme_tint(sc);
1172
1173	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
1174		hme_rint(sc);
1175}
1176
1177
1178static void
1179hme_watchdog(struct ifnet *ifp)
1180{
1181	struct hme_softc *sc = ifp->if_softc;
1182#ifdef HMEDEBUG
1183	u_int32_t status;
1184
1185	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1186	CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);
1187#endif
1188	device_printf(sc->sc_dev, "device timeout\n");
1189	++ifp->if_oerrors;
1190
1191	hme_reset(sc);
1192}
1193
1194/*
1195 * Initialize the MII Management Interface
1196 */
1197static void
1198hme_mifinit(struct hme_softc *sc)
1199{
1200	u_int32_t v;
1201
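	/*
	 * The MIF can either shift MII management frames itself (frame mode)
	 * or leave the bit clocking to software (bit-bang mode).  This driver
	 * uses frame mode throughout; hme_mii_readreg() and hme_mii_writereg()
	 * below depend on it.
	 */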
1202	/* Configure the MIF in frame mode */
1203	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1204	v &= ~HME_MIF_CFG_BBMODE;
1205	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1206}
1207
1208/*
1209 * MII interface
1210 */
1211int
1212hme_mii_readreg(device_t dev, int phy, int reg)
1213{
1214	struct hme_softc *sc = device_get_softc(dev);
1215	int n;
1216	u_int32_t v;
1217
1218	/* Select the desired PHY in the MIF configuration register */
1219	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1220	/* Clear PHY select bit */
1221	v &= ~HME_MIF_CFG_PHY;
1222	if (phy == HME_PHYAD_EXTERNAL)
1223		/* Set PHY select bit to get at external device */
1224		v |= HME_MIF_CFG_PHY;
1225	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1226
1227	/* Construct the frame command */
1228	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1229	    HME_MIF_FO_TAMSB |
1230	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
1231	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
1232	    (reg << HME_MIF_FO_REGAD_SHIFT);
1233
1234	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1235	for (n = 0; n < 100; n++) {
1236		DELAY(1);
1237		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1238		if (v & HME_MIF_FO_TALSB)
1239			return (v & HME_MIF_FO_DATA);
1240	}
1241
1242	device_printf(sc->sc_dev, "mii_read timeout\n");
1243	return (0);
1244}
1245
1246int
1247hme_mii_writereg(device_t dev, int phy, int reg, int val)
1248{
1249	struct hme_softc *sc = device_get_softc(dev);
1250	int n;
1251	u_int32_t v;
1252
1253	/* Select the desired PHY in the MIF configuration register */
1254	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1255	/* Clear PHY select bit */
1256	v &= ~HME_MIF_CFG_PHY;
1257	if (phy == HME_PHYAD_EXTERNAL)
1258		/* Set PHY select bit to get at external device */
1259		v |= HME_MIF_CFG_PHY;
1260	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1261
1262	/* Construct the frame command */
1263	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
1264	    HME_MIF_FO_TAMSB				|
1265	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
1266	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
1267	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
1268	    (val & HME_MIF_FO_DATA);
1269
1270	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1271	for (n = 0; n < 100; n++) {
1272		DELAY(1);
1273		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1274		if (v & HME_MIF_FO_TALSB)
1275			return (1);
1276	}
1277
1278	device_printf(sc->sc_dev, "mii_write timeout\n");
1279	return (0);
1280}
1281
1282void
1283hme_mii_statchg(device_t dev)
1284{
1285	struct hme_softc *sc = device_get_softc(dev);
1286	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
1287	int phy = sc->sc_phys[instance];
1288	u_int32_t v;
1289
1290#ifdef HMEDEBUG
1291	if (sc->sc_debug)
1292		printf("hme_mii_statchg: status change: phy = %d\n", phy);
1293#endif
1294
1295	/* Select the current PHY in the MIF configuration register */
1296	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1297	v &= ~HME_MIF_CFG_PHY;
1298	if (phy == HME_PHYAD_EXTERNAL)
1299		v |= HME_MIF_CFG_PHY;
1300	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1301
1302	/* Set the MAC Full Duplex bit appropriately */
1303	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
1304	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
1305		return;
1306	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1307		v |= HME_MAC_TXCFG_FULLDPLX;
1308	else
1309		v &= ~HME_MAC_TXCFG_FULLDPLX;
1310	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
1311	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
1312		return;
1313}
1314
1315static int
1316hme_mediachange(struct ifnet *ifp)
1317{
1318	struct hme_softc *sc = ifp->if_softc;
1319
1320	return (mii_mediachg(sc->sc_mii));
1321}
1322
1323static void
1324hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1325{
1326	struct hme_softc *sc = ifp->if_softc;
1327
1328	if ((ifp->if_flags & IFF_UP) == 0)
1329		return;
1330
1331	mii_pollstat(sc->sc_mii);
1332	ifmr->ifm_active = sc->sc_mii->mii_media_active;
1333	ifmr->ifm_status = sc->sc_mii->mii_media_status;
1334}
1335
1336/*
1337 * Process an ioctl request.
1338 */
1339static int
1340hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1341{
1342	struct hme_softc *sc = ifp->if_softc;
1343	struct ifreq *ifr = (struct ifreq *)data;
1344	int s, error = 0;
1345
1346	s = splnet();
1347
1348	switch (cmd) {
1349	case SIOCSIFFLAGS:
1350		if ((ifp->if_flags & IFF_UP) == 0 &&
1351		    (ifp->if_flags & IFF_RUNNING) != 0) {
1352			/*
1353			 * If interface is marked down and it is running, then
1354			 * stop it.
1355			 */
1356			hme_stop(sc);
1357			ifp->if_flags &= ~IFF_RUNNING;
1358		} else if ((ifp->if_flags & IFF_UP) != 0 &&
1359		    	   (ifp->if_flags & IFF_RUNNING) == 0) {
1360			/*
1361			 * If interface is marked up and it is stopped, then
1362			 * start it.
1363			 */
1364			hme_init(sc);
1365		} else if ((ifp->if_flags & IFF_UP) != 0) {
1366			/*
1367			 * Reset the interface to pick up changes in any other
1368			 * flags that affect hardware registers.
1369			 */
1370			hme_init(sc);
1371		}
1372#ifdef HMEDEBUG
1373		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1374#endif
1375		break;
1376
1377	case SIOCADDMULTI:
1378	case SIOCDELMULTI:
1379		hme_setladrf(sc, 1);
1380		error = 0;
1381		break;
1382	case SIOCGIFMEDIA:
1383	case SIOCSIFMEDIA:
1384		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
1385		break;
1386	default:
1387		error = ether_ioctl(ifp, cmd, data);
1388		break;
1389	}
1390
1391	splx(s);
1392	return (error);
1393}
1394
1395/*
1396 * Set up the logical address filter.
1397 */
1398static void
1399hme_setladrf(struct hme_softc *sc, int reenable)
1400{
1401	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1402	struct ifmultiaddr *inm;
1403	struct sockaddr_dl *sdl;
1404	u_char *cp;
1405	u_int32_t crc;
1406	u_int32_t hash[4];
1407	u_int32_t macc;
1408	int len;
1409
1410	/* Clear hash table */
1411	hash[3] = hash[2] = hash[1] = hash[0] = 0;
1412
1413	/* Get current RX configuration */
1414	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1415
1416	/*
1417	 * Disable the receiver while changing its state, as the documentation
1418	 * mandates.
1419	 * We then must wait until the bit clears in the register. This should
1420	 * take at most 3.5ms.
1421	 */
1422	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
1423		return;
1424	/* Disable the hash filter before writing to the filter registers. */
1425	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1426	    HME_MAC_RXCFG_HENABLE, 0))
1427		return;
1428
1429	if (reenable)
1430		macc |= HME_MAC_RXCFG_ENABLE;
1431	else
1432		macc &= ~HME_MAC_RXCFG_ENABLE;
1433
1434	if ((ifp->if_flags & IFF_PROMISC) != 0) {
1435		/* Turn on promiscuous mode; turn off the hash filter */
1436		macc |= HME_MAC_RXCFG_PMISC;
1437		macc &= ~HME_MAC_RXCFG_HENABLE;
1438		ifp->if_flags |= IFF_ALLMULTI;
1439		goto chipit;
1440	}
1441
1442	/* Turn off promiscuous mode; turn on the hash filter */
1443	macc &= ~HME_MAC_RXCFG_PMISC;
1444	macc |= HME_MAC_RXCFG_HENABLE;
1445
1446	/*
1447	 * Set up multicast address filter by passing all multicast addresses
1448	 * through a crc generator, and then using the high order 6 bits as an
1449	 * index into the 64 bit logical address filter.  The two high order
1450	 * bits select the word, while the rest of the bits select the bit
1451	 * within the word.
1452	 */
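	/*
	 * The loop below is the bit-serial, little-endian form of the
	 * standard Ethernet CRC-32 (polynomial MC_POLY_LE); it should be
	 * equivalent to what the generic ether_crc32_le() helper computes,
	 * but is kept open-coded here.
	 */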
1453
1454	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
1455		if (inm->ifma_addr->sa_family != AF_LINK)
1456			continue;
1457		sdl = (struct sockaddr_dl *)inm->ifma_addr;
1458		cp = LLADDR(sdl);
1459		crc = 0xffffffff;
1460		for (len = sdl->sdl_alen; --len >= 0;) {
1461			int octet = *cp++;
1462			int i;
1463
1464#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
1465			for (i = 0; i < 8; i++) {
1466				if ((crc & 1) ^ (octet & 1)) {
1467					crc >>= 1;
1468					crc ^= MC_POLY_LE;
1469				} else {
1470					crc >>= 1;
1471				}
1472				octet >>= 1;
1473			}
1474		}
1475		/* Just want the 6 most significant bits. */
1476		crc >>= 26;
1477
1478		/* Set the corresponding bit in the filter. */
1479		hash[crc >> 4] |= 1 << (crc & 0xf);
1480	}
1481
1482	ifp->if_flags &= ~IFF_ALLMULTI;
1483
1484chipit:
1485	/* Now load the hash table into the chip */
1486	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
1487	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
1488	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
1489	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
1490	hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
1491	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
1492}
1493