if_ate.c revision 194015
/*-
 * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* TODO
 *
 * 1) Turn on the clock in pmc?  Turn off?
 * 2) GPIO initialization in board setup code.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/at91/if_ate.c 194015 2009-06-11 17:05:13Z avg $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/at91/if_atereg.h>

#include "miibus_if.h"

#define	ATE_MAX_TX_BUFFERS	2	/* We have ping-pong tx buffers */
#define	ATE_MAX_RX_BUFFERS	64

/*
 * Driver-specific flags.
 */
#define	ATE_FLAG_DETACHING	0x01
#define	ATE_FLAG_MULTICAST	0x02

struct ate_softc
{
	struct ifnet	*ifp;		/* ifnet pointer */
	struct mtx	sc_mtx;		/* Basically a perimeter lock */
	device_t	dev;		/* Myself */
	device_t	miibus;		/* My child miibus */
	struct resource	*irq_res;	/* IRQ resource */
	struct resource	*mem_res;	/* Memory resource */
	struct callout	tick_ch;	/* Tick callout */
	struct ifmib_iso_8802_3 mibdata; /* Stuff for network mgmt */
	struct mbuf	*sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
	bus_dma_tag_t	mtag;		/* bus dma tag for mbufs */
	bus_dma_tag_t	rxtag;
	bus_dma_tag_t	rx_desc_tag;
	bus_dmamap_t	rx_desc_map;
	bus_dmamap_t	rx_map[ATE_MAX_RX_BUFFERS];
	bus_dmamap_t	tx_map[ATE_MAX_TX_BUFFERS];
	bus_addr_t	rx_desc_phys;
	eth_rx_desc_t	*rx_descs;
	void		*rx_buf[ATE_MAX_RX_BUFFERS]; /* RX buffer space */
	void		*intrhand;	/* Interrupt handle */
	int		flags;
	int		if_flags;
	int		rx_buf_ptr;
	int		txcur;		/* Current TX map pointer */
	int		use_rmii;
};

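/*
 * Register access helpers: thin wrappers around the bus_space accessors
 * for the EMAC register window held in mem_res.
 */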
static inline uint32_t
RD4(struct ate_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}

static inline void
WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}

static inline void
BARRIER(struct ate_softc *sc, bus_size_t off, bus_size_t len, int flags)
{

	bus_barrier(sc->mem_res, off, len, flags);
}

#define	ATE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ATE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	ATE_LOCK_INIT(_sc)					\
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
	    MTX_NETWORK_LOCK, MTX_DEF)
#define	ATE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define	ATE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t ate_devclass;

/*
 * ifnet entry points.
 */
static void	ateinit_locked(void *);
static void	atestart_locked(struct ifnet *);

static void	ateinit(void *);
static void	atestart(struct ifnet *);
static void	atestop(struct ate_softc *);
static int	ateioctl(struct ifnet *, u_long, caddr_t);

/*
 * Bus entry points.
 */
static int	ate_probe(device_t dev);
static int	ate_attach(device_t dev);
static int	ate_detach(device_t dev);
static void	ate_intr(void *);

/*
 * Helper routines.
 */
static int	ate_activate(device_t dev);
static void	ate_deactivate(struct ate_softc *sc);
static int	ate_ifmedia_upd(struct ifnet *ifp);
static void	ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	ate_get_mac(struct ate_softc *sc, u_char *eaddr);
static void	ate_set_mac(struct ate_softc *sc, u_char *eaddr);
static void	ate_rxfilter(struct ate_softc *sc);

/*
 * The AT91 family of products has an ethernet controller called EMAC.
 * However, it isn't self-identifying.  It is anticipated that the parent
 * bus code will take care to only add ate devices where they really are.
 * As such, we do nothing here to identify the device and just set its name.
 */
static int
ate_probe(device_t dev)
{

	device_set_desc(dev, "EMAC");
	return (0);
}

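/*
 * Attach the driver: allocate bus resources, set up DMA, find the PHY,
 * and hook the interface into the network stack.
 */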
static int
ate_attach(device_t dev)
{
	struct ate_softc *sc;
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	u_char eaddr[ETHER_ADDR_LEN];
	uint32_t rnd;
	int rid, err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	ATE_LOCK_INIT(sc);

	/*
	 * Allocate resources.
	 */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		err = ENOMEM;
		goto out;
	}
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resources.\n");
		err = ENOMEM;
		goto out;
	}

	err = ate_activate(dev);
	if (err)
		goto out;

	sc->use_rmii = (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;

	/* Sysctls */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
	    CTLFLAG_RD, &sc->use_rmii, 0, "rmii in use");

	/* Calling atestop before ifp is set is OK. */
	ATE_LOCK(sc);
	atestop(sc);
	ATE_UNLOCK(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	if ((err = ate_get_mac(sc, eaddr)) != 0) {
		/*
		 * No MAC address configured.  Generate a random one.
		 */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		rnd = arc4random();

		/*
		 * Set OUI to convenient locally assigned address.  'b'
		 * is 0x62, which has the locally assigned bit set, and
		 * the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;
	}

	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (mii_phy_probe(dev, &sc->miibus, ate_ifmedia_upd, ate_ifmedia_sts)) {
		device_printf(dev, "Cannot find my PHY.\n");
		err = ENXIO;
		goto out;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;	/* The hw bits are already set. */
	ifp->if_start = atestart;
	ifp->if_ioctl = ateioctl;
	ifp->if_init = ateinit;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_timer = 0;
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
	sc->if_flags = ifp->if_flags;

	ether_ifattach(ifp, eaddr);

	/*
	 * Activate the interrupt.
	 */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ate_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not establish interrupt handler.\n");
		ether_ifdetach(ifp);
		goto out;
	}

out:
	if (err)
		ate_detach(dev);
	return (err);
}

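/*
 * Detach the driver: stop the interface, pull it out of the network
 * stack, and release every resource acquired in ate_attach().
 */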
static int
ate_detach(device_t dev)
{
	struct ate_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ate: %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		ATE_LOCK(sc);
		sc->flags |= ATE_FLAG_DETACHING;
		atestop(sc);
		ATE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		ether_ifdetach(ifp);
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ate_deactivate(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}
	ATE_LOCK_DESTROY(sc);
	return (0);
}

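/*
 * bus_dmamap_load() callback: record the bus address of the RX
 * descriptor ring.
 */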
static void
ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;
	sc->rx_desc_phys = segs[0].ds_addr;
}

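/*
 * bus_dmamap_load() callback: plug a freshly loaded RX buffer into the
 * descriptor slot selected by rx_buf_ptr.
 */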
static void
ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;
	int i;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;
	i = sc->rx_buf_ptr;

	/*
	 * For the last buffer, set the wrap bit so the controller
	 * restarts from the first descriptor.
	 */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	if (i == ATE_MAX_RX_BUFFERS - 1)
		sc->rx_descs[i].addr = segs[0].ds_addr | ETH_WRAP_BIT;
	else
		sc->rx_descs[i].addr = segs[0].ds_addr;
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
	sc->rx_descs[i].status = 0;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD);
}

/*
 * Compute the multicast filter for this device using the standard
 * algorithm.  I wonder why this isn't in ether somewhere as a lot
 * of different MAC chips use this method (or the reverse-the-bits
 * method).
 */
static int
ate_setmcast(struct ate_softc *sc)
{
	uint32_t index;
	uint32_t mcaf[2];
	u_char *af = (u_char *) mcaf;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;

	ifp = sc->ifp;

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		return (0);
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		WR4(sc, ETH_HSL, 0xffffffff);
		WR4(sc, ETH_HSH, 0xffffffff);
		return (1);
	}

	/*
	 * Compute the multicast hash.
	 */
	mcaf[0] = 0;
	mcaf[1] = 0;
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		af[index >> 3] |= 1 << (index & 7);
	}
	IF_ADDR_UNLOCK(ifp);

	/*
	 * Write the hash to the hash register.  This card can also
	 * accept unicast packets as well as multicast packets using this
	 * register for easier bridging operations, but we don't take
	 * advantage of that.  Locks here are to avoid LOR with the
	 * IF_ADDR_LOCK, but might not be strictly necessary.
	 */
	WR4(sc, ETH_HSL, mcaf[0]);
	WR4(sc, ETH_HSH, mcaf[1]);
	return (mcaf[0] || mcaf[1]);
}

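/*
 * Allocate the DMA tags, maps, RX descriptor ring and RX buffers, and
 * point the EMAC receive buffer queue at the ring.
 */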
static int
ate_activate(device_t dev)
{
	struct ate_softc *sc;
	int err, i;

	sc = device_get_softc(dev);

	/*
	 * Allocate DMA tags and maps.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->mtag);
	if (err != 0)
		goto errout;
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]);
		if (err != 0)
			goto errout;
	}

	/*
	 * Allocate DMA tags and maps for RX.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag);
	if (err != 0)
		goto errout;

	/*
	 * DMA tag and map for the RX descriptors.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), sizeof(eth_rx_desc_t),
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1,
	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
	    &sc->sc_mtx, &sc->rx_desc_tag);
	if (err != 0)
		goto errout;
	if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
		goto errout;
	if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
	    sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t),
	    ate_getaddr, sc, 0) != 0)
		goto errout;

	/*
	 * Allocate our RX buffers.  The chip receives into a ring of RX
	 * descriptors that we fill in here, one buffer per descriptor.
	 */
	for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
		sc->rx_buf_ptr = i;
		if (bus_dmamem_alloc(sc->rxtag, (void **)&sc->rx_buf[i],
		      BUS_DMA_NOWAIT, &sc->rx_map[i]) != 0)
			goto errout;
		if (bus_dmamap_load(sc->rxtag, sc->rx_map[i], sc->rx_buf[i],
		    MCLBYTES, ate_load_rx_buf, sc, 0) != 0)
			goto errout;
	}
	sc->rx_buf_ptr = 0;
	/* Flush the memory for the EMAC rx descriptor. */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	/* Write the descriptor queue address. */
	WR4(sc, ETH_RBQP, sc->rx_desc_phys);
	return (0);

errout:
	return (ENOMEM);
}

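/*
 * Tear down the DMA tags, maps and buffers set up by ate_activate().
 */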
static void
ate_deactivate(struct ate_softc *sc)
{
	int i;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	if (sc->mtag != NULL) {
		for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
			if (sc->sent_mbuf[i] != NULL) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[i],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
				m_freem(sc->sent_mbuf[i]);
			}
			bus_dmamap_destroy(sc->mtag, sc->tx_map[i]);
			sc->sent_mbuf[i] = NULL;
			sc->tx_map[i] = NULL;
		}
		bus_dma_tag_destroy(sc->mtag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL) {
			if (sc->rx_desc_phys != 0) {
				bus_dmamap_sync(sc->rx_desc_tag,
				    sc->rx_desc_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rx_desc_tag,
				    sc->rx_desc_map);
				sc->rx_desc_phys = 0;
			}
		}
	}
	if (sc->rxtag != NULL) {
		for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
			if (sc->rx_buf[i] != NULL) {
				if (sc->rx_descs[i].addr != 0) {
					bus_dmamap_sync(sc->rxtag,
					    sc->rx_map[i],
					    BUS_DMASYNC_POSTREAD);
					bus_dmamap_unload(sc->rxtag,
					    sc->rx_map[i]);
					sc->rx_descs[i].addr = 0;
				}
				bus_dmamem_free(sc->rxtag, sc->rx_buf[i],
				    sc->rx_map[i]);
				sc->rx_buf[i] = NULL;
				sc->rx_map[i] = NULL;
			}
		}
		bus_dma_tag_destroy(sc->rxtag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL)
			bus_dmamem_free(sc->rx_desc_tag, sc->rx_descs,
			    sc->rx_desc_map);
		bus_dma_tag_destroy(sc->rx_desc_tag);
		sc->rx_descs = NULL;
		sc->rx_desc_tag = NULL;
	}
}

/*
 * Change media according to request.
 */
static int
ate_ifmedia_upd(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_mediachg(mii);
	ATE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ATE_UNLOCK(sc);
}

static void
ate_stat_update(struct ate_softc *sc, int active)
{
	uint32_t reg;

	/*
	 * The speed and full/half-duplex state need to be reflected
	 * in the ETH_CFG register.
	 */
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_SPD | ETH_CFG_FD);
	if (IFM_SUBTYPE(active) != IFM_10_T)
		reg |= ETH_CFG_SPD;
	if (active & IFM_FDX)
		reg |= ETH_CFG_FD;
	WR4(sc, ETH_CFG, reg);
}

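/*
 * Once-a-second timer: watch for PHY media changes and fold the
 * hardware statistics counters into the MIB and interface counters.
 */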
static void
ate_tick(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	int active;
	uint32_t c;

	/*
	 * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and, if that
	 * bit is clear, asks the MII whether there's a link.  Not sure if
	 * we should do the same thing here or not.
	 */
	ATE_ASSERT_LOCKED(sc);
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    active != mii->mii_media_active)
			ate_stat_update(sc, mii->mii_media_active);
	}

	/*
	 * Update the stats as best we can.  When we're done, clear
	 * the status counters and start over.  We're supposed to read these
	 * registers often enough that they won't overflow.  Hopefully
	 * once a second is often enough.  Some don't map well to
	 * the dot3Stats mib, so for those we just count them as general
	 * errors.  Stats for iframes, ibytes, oframes and obytes are
	 * collected elsewhere.  These registers zero on a read to prevent
	 * races.  For all the collision stats, also update the collision
	 * stats for the interface.
	 */
	sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
	sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
	c = RD4(sc, ETH_SCOL);
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSingleCollisionFrames += c;
	c = RD4(sc, ETH_MCOL);
	sc->mibdata.dot3StatsMultipleCollisionFrames += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
	sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
	c = RD4(sc, ETH_LCOL);
	sc->mibdata.dot3StatsLateCollisions += c;
	ifp->if_collisions += c;
	c = RD4(sc, ETH_ECOL);
	sc->mibdata.dot3StatsExcessiveCollisions += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
	sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
	sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);

	/*
	 * Not sure where to lump these, so count them against the errors
	 * for the interface.
	 */
	sc->ifp->if_oerrors += RD4(sc, ETH_TUE);
	sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
	    RD4(sc, ETH_USF);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

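/*
 * Program the EMAC's primary station address registers (SA1L/SA1H)
 * with the given ethernet address.
 */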
static void
ate_set_mac(struct ate_softc *sc, u_char *eaddr)
{

	WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
	    (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
}

static int
ate_get_mac(struct ate_softc *sc, u_char *eaddr)
{
	bus_size_t sa_low_reg[] = { ETH_SA1L, ETH_SA2L, ETH_SA3L, ETH_SA4L };
	bus_size_t sa_high_reg[] = { ETH_SA1H, ETH_SA2H, ETH_SA3H, ETH_SA4H };
	uint32_t low, high;
	int i;

	/*
	 * The boot loader sets up the MAC with an address, if one is
	 * configured there.  Grab one MAC address from the SA[1-4][HL]
	 * registers.
	 */
	for (i = 0; i < 4; i++) {
		low = RD4(sc, sa_low_reg[i]);
		high = RD4(sc, sa_high_reg[i]);
		if ((low | (high & 0xffff)) != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			return (0);
		}
	}
	return (ENXIO);
}

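/*
 * Interrupt handler: pass received frames up the stack, reclaim
 * completed transmit buffers, and apply the receive-buffer-not-available
 * (RBNA) errata workaround.
 */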
static void
ate_intr(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mbuf *mb;
	void *bp;
	uint32_t status, reg, rx_stat;
	int i;

	status = RD4(sc, ETH_ISR);
	if (status == 0)
		return;
	if (status & ETH_ISR_RCOM) {
		bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
		    BUS_DMASYNC_POSTREAD);
		while (sc->rx_descs[sc->rx_buf_ptr].addr & ETH_CPU_OWNER) {
			i = sc->rx_buf_ptr;
			sc->rx_buf_ptr = (i + 1) % ATE_MAX_RX_BUFFERS;
			bp = sc->rx_buf[i];
			rx_stat = sc->rx_descs[i].status;
			if ((rx_stat & ETH_LEN_MASK) == 0) {
				if (bootverbose)
					device_printf(sc->dev,
					    "ignoring bogus zero-length packet\n");
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_PREWRITE);
				sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_POSTWRITE);
				continue;
			}
			/* Flush memory for mbuf so we don't get stale bytes */
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_POSTREAD);
			WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));

			/*
			 * The length returned by the device includes the
			 * ethernet CRC calculation for the packet, but
			 * ifnet drivers are supposed to discard it.
			 */
			mb = m_devget(sc->rx_buf[i],
			    (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN,
			    ETHER_ALIGN, ifp, NULL);
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_PREWRITE);
			sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_PREREAD);
			if (mb != NULL) {
				ifp->if_ipackets++;
				(*ifp->if_input)(ifp, mb);
			}
		}
	}
	if (status & ETH_ISR_TCOM) {
		ATE_LOCK(sc);
		/* XXX TSR register should be cleared */
		if (sc->sent_mbuf[0]) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[0],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[0]);
			m_freem(sc->sent_mbuf[0]);
			ifp->if_opackets++;
			sc->sent_mbuf[0] = NULL;
		}
		if (sc->sent_mbuf[1]) {
			if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[1],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->mtag, sc->tx_map[1]);
				m_freem(sc->sent_mbuf[1]);
				ifp->if_opackets++;
				sc->txcur = 0;
				sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
			} else {
				sc->sent_mbuf[0] = sc->sent_mbuf[1];
				sc->sent_mbuf[1] = NULL;
				sc->txcur = 1;
			}
		} else {
			sc->sent_mbuf[0] = NULL;
			sc->txcur = 0;
		}
		/*
		 * We're no longer busy, so clear the busy flag and call the
		 * start routine to xmit more packets.
		 */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		atestart_locked(sc->ifp);
		ATE_UNLOCK(sc);
	}
	if (status & ETH_ISR_RBNA) {
		/* Workaround Errata #11 */
		if (bootverbose)
			device_printf(sc->dev, "RBNA workaround\n");
		reg = RD4(sc, ETH_CTL);
		WR4(sc, ETH_CTL, reg & ~ETH_CTL_RE);
		BARRIER(sc, ETH_CTL, 4, BUS_SPACE_BARRIER_WRITE);
		WR4(sc, ETH_CTL, reg | ETH_CTL_RE);
	}
}

/*
 * Reset and initialize the chip.
 */
static void
ateinit_locked(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t reg;

	ATE_ASSERT_LOCKED(sc);

	/*
	 * XXX TODO(3)
	 * we need to turn on the EMAC clock in the pmc.  With the
	 * default boot loader, this is already turned on.  However, we
	 * need to think about how best to turn it on/off as the interface
	 * is brought up/down, as well as dealing with the mii bus...
	 *
	 * We also need to multiplex the pins correctly.
	 */

	/*
	 * There are two different ways that the mii bus is connected
	 * to this chip.  Select the right one based on the use_rmii
	 * flag detected at attach time.
	 */
	reg = RD4(sc, ETH_CFG);
	if (sc->use_rmii)
		reg |= ETH_CFG_RMII;
	else
		reg &= ~ETH_CFG_RMII;
	WR4(sc, ETH_CFG, reg);

	ate_rxfilter(sc);

	/*
	 * Set the chip MAC address.
	 */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	ate_set_mac(sc, eaddr);

	/*
	 * Turn on MACs and interrupt processing.
	 */
	WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
	WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);

	/* Enable big packets. */
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);

	/*
	 * Set the 'running' flag, clear the output-active flag, and
	 * attempt to start output.
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ate_stat_update(sc, mii->mii_media_active);
	atestart_locked(ifp);

	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

/*
 * Dequeue packets and transmit.
 */
static void
atestart_locked(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mbuf *m, *mdefrag;
	bus_dma_segment_t segs[1];
	int nseg, e;

	ATE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->txcur < ATE_MAX_TX_BUFFERS) {
		/*
		 * Check to see if there's room to put another packet into the
		 * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
		 * packets.  We use OACTIVE to indicate "we can stuff more into
		 * our buffers (clear) or not (set)."
		 */
		if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
		    segs, &nseg, 0);
		if (e == EFBIG) {
			mdefrag = m_defrag(m, M_DONTWAIT);
			if (mdefrag == NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				return;
			}
			m = mdefrag;
			e = bus_dmamap_load_mbuf_sg(sc->mtag,
			    sc->tx_map[sc->txcur], m, segs, &nseg, 0);
		}
		if (e != 0) {
			m_freem(m);
			continue;
		}
		bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Tell the hardware to xmit the packet.
		 */
		WR4(sc, ETH_TAR, segs[0].ds_addr);
		BARRIER(sc, ETH_TAR, 8, BUS_SPACE_BARRIER_WRITE);
		WR4(sc, ETH_TCR, segs[0].ds_len);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		sc->sent_mbuf[sc->txcur] = m;
		sc->txcur++;
	}
}

static void
ateinit(void *xsc)
{
	struct ate_softc *sc = xsc;

	ATE_LOCK(sc);
	ateinit_locked(sc);
	ATE_UNLOCK(sc);
}

static void
atestart(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;

	ATE_LOCK(sc);
	atestart_locked(ifp);
	ATE_UNLOCK(sc);
}

/*
 * Turn off interrupts, and stop the NIC.  Can be called with sc->ifp NULL,
 * so be careful.
 */
static void
atestop(struct ate_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;
	if (ifp) {
		ifp->if_timer = 0;
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	}

	callout_stop(&sc->tick_ch);

	/*
	 * Enable some parts of the MAC that are needed always (like the
	 * MII bus).  This turns off the RE and TE bits, which will remain
	 * off until ateinit() is called to turn them on.  With RE and TE
	 * turned off, there's no DMA to worry about after this write.
	 */
	WR4(sc, ETH_CTL, ETH_CTL_MPE);

	/*
	 * Turn off all the configured options and revert to defaults.
	 */
	WR4(sc, ETH_CFG, ETH_CFG_CLK_32);

	/*
	 * Turn off all the interrupts, and ack any pending ones by reading
	 * the ISR.
	 */
	WR4(sc, ETH_IDR, 0xffffffff);
	RD4(sc, ETH_ISR);

	/*
	 * Clear out the Transmit and Receiver Status registers of any
	 * errors they may be reporting.
	 */
	WR4(sc, ETH_TSR, 0xffffffff);
	WR4(sc, ETH_RSR, 0xffffffff);

	/*
	 * Release TX resources.
	 */
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		if (sc->sent_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
			m_freem(sc->sent_mbuf[i]);
			sc->sent_mbuf[i] = NULL;
		}
	}

	/*
	 * XXX we should power down the EMAC if it isn't in use, after
	 * putting it into loopback mode.  This saves about 400uA according
	 * to the datasheet.
	 */
}

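/*
 * Program the receive filter (broadcast, promiscuous and multicast
 * hash settings) from the current interface flags.
 */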
static void
ate_rxfilter(struct ate_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;
	int enabled;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;

	/*
	 * Wipe out old filter settings.
	 */
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_CAF | ETH_CFG_MTI | ETH_CFG_UNI);
	reg |= ETH_CFG_NBC;
	sc->flags &= ~ATE_FLAG_MULTICAST;

	/*
	 * Set new parameters.
	 */
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		reg &= ~ETH_CFG_NBC;
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		reg |= ETH_CFG_CAF;
	} else {
		enabled = ate_setmcast(sc);
		if (enabled != 0) {
			reg |= ETH_CFG_MTI;
			sc->flags |= ATE_FLAG_MULTICAST;
		}
	}
	WR4(sc, ETH_CFG, reg);
}

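/*
 * Handle interface ioctls: flag, multicast, media and capability
 * changes are dealt with here; everything else goes to ether_ioctl().
 */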
static int
ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int drv_flags, flags;
	int mask, error, enabled;

	error = 0;
	flags = ifp->if_flags;
	drv_flags = ifp->if_drv_flags;
	switch (cmd) {
	case SIOCSIFFLAGS:
		ATE_LOCK(sc);
		if ((flags & IFF_UP) != 0) {
			if ((drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ate_rxfilter(sc);
			} else {
				if ((sc->flags & ATE_FLAG_DETACHING) == 0)
					ateinit_locked(sc);
			}
		} else if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			atestop(sc);
		}
		sc->if_flags = flags;
		ATE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ATE_LOCK(sc);
			enabled = ate_setmcast(sc);
			if (enabled != (sc->flags & ATE_FLAG_MULTICAST))
				ate_rxfilter(sc);
			ATE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			ATE_LOCK(sc);
			if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
				ifp->if_capenable |= IFCAP_VLAN_MTU;
			} else {
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
				ifp->if_capenable &= ~IFCAP_VLAN_MTU;
			}
			ATE_UNLOCK(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

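/* Bus callback: forget our miibus child when it is detached. */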
static void
ate_child_detached(device_t dev, device_t child)
{
	struct ate_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

/*
 * MII bus support routines.
 */
static int
ate_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ate_softc *sc;
	int val;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here
	 */

	sc = device_get_softc(dev);
	DELAY(1);	/* Hangs w/o this delay; really 30.5us atm */
	WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;

	return (val);
}

static int
ate_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ate_softc *sc;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here
	 */

	sc = device_get_softc(dev);
	WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	return (0);
}

static device_method_t ate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ate_probe),
	DEVMETHOD(device_attach,	ate_attach),
	DEVMETHOD(device_detach,	ate_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	ate_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ate_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ate_miibus_writereg),

	{ 0, 0 }
};

static driver_t ate_driver = {
	"ate",
	ate_methods,
	sizeof(struct ate_softc),
};

DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0);
DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ate, miibus, 1, 1, 1);
MODULE_DEPEND(ate, ether, 1, 1, 1);