if_ate.c revision 191959
1/*-
2 * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26/* TODO: (in no order)
27 *
28 * 8) Need to sync busdma goo in atestop
29 * 9) atestop should maybe free the mbufs?
30 *
31 * 1) detach
32 * 2) Free dma setup
33 * 3) Turn on the clock in pmc?  Turn off?
34 */
35
36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD: head/sys/arm/at91/if_ate.c 191959 2009-05-10 08:54:10Z stas $");
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/bus.h>
42#include <sys/kernel.h>
43#include <sys/mbuf.h>
44#include <sys/malloc.h>
45#include <sys/module.h>
46#include <sys/rman.h>
47#include <sys/socket.h>
48#include <sys/sockio.h>
49#include <sys/sysctl.h>
50#include <machine/bus.h>
51
52#include <net/ethernet.h>
53#include <net/if.h>
54#include <net/if_arp.h>
55#include <net/if_dl.h>
56#include <net/if_media.h>
57#include <net/if_mib.h>
58#include <net/if_types.h>
59
60#ifdef INET
61#include <netinet/in.h>
62#include <netinet/in_systm.h>
63#include <netinet/in_var.h>
64#include <netinet/ip.h>
65#endif
66
67#include <net/bpf.h>
68#include <net/bpfdesc.h>
69
70#include <dev/mii/mii.h>
71#include <dev/mii/miivar.h>
72#include <arm/at91/if_atereg.h>
73
74#include "miibus_if.h"
75
76#define ATE_MAX_TX_BUFFERS 2		/* We have ping-pong tx buffers */
77#define ATE_MAX_RX_BUFFERS 64
78
79/*
80 * Driver-specific flags.
81 */
82#define	ATE_FLAG_DETACHING	0x01
83
84struct ate_softc
85{
86	struct ifnet *ifp;		/* ifnet pointer */
87	struct mtx sc_mtx;		/* basically a perimeter lock */
88	device_t dev;			/* Myself */
89	device_t miibus;		/* My child miibus */
90	void *intrhand;			/* Interrupt handle */
91	struct resource *irq_res;	/* IRQ resource */
92	struct resource	*mem_res;	/* Memory resource */
93	struct callout tick_ch;		/* Tick callout */
94	bus_dma_tag_t mtag;		/* bus dma tag for mbufs */
95	bus_dmamap_t tx_map[ATE_MAX_TX_BUFFERS];
96	struct mbuf *sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
97	bus_dma_tag_t rxtag;
98	bus_dmamap_t rx_map[ATE_MAX_RX_BUFFERS];
99	void *rx_buf[ATE_MAX_RX_BUFFERS]; /* RX buffer space */
100	int rx_buf_ptr;
101	bus_dma_tag_t rx_desc_tag;
102	bus_dmamap_t rx_desc_map;
103	int txcur;			/* current tx map pointer */
104	bus_addr_t rx_desc_phys;
105	eth_rx_desc_t *rx_descs;
106	int use_rmii;
107	struct	ifmib_iso_8802_3 mibdata; /* stuff for network mgmt */
108	int	flags;
109	int	if_flags;
110};
111
112static inline uint32_t
113RD4(struct ate_softc *sc, bus_size_t off)
114{
115	return bus_read_4(sc->mem_res, off);
116}
117
118static inline void
119WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
120{
121	bus_write_4(sc->mem_res, off, val);
122}
123
124#define ATE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
125#define	ATE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
126#define ATE_LOCK_INIT(_sc) \
127	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
128	    MTX_NETWORK_LOCK, MTX_DEF)
129#define ATE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
130#define ATE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
131#define ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
132
133static devclass_t ate_devclass;
134
135/* ifnet entry points */
136
137static void ateinit_locked(void *);
138static void atestart_locked(struct ifnet *);
139
140static void ateinit(void *);
141static void atestart(struct ifnet *);
142static void atestop(struct ate_softc *);
143static int ateioctl(struct ifnet * ifp, u_long, caddr_t);
144
145/* bus entry points */
146
147static int ate_probe(device_t dev);
148static int ate_attach(device_t dev);
149static int ate_detach(device_t dev);
150static void ate_intr(void *);
151
152/* helper routines */
153static int ate_activate(device_t dev);
154static void ate_deactivate(device_t dev);
155static int ate_ifmedia_upd(struct ifnet *ifp);
156static void ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
157static int ate_get_mac(struct ate_softc *sc, u_char *eaddr);
158static void ate_set_mac(struct ate_softc *sc, u_char *eaddr);
159static void	ate_rxfilter(struct ate_softc *sc);
160
161/*
162 * The AT91 family of products calls its ethernet controller the EMAC.
163 * However, it isn't self-identifying.  It is anticipated that the parent
164 * bus code will take care to add ate devices only where they really exist.
165 * As such, we do nothing here to identify the device and just set its name.
166 */
167static int
168ate_probe(device_t dev)
169{
170	device_set_desc(dev, "EMAC");
171	return (0);
172}
173
174static int
175ate_attach(device_t dev)
176{
177	struct ate_softc *sc = device_get_softc(dev);
178	struct ifnet *ifp = NULL;
179	struct sysctl_ctx_list *sctx;
180	struct sysctl_oid *soid;
181	int err;
182	u_char eaddr[ETHER_ADDR_LEN];
183	uint32_t rnd;
184
185	sc->dev = dev;
186	err = ate_activate(dev);
187	if (err)
188		goto out;
189
190	sc->use_rmii = (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;
191
192	/* Sysctls */
193	sctx = device_get_sysctl_ctx(dev);
194	soid = device_get_sysctl_tree(dev);
195	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
196	    CTLFLAG_RD, &sc->use_rmii, 0, "rmii in use");
197
198	/* calling atestop before ifp is set is OK */
199	atestop(sc);
200	ATE_LOCK_INIT(sc);
201	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
202
203	if ((err = ate_get_mac(sc, eaddr)) != 0) {
204		/*
205		 * No MAC address configured.  Generate a random one.
206		 */
207		if (bootverbose)
208			device_printf(dev,
209			    "Generating random ethernet address.\n");
210		rnd = arc4random();
211
212		/*
213		 * Set the OUI to a convenient locally administered address.
214		 * 'b' is 0x62, which has the locally administered bit set and
215		 * the broadcast/multicast bit clear.
216		 */
217		eaddr[0] = 'b';
218		eaddr[1] = 's';
219		eaddr[2] = 'd';
220		eaddr[3] = (rnd >> 16) & 0xff;
221		eaddr[4] = (rnd >> 8) & 0xff;
222		eaddr[5] = rnd & 0xff;
223	}
224	ate_set_mac(sc, eaddr);
225
226	sc->ifp = ifp = if_alloc(IFT_ETHER);
227	if (mii_phy_probe(dev, &sc->miibus, ate_ifmedia_upd, ate_ifmedia_sts)) {
228		device_printf(dev, "Cannot find my PHY.\n");
229		err = ENXIO;
230		goto out;
231	}
232
233	ifp->if_softc = sc;
234	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
235	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
236	ifp->if_capabilities |= IFCAP_VLAN_MTU;
237	ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */
238	ifp->if_start = atestart;
239	ifp->if_ioctl = ateioctl;
240	ifp->if_init = ateinit;
241	ifp->if_baudrate = 10000000;
242	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
243	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
244	IFQ_SET_READY(&ifp->if_snd);
245	ifp->if_timer = 0;
246	ifp->if_linkmib = &sc->mibdata;
247	ifp->if_linkmiblen = sizeof(sc->mibdata);
248	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
249	sc->if_flags = ifp->if_flags;
250
251	ether_ifattach(ifp, eaddr);
252
253	/*
254	 * Activate the interrupt
255	 */
256	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
257	    NULL, ate_intr, sc, &sc->intrhand);
258	if (err) {
259		ether_ifdetach(ifp);
260		ATE_LOCK_DESTROY(sc);
261	}
262out:;
263	if (err)
264		ate_deactivate(dev);
265	if (err && ifp)
266		if_free(ifp);
267	return (err);
268}
269
270static int
271ate_detach(device_t dev)
272{
273	return (EBUSY);	/* XXX TODO(1); see the disabled sketch below. */
274}
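
/*
 * A rough, untested sketch of what TODO(1) might eventually look like,
 * modeled on other ifnet drivers.  It would replace the stub above and
 * assumes ate_deactivate() has grown the busdma teardown from TODO(2),
 * so it is kept disabled until that work is done.
 */
#if 0
static int
ate_detach(device_t dev)
{
	struct ate_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->ifp;

	ATE_LOCK(sc);
	sc->flags |= ATE_FLAG_DETACHING;
	atestop(sc);			/* Quiesces RE/TE and interrupts. */
	ATE_UNLOCK(sc);
	callout_drain(&sc->tick_ch);
	ether_ifdetach(ifp);
	ate_deactivate(dev);		/* Releases intr, miibus, resources. */
	if_free(ifp);
	ATE_LOCK_DESTROY(sc);
	return (0);
}
#endif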
275
276static void
277ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
278{
279	struct ate_softc *sc;
280
281	if (error != 0)
282		return;
283	sc = (struct ate_softc *)arg;
284	sc->rx_desc_phys = segs[0].ds_addr;
285}
286
287static void
288ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
289{
290	struct ate_softc *sc;
291	int i;
292
293	if (error != 0)
294		return;
295	sc = (struct ate_softc *)arg;
296	i = sc->rx_buf_ptr;
297
298	/*
299	 * For the last buffer, set the wrap bit so the controller
300	 * restarts from the first descriptor.
301	 */
302	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
303	if (i == ATE_MAX_RX_BUFFERS - 1)
304		sc->rx_descs[i].addr = segs[0].ds_addr | ETH_WRAP_BIT;
305	else
306		sc->rx_descs[i].addr = segs[0].ds_addr;
307	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
308	sc->rx_descs[i].status = 0;
309	/* Flush the memory in the mbuf */
310	bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD);
311}
312
313/*
314 * Compute the multicast filter for this device using the standard
315 * algorithm.  I wonder why this isn't in ether somewhere, as a lot
316 * of different MAC chips use this method (or the bit-reversed
317 * variant).
318 */
319static void
320ate_setmcast(struct ate_softc *sc)
321{
322	uint32_t index;
323	uint32_t mcaf[2];
324	u_char *af = (u_char *) mcaf;
325	struct ifmultiaddr *ifma;
326
327	mcaf[0] = 0;
328	mcaf[1] = 0;
329
330	IF_ADDR_LOCK(sc->ifp);
331	TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
332		if (ifma->ifma_addr->sa_family != AF_LINK)
333			continue;
334		index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
335		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
336		af[index >> 3] |= 1 << (index & 7);
337	}
338	IF_ADDR_UNLOCK(sc->ifp);
339
340	/*
341	 * Write the hash to the hash register.  This card can also
342	 * accept unicast packets as well as multicast packets using this
343	 * register for easier bridging operations, but we don't take
344	 * advantage of that.  Locks here are to avoid LOR with the
345	 * IF_ADDR_LOCK, but might not be strictly necessary.
346	 */
347	WR4(sc, ETH_HSL, mcaf[0]);
348	WR4(sc, ETH_HSH, mcaf[1]);
349}
350
351static int
352ate_activate(device_t dev)
353{
354	struct ate_softc *sc;
355	int rid, err, i;
356
357	sc = device_get_softc(dev);
358	rid = 0;
359	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
360	    RF_ACTIVE);
361	if (sc->mem_res == NULL)
362		goto errout;
363	rid = 0;
364	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
365	    RF_ACTIVE);
366	if (sc->irq_res == NULL)
367		goto errout;
368
369	/*
370	 * Allocate DMA tags and maps
371	 */
372	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
373	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
374	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->mtag);
375	if (err != 0)
376		goto errout;
377	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
378		err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]);
379		if (err != 0)
380			goto errout;
381	}
382	/*
383	 * Allocate our RX buffers.  This chip has a receive descriptor ring
384	 * that the hardware fills in as packets arrive.
385	 */
386
387	/*
388	 * Allocate DMA tags and maps for RX.
389	 */
390	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
391	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
392	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag);
393	if (err != 0)
394		goto errout;
395
396	/* Dma TAG and MAP for the rx descriptors. */
397	err = bus_dma_tag_create(bus_get_dma_tag(dev), sizeof(eth_rx_desc_t),
398	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
399	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1,
400	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
401	    &sc->sc_mtx, &sc->rx_desc_tag);
402	if (err != 0)
403		goto errout;
404	if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
405	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
406		goto errout;
407	if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
408	    sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t),
409	    ate_getaddr, sc, 0) != 0)
410		goto errout;
411	/* XXX TODO(5) Put this in ateinit_locked? */
412	for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
413		sc->rx_buf_ptr = i;
414		if (bus_dmamem_alloc(sc->rxtag, (void **)&sc->rx_buf[i],
415		      BUS_DMA_NOWAIT, &sc->rx_map[i]) != 0)
416			goto errout;
417		if (bus_dmamap_load(sc->rxtag, sc->rx_map[i], sc->rx_buf[i],
418		    MCLBYTES, ate_load_rx_buf, sc, 0) != 0)
419			goto errout;
420	}
421	sc->rx_buf_ptr = 0;
422	/* Flush the memory for the EMAC rx descriptor */
423	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
424	/* Write the descriptor queue address. */
425	WR4(sc, ETH_RBQP, sc->rx_desc_phys);
426	return (0);
427errout:
428	ate_deactivate(dev);
429	return (ENOMEM);
430}
431
432static void
433ate_deactivate(device_t dev)
434{
435	struct ate_softc *sc;
436
437	sc = device_get_softc(dev);
438	/* XXX TODO(2) teardown busdma junk, below from fxp -- customize */
439#if 0
440	if (sc->fxp_mtag) {
441		for (i = 0; i < FXP_NRFABUFS; i++) {
442			rxp = &sc->fxp_desc.rx_list[i];
443			if (rxp->rx_mbuf != NULL) {
444				bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
445				    BUS_DMASYNC_POSTREAD);
446				bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
447				m_freem(rxp->rx_mbuf);
448			}
449			bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
450		}
451		bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
452		for (i = 0; i < FXP_NTXCB; i++) {
453			txp = &sc->fxp_desc.tx_list[i];
454			if (txp->tx_mbuf != NULL) {
455				bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
456				    BUS_DMASYNC_POSTWRITE);
457				bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
458				m_freem(txp->tx_mbuf);
459			}
460			bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
461		}
462		bus_dma_tag_destroy(sc->fxp_mtag);
463	}
464	if (sc->fxp_stag)
465		bus_dma_tag_destroy(sc->fxp_stag);
466	if (sc->cbl_tag)
467		bus_dma_tag_destroy(sc->cbl_tag);
468	if (sc->mcs_tag)
469		bus_dma_tag_destroy(sc->mcs_tag);
470#endif
471	if (sc->intrhand)
472		bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
473	sc->intrhand = 0;
474	bus_generic_detach(sc->dev);
475	if (sc->miibus)
476		device_delete_child(sc->dev, sc->miibus);
477	if (sc->mem_res)
478		bus_release_resource(dev, SYS_RES_MEMORY,
479		    rman_get_rid(sc->mem_res), sc->mem_res);
480	sc->mem_res = 0;
481	if (sc->irq_res)
482		bus_release_resource(dev, SYS_RES_IRQ,
483		    rman_get_rid(sc->irq_res), sc->irq_res);
484	sc->irq_res = 0;
485	return;
486}
487
488/*
489 * Change media according to request.
490 */
491static int
492ate_ifmedia_upd(struct ifnet *ifp)
493{
494	struct ate_softc *sc = ifp->if_softc;
495	struct mii_data *mii;
496
497	mii = device_get_softc(sc->miibus);
498	ATE_LOCK(sc);
499	mii_mediachg(mii);
500	ATE_UNLOCK(sc);
501	return (0);
502}
503
504/*
505 * Notify the world which media we're using.
506 */
507static void
508ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
509{
510	struct ate_softc *sc = ifp->if_softc;
511	struct mii_data *mii;
512
513	mii = device_get_softc(sc->miibus);
514	ATE_LOCK(sc);
515	mii_pollstat(mii);
516	ifmr->ifm_active = mii->mii_media_active;
517	ifmr->ifm_status = mii->mii_media_status;
518	ATE_UNLOCK(sc);
519}
520
521static void
522ate_stat_update(struct ate_softc *sc, int active)
523{
524	/*
525	 * The speed and full/half-duplex state need to be reflected
526	 * in the ETH_CFG register.
527	 */
528	if (IFM_SUBTYPE(active) == IFM_10_T)
529		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_SPD);
530	else
531		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_SPD);
532	if (active & IFM_FDX)
533		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_FD);
534	else
535		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_FD);
536}
537
538static void
539ate_tick(void *xsc)
540{
541	struct ate_softc *sc = xsc;
542	struct ifnet *ifp = sc->ifp;
543	struct mii_data *mii;
544	int active;
545	uint32_t c;
546
547	/*
548	 * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and falls back
549	 * to asking the MII whether there's a link when this bit is clear.
550	 * Not sure if we should do the same thing here or not.
551	 */
552	ATE_ASSERT_LOCKED(sc);
553	if (sc->miibus != NULL) {
554		mii = device_get_softc(sc->miibus);
555		active = mii->mii_media_active;
556		mii_tick(mii);
557		if (mii->mii_media_status & IFM_ACTIVE &&
558		     active != mii->mii_media_active)
559			ate_stat_update(sc, mii->mii_media_active);
560	}
561
562	/*
563	 * Update the stats as best we can.  When we're done, clear
564	 * the status counters and start over.  We're supposed to read these
565	 * registers often enough that they won't overflow.  Hopefully
566	 * once a second is often enough.  Some don't map well to
567	 * the dot3Stats mib, so for those we just count them as general
568	 * errors.  Stats for iframes, ibytes, oframes and obytes are
569	 * collected elsewhere.  These registers zero on a read to prevent
570	 * races.  For all the collision stats, also update the collision
571	 * stats for the interface.
572	 */
573	sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
574	sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
575	c = RD4(sc, ETH_SCOL);
576	ifp->if_collisions += c;
577	sc->mibdata.dot3StatsSingleCollisionFrames += c;
578	c = RD4(sc, ETH_MCOL);
579	sc->mibdata.dot3StatsMultipleCollisionFrames += c;
580	ifp->if_collisions += c;
581	sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
582	sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
583	c = RD4(sc, ETH_LCOL);
584	sc->mibdata.dot3StatsLateCollisions += c;
585	ifp->if_collisions += c;
586	c = RD4(sc, ETH_ECOL);
587	sc->mibdata.dot3StatsExcessiveCollisions += c;
588	ifp->if_collisions += c;
589	sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
590	sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
591	sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);
592	/*
593	 * not sure where to lump these, so count them against the errors
594	 * for the interface.
595	 */
596	sc->ifp->if_oerrors += RD4(sc, ETH_TUE);
597	sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
598	    RD4(sc, ETH_USF);
599
600	/*
601	 * Schedule another timeout one second from now.
602	 */
603	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
604}
605
606static void
607ate_set_mac(struct ate_softc *sc, u_char *eaddr)
608{
609	WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
610	    (eaddr[1] << 8) | eaddr[0]);
611	WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
612}
613
614static int
615ate_get_mac(struct ate_softc *sc, u_char *eaddr)
616{
617	bus_size_t sa_low_reg[] = { ETH_SA1L, ETH_SA2L, ETH_SA3L, ETH_SA4L };
618	bus_size_t sa_high_reg[] = { ETH_SA1H, ETH_SA2H, ETH_SA3H, ETH_SA4H };
619	uint32_t low, high;
620	int i;
621
622	/*
623	 * The boot loader sets up the MAC with an address if one is configured
624	 * in the loader.  Grab the first non-zero one from SA[1-4][HL].
625	 */
626	for (i = 0; i < 4; i++) {
627		low = RD4(sc, sa_low_reg[i]);
628		high = RD4(sc, sa_high_reg[i]);
629		if ((low | (high & 0xffff)) != 0) {
630			eaddr[0] = low & 0xff;
631			eaddr[1] = (low >> 8) & 0xff;
632			eaddr[2] = (low >> 16) & 0xff;
633			eaddr[3] = (low >> 24) & 0xff;
634			eaddr[4] = high & 0xff;
635			eaddr[5] = (high >> 8) & 0xff;
636			return (0);
637		}
638	}
639	return (ENXIO);
640}
641
642static void
643ate_intr(void *xsc)
644{
645	struct ate_softc *sc = xsc;
646	struct ifnet *ifp = sc->ifp;
647	int status;
648	int i;
649	void *bp;
650	struct mbuf *mb;
651	uint32_t rx_stat;
652
653	status = RD4(sc, ETH_ISR);
654	if (status == 0)
655		return;
656	if (status & ETH_ISR_RCOM) {
657		bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
658		    BUS_DMASYNC_POSTREAD);
659		while (sc->rx_descs[sc->rx_buf_ptr].addr & ETH_CPU_OWNER) {
660			i = sc->rx_buf_ptr;
661			sc->rx_buf_ptr = (i + 1) % ATE_MAX_RX_BUFFERS;
662			bp = sc->rx_buf[i];
663			rx_stat = sc->rx_descs[i].status;
664			if ((rx_stat & ETH_LEN_MASK) == 0) {
665				printf("ignoring bogus 0 len packet\n");
666				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
667				    BUS_DMASYNC_PREWRITE);
668				sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
669				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
670				    BUS_DMASYNC_POSTWRITE);
671				continue;
672			}
673			/* Flush memory for mbuf so we don't get stale bytes */
674			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
675			    BUS_DMASYNC_POSTREAD);
676			WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));
677
678			/*
679			 * The length returned by the device includes the
680			 * ethernet CRC calculation for the packet, but
681			 * ifnet drivers are supposed to discard it.
682			 */
683			mb = m_devget(sc->rx_buf[i],
684			    (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN,
685			    ETHER_ALIGN, ifp, NULL);
686			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
687			    BUS_DMASYNC_PREWRITE);
688			sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
689			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
690			    BUS_DMASYNC_POSTWRITE);
691			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
692			    BUS_DMASYNC_PREREAD);
693			if (mb != NULL) {
694				ifp->if_ipackets++;
695				(*ifp->if_input)(ifp, mb);
696			}
697
698		}
699	}
700	if (status & ETH_ISR_TCOM) {
701		ATE_LOCK(sc);
702		/* XXX TSR register should be cleared */
703		if (sc->sent_mbuf[0]) {
704			bus_dmamap_sync(sc->mtag, sc->tx_map[0],
705			    BUS_DMASYNC_POSTWRITE);
706			m_freem(sc->sent_mbuf[0]);
707			ifp->if_opackets++;
708			sc->sent_mbuf[0] = NULL;
709		}
710		if (sc->sent_mbuf[1]) {
711			if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
712				bus_dmamap_sync(sc->mtag, sc->tx_map[1],
713				    BUS_DMASYNC_POSTWRITE);
714				m_freem(sc->sent_mbuf[1]);
715				ifp->if_opackets++;
716				sc->txcur = 0;
717				sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
718			} else {
719				sc->sent_mbuf[0] = sc->sent_mbuf[1];
720				sc->sent_mbuf[1] = NULL;
721				sc->txcur = 1;
722			}
723		} else {
724			sc->sent_mbuf[0] = NULL;
725			sc->txcur = 0;
726		}
727		/*
728		 * We're no longer busy, so clear the busy flag and call the
729		 * start routine to xmit more packets.
730		 */
731		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
732		atestart_locked(sc->ifp);
733		ATE_UNLOCK(sc);
734	}
735	if (status & ETH_ISR_RBNA) {
736		printf("RBNA workaround\n");
737		/* Workaround Errata #11 */
738		WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) &~ ETH_CTL_RE);
739		WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_RE);
740	}
741}
742
743/*
744 * Reset and initialize the chip
745 */
746static void
747ateinit_locked(void *xsc)
748{
749	struct ate_softc *sc = xsc;
750	struct ifnet *ifp = sc->ifp;
751 	struct mii_data *mii;
752
753	ATE_ASSERT_LOCKED(sc);
754
755	/*
756	 * XXX TODO(3)
757	 * we need to turn on the EMAC clock in the pmc.  With the
758	 * default boot loader, this is already turned on.  However, we
759	 * need to think about how best to turn it on/off as the interface
760	 * is brought up/down, as well as dealing with the mii bus...
761	 *
762	 * We also need to multiplex the pins correctly.
763	 */
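
#if 0
	/*
	 * One possible shape for the clock half of TODO(3), kept disabled:
	 * it assumes the at91_pmc clock API (at91_pmc_clock_ref() and
	 * at91_pmc_clock_enable(), from <arm/at91/at91_pmcvar.h>) is usable
	 * here and that the EMAC clock is registered under a name such as
	 * "emac_clk".  Both the header and the clock name are assumptions;
	 * today we simply rely on the boot loader having left the clock on.
	 */
	struct at91_pmc_clock *emac_clk;

	emac_clk = at91_pmc_clock_ref("emac_clk");
	at91_pmc_clock_enable(emac_clk);
#endif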
764
765	/*
766	 * There are two different ways that the mii bus can be connected
767	 * to this chip.  Select the right one based on what the boot loader
768	 * configured (ETH_CFG_RMII, latched into sc->use_rmii at attach).
769	 */
770	if (sc->use_rmii)
771		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_RMII);
772	else
773		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_RMII);
774
775	WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
776	WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);
777
778	/*
779	 * The boot loader normally fills in the MAC address.  If it did not,
780	 * ate_attach() has already programmed SA1L and SA1H with either the
781	 * stored or a randomly generated address via ate_set_mac(), so there
782	 * is nothing more to do here.
783	 */
784	ate_setmcast(sc);
785	ate_rxfilter(sc);
786
787	/* enable big packets */
788	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
789
790	/*
791	 * Set 'running' flag, and clear output active flag
792	 * and attempt to start the output
793	 */
794	ifp->if_drv_flags |= IFF_DRV_RUNNING;
795	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
796
797	mii = device_get_softc(sc->miibus);
798	mii_pollstat(mii);
799	ate_stat_update(sc, mii->mii_media_active);
800	atestart_locked(ifp);
801
802	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
803}
804
805/*
806 * Dequeue packets and transmit them.
807 */
808static void
809atestart_locked(struct ifnet *ifp)
810{
811	struct ate_softc *sc = ifp->if_softc;
812	struct mbuf *m, *mdefrag;
813	bus_dma_segment_t segs[1];
814	int nseg, e;
815
816	ATE_ASSERT_LOCKED(sc);
817	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
818		return;
819
820	while (sc->txcur < ATE_MAX_TX_BUFFERS) {
821		/*
822		 * check to see if there's room to put another packet into the
823		 * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
824		 * packets.  We use OACTIVE to indicate "we can stuff more into
825		 * our buffers (clear) or not (set)."
826		 */
827		if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
828			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
829			return;
830		}
831		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
832		if (m == NULL) {
833			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
834			return;
835		}
836		e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
837		    segs, &nseg, 0);
838		if (e == EFBIG) {
839			mdefrag = m_defrag(m, M_DONTWAIT);
840			if (mdefrag == NULL) {
841				IFQ_DRV_PREPEND(&ifp->if_snd, m);
842				return;
843			}
844			m = mdefrag;
845			e = bus_dmamap_load_mbuf_sg(sc->mtag,
846			    sc->tx_map[sc->txcur], m, segs, &nseg, 0);
847		}
848		if (e != 0) {
849			m_freem(m);
850			continue;
851		}
852		bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
853		    BUS_DMASYNC_PREWRITE);
854
855		/*
856		 * tell the hardware to xmit the packet.
857		 */
858		WR4(sc, ETH_TAR, segs[0].ds_addr);
859		WR4(sc, ETH_TCR, segs[0].ds_len);
860
861		/*
862		 * Tap off here if there is a bpf listener.
863		 */
864		BPF_MTAP(ifp, m);
865
866		sc->sent_mbuf[sc->txcur] = m;
867		sc->txcur++;
868	}
869}
870
871static void
872ateinit(void *xsc)
873{
874	struct ate_softc *sc = xsc;
875	ATE_LOCK(sc);
876	ateinit_locked(sc);
877	ATE_UNLOCK(sc);
878}
879
880static void
881atestart(struct ifnet *ifp)
882{
883	struct ate_softc *sc = ifp->if_softc;
884	ATE_LOCK(sc);
885	atestart_locked(ifp);
886	ATE_UNLOCK(sc);
887}
888
889/*
890 * Turn off interrupts and stop the NIC.  Can be called with sc->ifp NULL,
891 * so be careful.
892 */
893static void
894atestop(struct ate_softc *sc)
895{
896	struct ifnet *ifp = sc->ifp;
897
898	if (ifp) {
899		ifp->if_timer = 0;
900		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
901	}
902
903	callout_stop(&sc->tick_ch);
904
905	/*
906	 * Enable some parts of the MAC that are always needed (like the
907	 * MII bus).  This turns off the RE and TE bits, which will remain
908	 * off until ateinit() is called to turn them on.  With RE and TE
909	 * turned off, there's no DMA to worry about after this write.
910	 */
911	WR4(sc, ETH_CTL, ETH_CTL_MPE);
912
913	/*
914	 * Turn off all the configured options and revert to defaults.
915	 */
916	WR4(sc, ETH_CFG, ETH_CFG_CLK_32);
917
918	/*
919	 * Turn off all the interrupts, and ack any pending ones by reading
920	 * the ISR.
921	 */
922	WR4(sc, ETH_IDR, 0xffffffff);
923	RD4(sc, ETH_ISR);
924
925	/*
926	 * Clear out the Transmit and Receive Status registers of any
927	 * errors they may be reporting.
928	 */
929	WR4(sc, ETH_TSR, 0xffffffff);
930	WR4(sc, ETH_RSR, 0xffffffff);
931
932	/*
933	 * XXX TODO(8)
934	 * Need to worry about the busdma resources?  Yes, I think we need
935	 * to sync and unload them.  We may also need to release the mbufs
936	 * that are associated with RX and TX operations (see the sketch below).
937	 */
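#if 0
	/*
	 * A disabled sketch of one way TODO(8) could go: with RE and TE off
	 * there is no DMA in flight, so pending TX mbufs can be synced,
	 * unloaded and freed, and the RX maps synced.  Untested; it also
	 * assumes an `int i' local is added to atestop().
	 */
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		if (sc->sent_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
			m_freem(sc->sent_mbuf[i]);
			sc->sent_mbuf[i] = NULL;
		}
	}
	for (i = 0; i < ATE_MAX_RX_BUFFERS; i++)
		bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
		    BUS_DMASYNC_POSTREAD);
	sc->txcur = 0;
#endif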
938
939	/*
940	 * XXX we should power down the EMAC if it isn't in use, after
941	 * putting it into loopback mode.  This saves about 400uA according
942	 * to the datasheet.
943	 */
944}
945
946static void
947ate_rxfilter(struct ate_softc *sc)
948{
949	struct ifnet *ifp;
950	uint32_t reg;
951
952	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
953	ATE_ASSERT_LOCKED(sc);
954	ifp = sc->ifp;
955
956	/*
957	 * Wipe out old filter settings.
958	 */
959	reg = RD4(sc, ETH_CFG);
960	reg &= ~(ETH_CFG_CAF | ETH_CFG_MTI | ETH_CFG_UNI);
961	reg |= ETH_CFG_NBC;
962
963	/*
964	 * Set new parameters.
965	 */
966	if ((ifp->if_flags & IFF_BROADCAST) != 0)
967		reg &= ~ETH_CFG_NBC;
968	if ((ifp->if_flags & IFF_PROMISC) != 0)
969		reg |= ETH_CFG_CAF;
970	if ((ifp->if_flags & IFF_ALLMULTI) != 0)
971		reg |= ETH_CFG_MTI;
972	WR4(sc, ETH_CFG, reg);
973}
974
975static int
976ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
977{
978	struct ate_softc *sc = ifp->if_softc;
979 	struct mii_data *mii;
980 	struct ifreq *ifr = (struct ifreq *)data;
981	int drv_flags, flags;
982	int mask, error = 0;
983
984	flags = ifp->if_flags;
985	drv_flags = ifp->if_drv_flags;
986	switch (cmd) {
987	case SIOCSIFFLAGS:
988		ATE_LOCK(sc);
989		if ((flags & IFF_UP) != 0) {
990			if ((drv_flags & IFF_DRV_RUNNING) != 0) {
991				if (((flags ^ sc->if_flags)
992				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
993					ate_rxfilter(sc);
994			} else {
995				if ((sc->flags & ATE_FLAG_DETACHING) == 0)
996					ateinit_locked(sc);
997			}
998		} else if ((drv_flags & IFF_DRV_RUNNING) != 0) {
999			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1000			atestop(sc);
1001		}
1002		sc->if_flags = flags;
1003		ATE_UNLOCK(sc);
1004		break;
1005
1006	case SIOCADDMULTI:
1007	case SIOCDELMULTI:
1008		/* update multicast filter list. */
1009		ATE_LOCK(sc);
1010		ate_setmcast(sc);
1011		ATE_UNLOCK(sc);
1012		error = 0;
1013		break;
1014
1015  	case SIOCSIFMEDIA:
1016  	case SIOCGIFMEDIA:
1017 		mii = device_get_softc(sc->miibus);
1018 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1019  		break;
1020	case SIOCSIFCAP:
1021		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1022		if (mask & IFCAP_VLAN_MTU) {
1023			ATE_LOCK(sc);
1024			if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
1025				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
1026				ifp->if_capenable |= IFCAP_VLAN_MTU;
1027			} else {
1028				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
1029				ifp->if_capenable &= ~IFCAP_VLAN_MTU;
1030			}
1031			ATE_UNLOCK(sc);
1032		}
		break;
1033	default:
1034		error = ether_ioctl(ifp, cmd, data);
1035		break;
1036	}
1037	return (error);
1038}
1039
1040static void
1041ate_child_detached(device_t dev, device_t child)
1042{
1043	struct ate_softc *sc;
1044
1045	sc = device_get_softc(dev);
1046	if (child == sc->miibus)
1047		sc->miibus = NULL;
1048}
1049
1050/*
1051 * MII bus support routines.
1052 */
1053static int
1054ate_miibus_readreg(device_t dev, int phy, int reg)
1055{
1056	struct ate_softc *sc;
1057	int val;
1058
1059	/*
1060	 * XXX if we implement aggressive power savings, then we need
1061	 * XXX to make sure that the clock to the emac is on here
1062	 */
1063
1064	sc = device_get_softc(dev);
1065	DELAY(1);	/* Hangs w/o this delay really 30.5us atm */
1066	WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
1067	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
1068		continue;
1069	val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;
1070
1071	return (val);
1072}
1073
1074static void
1075ate_miibus_writereg(device_t dev, int phy, int reg, int data)
1076{
1077	struct ate_softc *sc;
1078
1079	/*
1080	 * XXX if we implement aggressive power savings, then we need
1081	 * XXX to make sure that the clock to the emac is on here
1082	 */
1083
1084	sc = device_get_softc(dev);
1085	WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
1086	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
1087		continue;
1088	return;
1089}
1090
1091static device_method_t ate_methods[] = {
1092	/* Device interface */
1093	DEVMETHOD(device_probe,		ate_probe),
1094	DEVMETHOD(device_attach,	ate_attach),
1095	DEVMETHOD(device_detach,	ate_detach),
1096
1097	/* Bus interface */
1098	DEVMETHOD(bus_child_detached,	ate_child_detached),
1099
1100	/* MII interface */
1101	DEVMETHOD(miibus_readreg,	ate_miibus_readreg),
1102	DEVMETHOD(miibus_writereg,	ate_miibus_writereg),
1103
1104	{ 0, 0 }
1105};
1106
1107static driver_t ate_driver = {
1108	"ate",
1109	ate_methods,
1110	sizeof(struct ate_softc),
1111};
1112
1113DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0);
1114DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0);
1115MODULE_DEPEND(ate, miibus, 1, 1, 1);
1116MODULE_DEPEND(ate, ether, 1, 1, 1);
1117