/*-
 * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* TODO: (in no particular order)
 *
 * 8) Need to sync busdma goo in atestop
 * 9) atestop should maybe free the mbufs?
 *
 * 1) detach
 * 2) Free dma setup
 * 3) Turn on the clock in pmc?  Turn off?
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/at91/if_ate.c 165779 2007-01-05 01:07:59Z ticso $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/at91/if_atereg.h>

#include "miibus_if.h"

#define ATE_MAX_TX_BUFFERS 2		/* We have ping-pong tx buffers */
#define ATE_MAX_RX_BUFFERS 64
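/*
 * The receive side uses a 64-entry descriptor ring with one cluster-sized
 * DMA buffer per entry; the transmit side drives the EMAC's two ping-pong
 * slots directly.  See ate_activate() for the busdma setup behind these
 * two constants.
 */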

struct ate_softc
{
	struct ifnet *ifp;		/* ifnet pointer */
	struct mtx sc_mtx;		/* basically a perimeter lock */
	device_t dev;			/* Myself */
	device_t miibus;		/* My child miibus */
	void *intrhand;			/* Interrupt handle */
	struct resource *irq_res;	/* IRQ resource */
	struct resource	*mem_res;	/* Memory resource */
	struct callout tick_ch;		/* Tick callout */
	bus_dma_tag_t mtag;		/* bus dma tag for tx mbufs */
	bus_dmamap_t tx_map[ATE_MAX_TX_BUFFERS];
	struct mbuf *sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
	bus_dma_tag_t rxtag;		/* bus dma tag for RX buffers */
	bus_dmamap_t rx_map[ATE_MAX_RX_BUFFERS];
	void *rx_buf[ATE_MAX_RX_BUFFERS]; /* RX buffer space */
	int rx_buf_ptr;			/* Next RX descriptor to look at */
	bus_dma_tag_t rx_desc_tag;	/* bus dma tag for RX descriptors */
	bus_dmamap_t rx_desc_map;
	int txcur;			/* current tx map pointer */
	bus_addr_t rx_desc_phys;	/* Physical address of RX descriptors */
	eth_rx_desc_t *rx_descs;	/* KVA of RX descriptor ring */
	int use_rmii;			/* MII bus wired via RMII */
	struct	ifmib_iso_8802_3 mibdata; /* stuff for network mgmt */
};
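/*
 * The transmit path keeps at most two mbufs in flight, matching the
 * EMAC's ping-pong transmit buffer: txcur counts how many slots are
 * currently filled and sent_mbuf[] remembers the mbufs so the TCOM
 * interrupt handler can sync, free and refill them in order.
 */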

static inline uint32_t
RD4(struct ate_softc *sc, bus_size_t off)
{
	return bus_read_4(sc->mem_res, off);
}

static inline void
WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->mem_res, off, val);
}

#define ATE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ATE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define ATE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define ATE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define ATE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t ate_devclass;

/* ifnet entry points */

static void ateinit_locked(void *);
static void atestart_locked(struct ifnet *);

static void ateinit(void *);
static void atestart(struct ifnet *);
static void atestop(struct ate_softc *);
static int ateioctl(struct ifnet *ifp, u_long, caddr_t);

/* bus entry points */

static int ate_probe(device_t dev);
static int ate_attach(device_t dev);
static int ate_detach(device_t dev);
static void ate_intr(void *);

/* helper routines */
static int ate_activate(device_t dev);
static void ate_deactivate(device_t dev);
static int ate_ifmedia_upd(struct ifnet *ifp);
static void ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static void ate_get_mac(struct ate_softc *sc, u_char *eaddr);
static void ate_set_mac(struct ate_softc *sc, u_char *eaddr);

/*
 * The AT91 family of products has the ethernet called EMAC.  However,
 * it isn't self identifying.  It is anticipated that the parent bus
 * code will take care to only add ate devices where they really exist.
 * As such, we do nothing here to identify the device and just set its
 * name.
 */
static int
ate_probe(device_t dev)
{
	device_set_desc(dev, "EMAC");
	return (0);
}

static int
ate_attach(device_t dev)
{
	struct ate_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	int err;
	u_char eaddr[6];

	sc->dev = dev;
	err = ate_activate(dev);
	if (err)
		goto out;

	sc->use_rmii = (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;

	/* Sysctls */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
	    CTLFLAG_RD, &sc->use_rmii, 0, "rmii in use");

	/* Calling atestop() before ifp is set is OK. */
	atestop(sc);
	ATE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	ate_get_mac(sc, eaddr);
	ate_set_mac(sc, eaddr);

	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (mii_phy_probe(dev, &sc->miibus, ate_ifmedia_upd, ate_ifmedia_sts)) {
		device_printf(dev, "Cannot find my PHY.\n");
		err = ENXIO;
		goto out;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */
	ifp->if_start = atestart;
	ifp->if_ioctl = ateioctl;
	ifp->if_init = ateinit;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_timer = 0;
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;

	ether_ifattach(ifp, eaddr);

	/*
	 * Activate the interrupt.
	 */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    ate_intr, sc, &sc->intrhand);
	if (err) {
		ether_ifdetach(ifp);
		ATE_LOCK_DESTROY(sc);
	}
out:;
	if (err)
		ate_deactivate(dev);
	if (err && ifp)
		if_free(ifp);
	return (err);
}

static int
ate_detach(device_t dev)
{
	return (EBUSY);	/* XXX TODO(1) */
}
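
/*
 * XXX Untested sketch of what a full detach might look like once TODO(1)
 * XXX and TODO(2) are sorted out; kept disabled until it can actually be
 * XXX exercised.
 */
#if 0
static int
ate_detach(device_t dev)
{
	struct ate_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->ifp;

	ATE_LOCK(sc);
	atestop(sc);
	ATE_UNLOCK(sc);
	callout_drain(&sc->tick_ch);
	ether_ifdetach(ifp);
	ate_deactivate(dev);
	if_free(ifp);
	ATE_LOCK_DESTROY(sc);
	return (0);
}
#endif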

static void
ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;
	sc->rx_desc_phys = segs[0].ds_addr;
}

static void
ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;
	int i;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;
	i = sc->rx_buf_ptr;

	/*
	 * For the last buffer, set the wrap bit so the controller
	 * restarts from the first descriptor.
	 */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	if (i == ATE_MAX_RX_BUFFERS - 1)
		sc->rx_descs[i].addr = segs[0].ds_addr | ETH_WRAP_BIT;
	else
		sc->rx_descs[i].addr = segs[0].ds_addr;
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
	sc->rx_descs[i].status = 0;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD);
}
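
/*
 * Each receive descriptor is a two-word structure: an address word whose
 * low bits carry the wrap flag and the CPU-ownership flag the EMAC sets
 * once it has stored a frame, and a status word whose low bits hold the
 * received length (see the uses of ETH_WRAP_BIT here and of
 * ETH_CPU_OWNER/ETH_LEN_MASK in ate_intr() below).
 */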

/*
 * Compute the multicast filter for this device using the standard
 * algorithm.  I wonder why this isn't in ether somewhere as a lot
 * of different MAC chips use this method (or its bit-reversed variant).
 */
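/*
 * Concretely: the top six bits of the big-endian CRC-32 of each multicast
 * address select one of 64 hash bits; index >> 3 picks the byte of mcaf[]
 * and (index & 7) the bit within it.  mcaf[0] ends up in ETH_HSL and
 * mcaf[1] in ETH_HSH, covering hash bits 0-31 and 32-63 respectively.
 */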
static void
ate_setmcast(struct ate_softc *sc)
{
	uint32_t index;
	uint32_t mcaf[2];
	u_char *af = (u_char *) mcaf;
	struct ifmultiaddr *ifma;

	mcaf[0] = 0;
	mcaf[1] = 0;

	IF_ADDR_LOCK(sc->ifp);
	TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		af[index >> 3] |= 1 << (index & 7);
	}
	IF_ADDR_UNLOCK(sc->ifp);

	/*
	 * Write the hash to the hash register.  This card can also
	 * accept unicast packets as well as multicast packets using this
	 * register for easier bridging operations, but we don't take
	 * advantage of that.  Locks here are to avoid LOR with the
	 * IF_ADDR_LOCK, but might not be strictly necessary.
	 */
	WR4(sc, ETH_HSL, mcaf[0]);
	WR4(sc, ETH_HSH, mcaf[1]);
}

static int
ate_activate(device_t dev)
{
	struct ate_softc *sc;
	int rid, err, i;

	sc = device_get_softc(dev);
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		goto errout;
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL)
		goto errout;

	/*
	 * Allocate DMA tags and maps.
	 */
	err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->mtag);
	if (err != 0)
		goto errout;
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]);
		if (err != 0)
			goto errout;
	}

	/*
	 * Allocate our RX buffers, along with DMA tags and maps for them.
	 * This chip has a RX descriptor structure that the hardware fills
	 * in as frames arrive, so the descriptor ring gets its own tag,
	 * map and memory below.
	 */
	err = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag);
	if (err != 0)
		goto errout;

	/* Dma TAG and MAP for the rx descriptors. */
	err = bus_dma_tag_create(NULL, sizeof(eth_rx_desc_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1,
	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
	    &sc->sc_mtx, &sc->rx_desc_tag);
	if (err != 0)
		goto errout;
	if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
		goto errout;
	if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
	    sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t),
	    ate_getaddr, sc, 0) != 0)
		goto errout;
	/* XXX TODO(5) Put this in ateinit_locked? */
	for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
		sc->rx_buf_ptr = i;
		if (bus_dmamem_alloc(sc->rxtag, (void **)&sc->rx_buf[i],
		    BUS_DMA_NOWAIT, &sc->rx_map[i]) != 0)
			goto errout;
		if (bus_dmamap_load(sc->rxtag, sc->rx_map[i], sc->rx_buf[i],
		    MCLBYTES, ate_load_rx_buf, sc, 0) != 0)
			goto errout;
	}
	sc->rx_buf_ptr = 0;
	/* Flush the memory for the EMAC rx descriptor ring. */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	/* Write the descriptor queue address. */
	WR4(sc, ETH_RBQP, sc->rx_desc_phys);
	return (0);
errout:
	ate_deactivate(dev);
	return (ENOMEM);
}

static void
ate_deactivate(device_t dev)
{
	struct ate_softc *sc;

	sc = device_get_softc(dev);
	/* XXX TODO(2) teardown busdma junk, below from fxp -- customize */
#if 0
	if (sc->fxp_mtag) {
		for (i = 0; i < FXP_NRFABUFS; i++) {
			rxp = &sc->fxp_desc.rx_list[i];
			if (rxp->rx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
				m_freem(rxp->rx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
		}
		bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
		for (i = 0; i < FXP_NTXCB; i++) {
			txp = &sc->fxp_desc.tx_list[i];
			if (txp->tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
				m_freem(txp->tx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
		}
		bus_dma_tag_destroy(sc->fxp_mtag);
	}
	if (sc->fxp_stag)
		bus_dma_tag_destroy(sc->fxp_stag);
	if (sc->cbl_tag)
		bus_dma_tag_destroy(sc->cbl_tag);
	if (sc->mcs_tag)
		bus_dma_tag_destroy(sc->mcs_tag);
#endif
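	/*
	 * XXX Untested sketch of the ate-specific teardown for TODO(2),
	 * XXX meant to eventually replace the fxp-derived block above
	 * XXX (assumes a local 'int i').
	 */
#if 0
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++)
		if (sc->tx_map[i] != NULL)
			bus_dmamap_destroy(sc->mtag, sc->tx_map[i]);
	for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
		if (sc->rx_buf[i] != NULL) {
			bus_dmamap_unload(sc->rxtag, sc->rx_map[i]);
			bus_dmamem_free(sc->rxtag, sc->rx_buf[i],
			    sc->rx_map[i]);
		}
	}
	if (sc->rx_descs != NULL) {
		bus_dmamap_unload(sc->rx_desc_tag, sc->rx_desc_map);
		bus_dmamem_free(sc->rx_desc_tag, sc->rx_descs,
		    sc->rx_desc_map);
	}
	if (sc->rx_desc_tag != NULL)
		bus_dma_tag_destroy(sc->rx_desc_tag);
	if (sc->rxtag != NULL)
		bus_dma_tag_destroy(sc->rxtag);
	if (sc->mtag != NULL)
		bus_dma_tag_destroy(sc->mtag);
#endif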
	if (sc->intrhand)
		bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
	sc->intrhand = 0;
	bus_generic_detach(sc->dev);
	if (sc->miibus)
		device_delete_child(sc->dev, sc->miibus);
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
	sc->mem_res = 0;
	if (sc->irq_res)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
	sc->irq_res = 0;
	return;
}

/*
 * Change media according to request.
 */
static int
ate_ifmedia_upd(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_mediachg(mii);
	ATE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ATE_UNLOCK(sc);
}

static void
ate_stat_update(struct ate_softc *sc, int active)
{
	/*
	 * The speed and full/half-duplex state need to be reflected
	 * in the ETH_CFG register.
	 */
	if (IFM_SUBTYPE(active) == IFM_10_T)
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_SPD);
	else
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_SPD);
	if (active & IFM_FDX)
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_FD);
	else
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_FD);
}

static void
ate_tick(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	int active;
	uint32_t c;

	/*
	 * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and asks the
	 * MII whether there's a link when that bit is clear.  Not sure if
	 * we should do the same thing here or not.
	 */
	ATE_ASSERT_LOCKED(sc);
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		     active != mii->mii_media_active)
			ate_stat_update(sc, mii->mii_media_active);
	}

	/*
	 * Update the stats as best we can.  When we're done, clear
	 * the status counters and start over.  We're supposed to read these
	 * registers often enough that they won't overflow.  Hopefully
	 * once a second is often enough.  Some don't map well to
	 * the dot3Stats mib, so for those we just count them as general
	 * errors.  Stats for iframes, ibytes, oframes and obytes are
	 * collected elsewhere.  These registers zero on a read to prevent
	 * races.  For all the collision stats, also update the collision
	 * stats for the interface.
	 */
	sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
	sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
	c = RD4(sc, ETH_SCOL);
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSingleCollisionFrames += c;
	c = RD4(sc, ETH_MCOL);
	sc->mibdata.dot3StatsMultipleCollisionFrames += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
	sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
	c = RD4(sc, ETH_LCOL);
	sc->mibdata.dot3StatsLateCollisions += c;
	ifp->if_collisions += c;
	c = RD4(sc, ETH_ECOL);
	sc->mibdata.dot3StatsExcessiveCollisions += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
	sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
	sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);
	/*
	 * Not sure where to lump these, so count them against the errors
	 * for the interface.
	 */
	sc->ifp->if_oerrors += RD4(sc, ETH_TUE);
	sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
	    RD4(sc, ETH_USF);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

static void
ate_set_mac(struct ate_softc *sc, u_char *eaddr)
{
	WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
	    (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
}

static void
ate_get_mac(struct ate_softc *sc, u_char *eaddr)
{
	uint32_t low, high;

	/*
	 * The boot loader sets up the MAC with an address, if one is
	 * configured in the loader.  The TSC loader will also set the MAC
	 * address in a similar way.  Grab the MAC address from the SA1[HL]
	 * registers.
	 */
	low = RD4(sc, ETH_SA1L);
	high = RD4(sc, ETH_SA1H);
	eaddr[0] = (high >> 8) & 0xff;
	eaddr[1] = high & 0xff;
	eaddr[2] = (low >> 24) & 0xff;
	eaddr[3] = (low >> 16) & 0xff;
	eaddr[4] = (low >> 8) & 0xff;
	eaddr[5] = low & 0xff;
}
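
/*
 * The EMAC stores the address bottom-up: SA1L holds the first four octets
 * with the first octet in its least significant byte, and SA1H holds the
 * last two.  That is why ate_set_mac() and ate_get_mac() (de)compose the
 * registers the way they do.
 */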

static void
ate_intr(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	int status;
	int i;
	void *bp;
	struct mbuf *mb;
	uint32_t rx_stat;

	status = RD4(sc, ETH_ISR);
	if (status == 0)
		return;
	if (status & ETH_ISR_RCOM) {
		bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
		    BUS_DMASYNC_POSTREAD);
		while (sc->rx_descs[sc->rx_buf_ptr].addr & ETH_CPU_OWNER) {
			i = sc->rx_buf_ptr;
			sc->rx_buf_ptr = (i + 1) % ATE_MAX_RX_BUFFERS;
			bp = sc->rx_buf[i];
			rx_stat = sc->rx_descs[i].status;
			if ((rx_stat & ETH_LEN_MASK) == 0) {
				printf("ignoring bogus 0 len packet\n");
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_PREWRITE);
				sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_POSTWRITE);
				continue;
			}
			/* Flush memory for mbuf so we don't get stale bytes */
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_POSTREAD);
			WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));

			/*
			 * The length returned by the device includes the
			 * ethernet CRC calculation for the packet, but
			 * ifnet drivers are supposed to discard it.
			 */
			mb = m_devget(sc->rx_buf[i],
			    (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN,
			    ETHER_ALIGN, ifp, NULL);
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_PREWRITE);
			sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_PREREAD);
			if (mb != NULL) {
				ifp->if_ipackets++;
				(*ifp->if_input)(ifp, mb);
			}

		}
	}
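	/*
	 * A TCOM interrupt means at least the first ping-pong slot has
	 * completed.  Slot 1 is finished only once the transmitter has gone
	 * idle; otherwise its mbuf is shifted down to slot 0 so that
	 * atestart_locked() keeps filling the slots in order.
	 */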
	if (status & ETH_ISR_TCOM) {
		ATE_LOCK(sc);
		/* XXX TSR register should be cleared */
		if (sc->sent_mbuf[0]) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[0],
			    BUS_DMASYNC_POSTWRITE);
			m_freem(sc->sent_mbuf[0]);
			ifp->if_opackets++;
			sc->sent_mbuf[0] = NULL;
		}
		if (sc->sent_mbuf[1]) {
			if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[1],
				    BUS_DMASYNC_POSTWRITE);
				m_freem(sc->sent_mbuf[1]);
				ifp->if_opackets++;
				sc->txcur = 0;
				sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
			} else {
				sc->sent_mbuf[0] = sc->sent_mbuf[1];
				sc->sent_mbuf[1] = NULL;
				sc->txcur = 1;
			}
		} else {
			sc->sent_mbuf[0] = NULL;
			sc->txcur = 0;
		}
		/*
		 * We're no longer busy, so clear the busy flag and call the
		 * start routine to xmit more packets.
		 */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		atestart_locked(sc->ifp);
		ATE_UNLOCK(sc);
	}
	if (status & ETH_ISR_RBNA) {
		printf("RBNA workaround\n");
		/* Workaround Errata #11 */
		WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) & ~ETH_CTL_RE);
		WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_RE);
	}
}

/*
 * Reset and initialize the chip.
 */
static void
ateinit_locked(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;

	ATE_ASSERT_LOCKED(sc);

	/*
	 * XXX TODO(3)
	 * we need to turn on the EMAC clock in the pmc.  With the
	 * default boot loader, this is already turned on.  However, we
	 * need to think about how best to turn it on/off as the interface
	 * is brought up/down, as well as dealing with the mii bus...
	 *
	 * We also need to multiplex the pins correctly.
	 */

	/*
	 * There are two different ways that the mii bus is connected
	 * to this chip.  Select the right one based on how the boot
	 * loader configured ETH_CFG (latched in sc->use_rmii at attach).
	 */
	if (sc->use_rmii)
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_RMII);
	else
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_RMII);

	/*
	 * Turn on the multicast hash, and write 0's to it.
	 */
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_MTI);
	WR4(sc, ETH_HSH, 0);
	WR4(sc, ETH_HSL, 0);

	WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
	WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);

	/*
	 * Boot loader fills in the MAC address.  If that's not the case,
	 * then we should set SA1L and SA1H here to the appropriate value.
	 * Note: the byte order is big endian, not little endian, so we have
	 * some swapping to do.  Again, only if we need it (which I don't
	 * think we do).
	 */
	ate_setmcast(sc);

	/* Enable big packets. */
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);

	/*
	 * Set 'running' flag, clear output active flag
	 * and attempt to start the output.
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ate_stat_update(sc, mii->mii_media_active);
	atestart_locked(ifp);

	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

/*
 * Dequeue packets and transmit them.
 */
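/*
 * The mbuf DMA tag was created with nsegments = 1, so each packet has to
 * map as a single contiguous segment; on EFBIG we m_defrag() and retry
 * once.  IFF_DRV_OACTIVE is set when ETH_TSR_BNQ reports no free transmit
 * slot and is cleared again from the TCOM interrupt handler.
 */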
static void
atestart_locked(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mbuf *m, *mdefrag;
	bus_dma_segment_t segs[1];
	int nseg, e;

	ATE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->txcur < ATE_MAX_TX_BUFFERS) {
		/*
		 * Check to see if there's room to put another packet into the
		 * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
		 * packets.  We use OACTIVE to indicate "we can stuff more into
		 * our buffers (clear) or not (set)."
		 */
		if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
		    segs, &nseg, 0);
		if (e == EFBIG) {
			mdefrag = m_defrag(m, M_DONTWAIT);
			if (mdefrag == NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				return;
			}
			m = mdefrag;
			e = bus_dmamap_load_mbuf_sg(sc->mtag,
			    sc->tx_map[sc->txcur], m, segs, &nseg, 0);
		}
		if (e != 0) {
			m_freem(m);
			continue;
		}
		bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Tell the hardware to xmit the packet.
		 */
		WR4(sc, ETH_TAR, segs[0].ds_addr);
		WR4(sc, ETH_TCR, segs[0].ds_len);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		sc->sent_mbuf[sc->txcur] = m;
		sc->txcur++;
	}
}

static void
ateinit(void *xsc)
{
	struct ate_softc *sc = xsc;

	ATE_LOCK(sc);
	ateinit_locked(sc);
	ATE_UNLOCK(sc);
}

static void
atestart(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;

	ATE_LOCK(sc);
	atestart_locked(ifp);
	ATE_UNLOCK(sc);
}

/*
 * Turn off interrupts, and stop the nic.  Can be called with sc->ifp NULL
 * so be careful.
 */
static void
atestop(struct ate_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	if (ifp) {
		ifp->if_timer = 0;
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	}

	callout_stop(&sc->tick_ch);

	/*
	 * Enable some parts of the MAC that are always needed (like the
	 * MII bus).  This turns off the RE and TE bits, which will remain
	 * off until ateinit() is called to turn them on.  With RE and TE
	 * turned off, there's no DMA to worry about after this write.
	 */
	WR4(sc, ETH_CTL, ETH_CTL_MPE);

	/*
	 * Turn off all the configured options and revert to defaults.
	 */
	WR4(sc, ETH_CFG, ETH_CFG_CLK_32);

	/*
	 * Turn off all the interrupts, and ack any pending ones by reading
	 * the ISR.
	 */
	WR4(sc, ETH_IDR, 0xffffffff);
	RD4(sc, ETH_ISR);

	/*
	 * Clear out the Transmit and Receiver Status registers of any
	 * errors they may be reporting.
	 */
	WR4(sc, ETH_TSR, 0xffffffff);
	WR4(sc, ETH_RSR, 0xffffffff);

	/*
	 * XXX TODO(8)
	 * need to worry about the busdma resources?  Yes, I think we need
	 * to sync and unload them.  We may also need to release the mbufs
	 * that are associated with RX and TX operations.
	 */
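	/*
	 * XXX Untested sketch for TODO(8)/TODO(9): with RE and TE off there
	 * XXX is no DMA in flight, so the tx maps could be synced, unloaded
	 * XXX and their mbufs freed here (assumes a local 'int i').
	 */
#if 0
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		if (sc->sent_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
			m_freem(sc->sent_mbuf[i]);
			sc->sent_mbuf[i] = NULL;
		}
	}
	sc->txcur = 0;
#endif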

	/*
	 * XXX we should power down the EMAC if it isn't in use, after
	 * putting it into loopback mode.  This saves about 400uA according
	 * to the datasheet.
	 */
}

static int
ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			atestop(sc);
		} else {
			/* Reinitialize the card on any parameter change. */
			ateinit_locked(sc);
		}
		ATE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Update the multicast filter list. */
		ATE_LOCK(sc);
		ate_setmcast(sc);
		ATE_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			ATE_LOCK(sc);
			if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
				ifp->if_capenable |= IFCAP_VLAN_MTU;
			} else {
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
				ifp->if_capenable &= ~IFCAP_VLAN_MTU;
			}
			ATE_UNLOCK(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ate_child_detached(device_t dev, device_t child)
{
	struct ate_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

/*
 * MII bus support routines.
 */
static int
ate_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ate_softc *sc;
	int val;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here
	 */

	if (phy != 0)
		return (0xffff);
	sc = device_get_softc(dev);
	DELAY(1);	/* Hangs w/o this delay (which is really ~30.5us atm) */
	WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;

	return (val);
}

static void
ate_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ate_softc *sc;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here
	 */

	sc = device_get_softc(dev);
	WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	return;
}

static device_method_t ate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ate_probe),
	DEVMETHOD(device_attach,	ate_attach),
	DEVMETHOD(device_detach,	ate_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	ate_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ate_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ate_miibus_writereg),

	{ 0, 0 }
};

static driver_t ate_driver = {
	"ate",
	ate_methods,
	sizeof(struct ate_softc),
};

DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0);
DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ate, miibus, 1, 1, 1);
MODULE_DEPEND(ate, ether, 1, 1, 1);
