/*-
 * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* TODO
 *
 * 1) Turn on the clock in pmc?  Turn off?
 * 2) GPIO initialization in board setup code.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/at91/if_ate.c 192018 2009-05-12 16:07:08Z stas $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/at91/if_atereg.h>

#include "miibus_if.h"

#define ATE_MAX_TX_BUFFERS 2		/* We have ping-pong tx buffers */
#define ATE_MAX_RX_BUFFERS 64

/*
 * Driver-specific flags.
 */
#define	ATE_FLAG_DETACHING	0x01
#define	ATE_FLAG_MULTICAST	0x02

struct ate_softc
{
	struct ifnet *ifp;		/* ifnet pointer */
	struct mtx sc_mtx;		/* basically a perimeter lock */
	device_t dev;			/* Myself */
	device_t miibus;		/* My child miibus */
	void *intrhand;			/* Interrupt handle */
	struct resource *irq_res;	/* IRQ resource */
	struct resource	*mem_res;	/* Memory resource */
	struct callout tick_ch;		/* Tick callout */
	bus_dma_tag_t mtag;		/* bus dma tag for mbufs */
	bus_dmamap_t tx_map[ATE_MAX_TX_BUFFERS];
	struct mbuf *sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
	bus_dma_tag_t rxtag;
	bus_dmamap_t rx_map[ATE_MAX_RX_BUFFERS];
	void *rx_buf[ATE_MAX_RX_BUFFERS]; /* RX buffer space */
	int rx_buf_ptr;
	bus_dma_tag_t rx_desc_tag;
	bus_dmamap_t rx_desc_map;
	int txcur;			/* current tx map pointer */
	bus_addr_t rx_desc_phys;
	eth_rx_desc_t *rx_descs;
	int use_rmii;
	struct	ifmib_iso_8802_3 mibdata; /* stuff for network mgmt */
	int	flags;
	int	if_flags;
};

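/*
 * Convenience accessors for the EMAC's memory-mapped registers.
 */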
static inline uint32_t
RD4(struct ate_softc *sc, bus_size_t off)
{
	return bus_read_4(sc->mem_res, off);
}

static inline void
WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->mem_res, off, val);
}

#define ATE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ATE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define ATE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define ATE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define ATE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t ate_devclass;

/* ifnet entry points */

static void ateinit_locked(void *);
static void atestart_locked(struct ifnet *);

static void ateinit(void *);
static void atestart(struct ifnet *);
static void atestop(struct ate_softc *);
static int ateioctl(struct ifnet * ifp, u_long, caddr_t);

/* bus entry points */

static int ate_probe(device_t dev);
static int ate_attach(device_t dev);
static int ate_detach(device_t dev);
static void ate_intr(void *);

/* helper routines */
static int ate_activate(device_t dev);
static void ate_deactivate(struct ate_softc *sc);
static int ate_ifmedia_upd(struct ifnet *ifp);
static void ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int ate_get_mac(struct ate_softc *sc, u_char *eaddr);
static void ate_set_mac(struct ate_softc *sc, u_char *eaddr);
static void	ate_rxfilter(struct ate_softc *sc);

/*
 * The AT91 family of products has an ethernet controller called the EMAC.
 * However, it isn't self-identifying.  It is anticipated that the parent
 * bus code will take care to add ate devices only where they really exist.
 * As such, we do nothing here to identify the device and just set its name.
 */
static int
ate_probe(device_t dev)
{
	device_set_desc(dev, "EMAC");
	return (0);
}

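/*
 * Allocate bus resources, read (or generate) the MAC address, probe the
 * PHY, and hook the interface up to the network stack.
 */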
static int
ate_attach(device_t dev)
{
	struct ate_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	u_char eaddr[ETHER_ADDR_LEN];
	uint32_t rnd;
	int rid, err;

	sc->dev = dev;
	ATE_LOCK_INIT(sc);

	/*
	 * Allocate resources.
	 */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		err = ENOMEM;
		goto out;
	}
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resources.\n");
		err = ENOMEM;
		goto out;
	}

	err = ate_activate(dev);
	if (err)
		goto out;

	sc->use_rmii = (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;

	/* Sysctls */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
	    CTLFLAG_RD, &sc->use_rmii, 0, "rmii in use");

	/* Calling atestop() before ifp is set is OK. */
	ATE_LOCK(sc);
	atestop(sc);
	ATE_UNLOCK(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	if ((err = ate_get_mac(sc, eaddr)) != 0) {
		/*
		 * No MAC address configured.  Generate a random one.
		 */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		rnd = arc4random();

		/*
		 * Set the OUI to a convenient locally administered address.
		 * 'b' is 0x62, which has the locally administered bit set
		 * and the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;
	}
	ate_set_mac(sc, eaddr);

	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (mii_phy_probe(dev, &sc->miibus, ate_ifmedia_upd, ate_ifmedia_sts)) {
		device_printf(dev, "Cannot find my PHY.\n");
		err = ENXIO;
		goto out;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU; /* The hardware bits are already set. */
	ifp->if_start = atestart;
	ifp->if_ioctl = ateioctl;
	ifp->if_init = ateinit;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_timer = 0;
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
	sc->if_flags = ifp->if_flags;

	ether_ifattach(ifp, eaddr);

	/*
	 * Activate the interrupt.
	 */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ate_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not establish interrupt handler.\n");
		ether_ifdetach(ifp);
		goto out;
	}

out:
	if (err)
		ate_detach(dev);
	return (err);
}

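/*
 * Detach the driver: stop the hardware, tear down the interrupt handler
 * and DMA state, and release the bus resources.  Also used to unwind a
 * partially completed attach.
 */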
static int
ate_detach(device_t dev)
{
	struct ate_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ate: %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		ATE_LOCK(sc);
		sc->flags |= ATE_FLAG_DETACHING;
		atestop(sc);
		ATE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		ether_ifdetach(ifp);
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ate_deactivate(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}
	ATE_LOCK_DESTROY(sc);
	return (0);
}

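/*
 * bus_dmamap_load() callback: record the bus address of the RX descriptor
 * ring so it can later be written to the ETH_RBQP register.
 */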
static void
ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;
	sc->rx_desc_phys = segs[0].ds_addr;
}

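/*
 * bus_dmamap_load() callback for a single RX buffer: point the descriptor
 * selected by sc->rx_buf_ptr at the buffer's bus address and hand it to
 * the hardware.
 */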
static void
ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;
	int i;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;
	i = sc->rx_buf_ptr;

	/*
	 * For the last buffer, set the wrap bit so the controller
	 * restarts from the first descriptor.
	 */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	if (i == ATE_MAX_RX_BUFFERS - 1)
		sc->rx_descs[i].addr = segs[0].ds_addr | ETH_WRAP_BIT;
	else
		sc->rx_descs[i].addr = segs[0].ds_addr;
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
	sc->rx_descs[i].status = 0;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD);
}

/*
 * Compute the multicast filter for this device using the standard
 * algorithm.  I wonder why this isn't in ether somewhere, as a lot of
 * different MAC chips use this method (or the reversed-bits variant).
 */
static int
ate_setmcast(struct ate_softc *sc)
{
	uint32_t index;
	uint32_t mcaf[2];
	u_char *af = (u_char *) mcaf;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;

	ifp = sc->ifp;

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		return (0);
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		WR4(sc, ETH_HSL, 0xffffffff);
		WR4(sc, ETH_HSH, 0xffffffff);
		return (1);
	}

	/*
	 * Compute the multicast hash.
	 */
	mcaf[0] = 0;
	mcaf[1] = 0;
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		af[index >> 3] |= 1 << (index & 7);
	}
	IF_ADDR_UNLOCK(ifp);

	/*
	 * Write the hash to the hash registers.  This chip can also accept
	 * unicast packets as well as multicast packets using these registers
	 * for easier bridging operations, but we don't take advantage of
	 * that.  Locks here are to avoid LOR with the IF_ADDR_LOCK, but
	 * might not be strictly necessary.
	 */
	WR4(sc, ETH_HSL, mcaf[0]);
	WR4(sc, ETH_HSH, mcaf[1]);
	return (mcaf[0] || mcaf[1]);
}

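/*
 * Allocate and load all DMA resources: the TX mbuf maps, the RX buffers
 * and their maps, and the RX descriptor ring shared with the EMAC.
 */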
static int
ate_activate(device_t dev)
{
	struct ate_softc *sc;
	int err, i;

	sc = device_get_softc(dev);

	/*
	 * Allocate DMA tags and maps for TX.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->mtag);
	if (err != 0)
		goto errout;
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]);
		if (err != 0)
			goto errout;
	}

	/*
	 * Allocate DMA tags and maps for the RX buffers.  This chip has an
	 * RX descriptor ring that the hardware fills in as packets arrive.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag);
	if (err != 0)
		goto errout;

	/* DMA tag and map for the RX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), sizeof(eth_rx_desc_t),
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1,
	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
	    &sc->sc_mtx, &sc->rx_desc_tag);
	if (err != 0)
		goto errout;
	if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
		goto errout;
	if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
	    sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t),
	    ate_getaddr, sc, 0) != 0)
		goto errout;
	for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
		sc->rx_buf_ptr = i;
		if (bus_dmamem_alloc(sc->rxtag, (void **)&sc->rx_buf[i],
		    BUS_DMA_NOWAIT, &sc->rx_map[i]) != 0)
			goto errout;
		if (bus_dmamap_load(sc->rxtag, sc->rx_map[i], sc->rx_buf[i],
		    MCLBYTES, ate_load_rx_buf, sc, 0) != 0)
			goto errout;
	}
	sc->rx_buf_ptr = 0;
	/* Flush the memory for the EMAC rx descriptors. */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	/* Write the descriptor queue address. */
	WR4(sc, ETH_RBQP, sc->rx_desc_phys);
	return (0);

errout:
	return (ENOMEM);
}

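/*
 * Free all the DMA resources set up by ate_activate(): TX maps and any
 * pending TX mbufs, RX buffers and maps, and the RX descriptor ring.
 */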
static void
ate_deactivate(struct ate_softc *sc)
{
	int i;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	if (sc->mtag != NULL) {
		for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
			if (sc->sent_mbuf[i] != NULL) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[i],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
				m_freem(sc->sent_mbuf[i]);
			}
			bus_dmamap_destroy(sc->mtag, sc->tx_map[i]);
			sc->sent_mbuf[i] = NULL;
			sc->tx_map[i] = NULL;
		}
		bus_dma_tag_destroy(sc->mtag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL) {
			if (sc->rx_desc_phys != 0) {
				bus_dmamap_sync(sc->rx_desc_tag,
				    sc->rx_desc_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rx_desc_tag,
				    sc->rx_desc_map);
				sc->rx_desc_phys = 0;
			}
		}
	}
	if (sc->rxtag != NULL) {
		for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
			if (sc->rx_buf[i] != NULL) {
				if (sc->rx_descs[i].addr != 0) {
					bus_dmamap_sync(sc->rxtag,
					    sc->rx_map[i],
					    BUS_DMASYNC_POSTREAD);
					bus_dmamap_unload(sc->rxtag,
					    sc->rx_map[i]);
					sc->rx_descs[i].addr = 0;
				}
				bus_dmamem_free(sc->rxtag, sc->rx_buf[i],
				    sc->rx_map[i]);
				sc->rx_buf[i] = NULL;
				sc->rx_map[i] = NULL;
			}
		}
		bus_dma_tag_destroy(sc->rxtag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL)
			bus_dmamem_free(sc->rx_desc_tag, sc->rx_descs,
			    sc->rx_desc_map);
		bus_dma_tag_destroy(sc->rx_desc_tag);
		sc->rx_descs = NULL;
		sc->rx_desc_tag = NULL;
	}
}

/*
 * Change media according to request.
 */
static int
ate_ifmedia_upd(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_mediachg(mii);
	ATE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ATE_UNLOCK(sc);
}

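/*
 * Push the negotiated speed and duplex from the PHY into the ETH_CFG
 * register so the MAC matches the link.
 */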
static void
ate_stat_update(struct ate_softc *sc, int active)
{
	/*
	 * The speed and full/half-duplex state need to be reflected
	 * in the ETH_CFG register.
	 */
	if (IFM_SUBTYPE(active) == IFM_10_T)
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_SPD);
	else
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_SPD);
	if (active & IFM_FDX)
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_FD);
	else
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_FD);
}

static void
ate_tick(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	int active;
	uint32_t c;

	/*
	 * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and will ask
	 * the MII if there's a link if this bit is clear.  Not sure if we
	 * should do the same thing here or not.
	 */
	ATE_ASSERT_LOCKED(sc);
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    active != mii->mii_media_active)
			ate_stat_update(sc, mii->mii_media_active);
	}

	/*
	 * Update the stats as best we can.  When we're done, clear
	 * the status counters and start over.  We're supposed to read these
	 * registers often enough that they won't overflow.  Hopefully
	 * once a second is often enough.  Some don't map well to
	 * the dot3Stats mib, so for those we just count them as general
	 * errors.  Stats for iframes, ibytes, oframes and obytes are
	 * collected elsewhere.  These registers zero on a read to prevent
	 * races.  For all the collision stats, also update the collision
	 * stats for the interface.
	 */
	sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
	sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
	c = RD4(sc, ETH_SCOL);
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSingleCollisionFrames += c;
	c = RD4(sc, ETH_MCOL);
	sc->mibdata.dot3StatsMultipleCollisionFrames += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
	sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
	c = RD4(sc, ETH_LCOL);
	sc->mibdata.dot3StatsLateCollisions += c;
	ifp->if_collisions += c;
	c = RD4(sc, ETH_ECOL);
	sc->mibdata.dot3StatsExcessiveCollisions += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
	sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
	sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);
	/*
	 * Not sure where to lump these, so count them against the errors
	 * for the interface.
	 */
	sc->ifp->if_oerrors += RD4(sc, ETH_TUE);
	sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
	    RD4(sc, ETH_USF);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

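/*
 * Program the EMAC's primary station address registers (SA1L/SA1H) with
 * the given ethernet address.
 */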
static void
ate_set_mac(struct ate_softc *sc, u_char *eaddr)
{
	WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
	    (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
}

static int
ate_get_mac(struct ate_softc *sc, u_char *eaddr)
{
	bus_size_t sa_low_reg[] = { ETH_SA1L, ETH_SA2L, ETH_SA3L, ETH_SA4L };
	bus_size_t sa_high_reg[] = { ETH_SA1H, ETH_SA2H, ETH_SA3H, ETH_SA4H };
	uint32_t low, high;
	int i;

	/*
	 * The boot loader sets up the MAC with an address, if one is
	 * configured there.  Grab the first non-zero address from the
	 * SA[1-4][HL] register pairs.
	 */
	for (i = 0; i < 4; i++) {
		low = RD4(sc, sa_low_reg[i]);
		high = RD4(sc, sa_high_reg[i]);
		if ((low | (high & 0xffff)) != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			return (0);
		}
	}
	return (ENXIO);
}

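/*
 * Interrupt handler.  Receive-complete (RCOM) events push finished RX
 * descriptors up the stack, transmit-complete (TCOM) events reclaim the
 * ping-pong TX buffers, and receive-buffer-not-available (RBNA) events
 * are worked around by toggling the receiver.
 */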
static void
ate_intr(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	int status;
	int i;
	void *bp;
	struct mbuf *mb;
	uint32_t rx_stat;

	status = RD4(sc, ETH_ISR);
	if (status == 0)
		return;
	if (status & ETH_ISR_RCOM) {
		bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
		    BUS_DMASYNC_POSTREAD);
		while (sc->rx_descs[sc->rx_buf_ptr].addr & ETH_CPU_OWNER) {
			i = sc->rx_buf_ptr;
			sc->rx_buf_ptr = (i + 1) % ATE_MAX_RX_BUFFERS;
			bp = sc->rx_buf[i];
			rx_stat = sc->rx_descs[i].status;
			if ((rx_stat & ETH_LEN_MASK) == 0) {
				printf("ignoring bogus 0 len packet\n");
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_PREWRITE);
				sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
				bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
				    BUS_DMASYNC_POSTWRITE);
				continue;
			}
			/* Flush memory for mbuf so we don't get stale bytes */
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_POSTREAD);
			WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));

			/*
			 * The length returned by the device includes the
			 * ethernet CRC calculation for the packet, but
			 * ifnet drivers are supposed to discard it.
			 */
			mb = m_devget(sc->rx_buf[i],
			    (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN,
			    ETHER_ALIGN, ifp, NULL);
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_PREWRITE);
			sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_PREREAD);
			if (mb != NULL) {
				ifp->if_ipackets++;
				(*ifp->if_input)(ifp, mb);
			}
		}
	}
	if (status & ETH_ISR_TCOM) {
		ATE_LOCK(sc);
		/* XXX TSR register should be cleared */
		if (sc->sent_mbuf[0]) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[0],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[0]);
			m_freem(sc->sent_mbuf[0]);
			ifp->if_opackets++;
			sc->sent_mbuf[0] = NULL;
		}
		if (sc->sent_mbuf[1]) {
			if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[1],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->mtag, sc->tx_map[1]);
				m_freem(sc->sent_mbuf[1]);
				ifp->if_opackets++;
				sc->txcur = 0;
				sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
			} else {
				sc->sent_mbuf[0] = sc->sent_mbuf[1];
				sc->sent_mbuf[1] = NULL;
				sc->txcur = 1;
			}
		} else {
			sc->sent_mbuf[0] = NULL;
			sc->txcur = 0;
		}
		/*
		 * We're no longer busy, so clear the busy flag and call the
		 * start routine to xmit more packets.
		 */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		atestart_locked(sc->ifp);
		ATE_UNLOCK(sc);
	}
	if (status & ETH_ISR_RBNA) {
		printf("RBNA workaround\n");
		/* Workaround Errata #11 */
		WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) &~ ETH_CTL_RE);
		WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_RE);
	}
}

/*
 * Reset and initialize the chip.
 */
static void
ateinit_locked(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;

	ATE_ASSERT_LOCKED(sc);

	/*
	 * XXX TODO(3)
	 * we need to turn on the EMAC clock in the pmc.  With the
	 * default boot loader, this is already turned on.  However, we
	 * need to think about how best to turn it on/off as the interface
	 * is brought up/down, as well as dealing with the mii bus...
	 *
	 * We also need to multiplex the pins correctly.
	 */

	/*
	 * There are two different ways that the mii bus is connected
	 * to this chip.  Select the right one based on how the boot
	 * loader configured the interface (recorded in sc->use_rmii).
	 */
	if (sc->use_rmii)
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_RMII);
	else
		WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_RMII);

	ate_rxfilter(sc);

	/*
	 * Turn on MACs and interrupt processing.
	 */
	WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
	WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);

	/*
	 * The boot loader fills in the MAC address.  If that's not the case,
	 * then we should set SA1L and SA1H here to the appropriate value.
	 * Note: the byte order is big endian, not little endian, so we have
	 * some swapping to do.  Again, only if we need it (which I don't
	 * think we do).
	 */

	/* Enable big packets. */
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);

	/*
	 * Set the 'running' flag, clear the output-active flag, and
	 * attempt to start output.
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ate_stat_update(sc, mii->mii_media_active);
	atestart_locked(ifp);

	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

/*
 * Dequeue packets and transmit them.
 */
static void
atestart_locked(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mbuf *m, *mdefrag;
	bus_dma_segment_t segs[1];
	int nseg, e;

	ATE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->txcur < ATE_MAX_TX_BUFFERS) {
		/*
		 * Check to see if there's room to put another packet into the
		 * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
		 * packets.  We use OACTIVE to indicate "we can stuff more into
		 * our buffers (clear) or not (set)."
		 */
		if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
		    segs, &nseg, 0);
		if (e == EFBIG) {
			mdefrag = m_defrag(m, M_DONTWAIT);
			if (mdefrag == NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				return;
			}
			m = mdefrag;
			e = bus_dmamap_load_mbuf_sg(sc->mtag,
			    sc->tx_map[sc->txcur], m, segs, &nseg, 0);
		}
		if (e != 0) {
			m_freem(m);
			continue;
		}
		bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Tell the hardware to xmit the packet.
		 */
		WR4(sc, ETH_TAR, segs[0].ds_addr);
		WR4(sc, ETH_TCR, segs[0].ds_len);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		sc->sent_mbuf[sc->txcur] = m;
		sc->txcur++;
	}
}

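/*
 * ifnet if_init entry point: locked wrapper around ateinit_locked().
 */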
static void
ateinit(void *xsc)
{
	struct ate_softc *sc = xsc;

	ATE_LOCK(sc);
	ateinit_locked(sc);
	ATE_UNLOCK(sc);
}

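/*
 * ifnet if_start entry point: locked wrapper around atestart_locked().
 */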
static void
atestart(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;

	ATE_LOCK(sc);
	atestart_locked(ifp);
	ATE_UNLOCK(sc);
}

/*
 * Turn off interrupts, and stop the nic.  Can be called with sc->ifp NULL,
 * so be careful.
 */
static void
atestop(struct ate_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;
	if (ifp) {
		ifp->if_timer = 0;
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	}

	callout_stop(&sc->tick_ch);

	/*
	 * Enable only the parts of the MAC that are always needed (like the
	 * MII bus).  This turns off the RE and TE bits, which will remain
	 * off until ateinit() is called to turn them on.  With RE and TE
	 * turned off, there's no DMA to worry about after this write.
	 */
	WR4(sc, ETH_CTL, ETH_CTL_MPE);

	/*
	 * Turn off all the configured options and revert to defaults.
	 */
	WR4(sc, ETH_CFG, ETH_CFG_CLK_32);

	/*
	 * Turn off all the interrupts, and ack any pending ones by reading
	 * the ISR.
	 */
	WR4(sc, ETH_IDR, 0xffffffff);
	RD4(sc, ETH_ISR);

	/*
	 * Clear out the Transmit and Receive Status registers of any
	 * errors they may be reporting.
	 */
	WR4(sc, ETH_TSR, 0xffffffff);
	WR4(sc, ETH_RSR, 0xffffffff);

	/*
	 * Release TX resources.
	 */
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		if (sc->sent_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
			m_freem(sc->sent_mbuf[i]);
			sc->sent_mbuf[i] = NULL;
		}
	}

	/*
	 * XXX we should power down the EMAC if it isn't in use, after
	 * putting it into loopback mode.  This saves about 400uA according
	 * to the datasheet.
	 */
}

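/*
 * Program the EMAC receive filter (the CAF, MTI, UNI and NBC bits in
 * ETH_CFG) to match the current interface flags and multicast list.
 */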
static void
ate_rxfilter(struct ate_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;
	int enabled;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;

	/*
	 * Wipe out old filter settings.
	 */
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_CAF | ETH_CFG_MTI | ETH_CFG_UNI);
	reg |= ETH_CFG_NBC;
	sc->flags &= ~ATE_FLAG_MULTICAST;

	/*
	 * Set new parameters.
	 */
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		reg &= ~ETH_CFG_NBC;
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		reg |= ETH_CFG_CAF;
	} else {
		enabled = ate_setmcast(sc);
		if (enabled != 0) {
			reg |= ETH_CFG_MTI;
			sc->flags |= ATE_FLAG_MULTICAST;
		}
	}
	WR4(sc, ETH_CFG, reg);
}

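/*
 * ifnet ioctl entry point.  Interface flag and capability changes are
 * handled here; everything else is passed to ether_ioctl().
 */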
static int
ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int drv_flags, flags;
	int mask, error, enabled;

	error = 0;
	flags = ifp->if_flags;
	drv_flags = ifp->if_drv_flags;
	switch (cmd) {
	case SIOCSIFFLAGS:
		ATE_LOCK(sc);
		if ((flags & IFF_UP) != 0) {
			if ((drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ate_rxfilter(sc);
			} else {
				if ((sc->flags & ATE_FLAG_DETACHING) == 0)
					ateinit_locked(sc);
			}
		} else if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			atestop(sc);
		}
		sc->if_flags = flags;
		ATE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ATE_LOCK(sc);
			enabled = ate_setmcast(sc);
			if (enabled != (sc->flags & ATE_FLAG_MULTICAST))
				ate_rxfilter(sc);
			ATE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			ATE_LOCK(sc);
			if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
				ifp->if_capenable |= IFCAP_VLAN_MTU;
			} else {
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
				ifp->if_capenable &= ~IFCAP_VLAN_MTU;
			}
			ATE_UNLOCK(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

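/*
 * Called by the bus when a child device (the miibus) goes away; clear our
 * cached pointer so we don't use a stale device_t.
 */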
static void
ate_child_detached(device_t dev, device_t child)
{
	struct ate_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

/*
 * MII bus support routines.
 */
static int
ate_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ate_softc *sc;
	int val;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here
	 */

	sc = device_get_softc(dev);
	DELAY(1);	/* Hangs w/o this delay really 30.5us atm */
	WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;

	return (val);
}

static void
ate_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ate_softc *sc;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here
	 */

	sc = device_get_softc(dev);
	WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
}

static device_method_t ate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ate_probe),
	DEVMETHOD(device_attach,	ate_attach),
	DEVMETHOD(device_detach,	ate_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	ate_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ate_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ate_miibus_writereg),

	{ 0, 0 }
};

static driver_t ate_driver = {
	"ate",
	ate_methods,
	sizeof(struct ate_softc),
};

DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0);
DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ate, miibus, 1, 1, 1);
MODULE_DEPEND(ate, ether, 1, 1, 1);