/*-
 * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
 * Copyright (c) 2009 Greg Ansley.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* TODO
 *
 * 1) Turn on the clock in pmc?  Turn off?
 * 2) GPIO initialization in board setup code.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/at91/if_ate.c 234281 2012-04-14 11:29:32Z marius $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include "opt_at91.h"
#include <arm/at91/at91reg.h>
#include <arm/at91/at91var.h>
#include <arm/at91/if_atereg.h>

#include "miibus_if.h"

/*
 * Driver-specific flags.
 */
#define	ATE_FLAG_DETACHING	0x01
#define	ATE_FLAG_MULTICAST	0x02

/*
 * The old EMAC assumes the whole packet fits in one buffer;
 * the new EMACB assumes all receive buffers are 128 bytes.
 */
#define	RX_BUF_SIZE(sc)	((sc)->is_emacb ? 128 : MCLBYTES)

/*
 * The EMACB has an 11-bit counter for RX/TX descriptors,
 * for a maximum of 1024 descriptors each.
 */
#define	ATE_MAX_RX_DESCR	1024
#define	ATE_MAX_TX_DESCR	1024

/* How many buffers to allocate */
#define	ATE_MAX_TX_BUFFERS	4	/* We have ping-pong tx buffers */

/* How much memory to use for rx buffers */
#define	ATE_RX_MEMORY		(ATE_MAX_RX_DESCR * 128)

/* Actual number of descriptors we allocate */
#define	ATE_NUM_RX_DESCR	ATE_MAX_RX_DESCR
#define	ATE_NUM_TX_DESCR	ATE_MAX_TX_BUFFERS

#if ATE_NUM_TX_DESCR > ATE_MAX_TX_DESCR
#error "Can't have more TX buffers than descriptors"
#endif
#if ATE_NUM_RX_DESCR > ATE_MAX_RX_DESCR
#error "Can't have more RX buffers than descriptors"
#endif

/* Wrap indexes the same way the hardware does */
#define	NEXT_RX_IDX(sc, cur)	\
    (((sc)->rx_descs[cur].addr & ETH_WRAP_BIT) ? 0 : (cur) + 1)

#define	NEXT_TX_IDX(sc, cur)	\
    (((sc)->tx_descs[cur].status & ETHB_TX_WRAP) ? 0 : (cur) + 1)
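
/*
 * ate_activate() sets the wrap bit on the last descriptor of each ring,
 * so these macros walk the rings just as the EMAC's internal descriptor
 * pointer does.
 */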

struct ate_softc
{
	struct ifnet	*ifp;		/* ifnet pointer */
	struct mtx	sc_mtx;		/* Basically a perimeter lock */
	device_t	dev;		/* Myself */
	device_t	miibus;		/* My child miibus */
	struct resource *irq_res;	/* IRQ resource */
	struct resource	*mem_res;	/* Memory resource */
	struct callout  tick_ch;	/* Tick callout */
	struct ifmib_iso_8802_3 mibdata; /* Stuff for network mgmt */
	bus_dma_tag_t   mtag;		/* bus dma tag for mbufs */
	bus_dma_tag_t   rx_tag;
	bus_dma_tag_t   rx_desc_tag;
	bus_dmamap_t    rx_desc_map;
	bus_dmamap_t    rx_map[ATE_MAX_RX_DESCR];
	bus_addr_t	rx_desc_phys;   /* PA of rx descriptors */
	eth_rx_desc_t   *rx_descs;	/* VA of rx descriptors */
	void		*rx_buf[ATE_NUM_RX_DESCR]; /* RX buffer space */
	int		rxhead;		/* Current RX map/desc index */
	uint32_t	rx_buf_size;    /* Size of Rx buffers */

	bus_dma_tag_t   tx_desc_tag;
	bus_dmamap_t    tx_desc_map;
	bus_dmamap_t    tx_map[ATE_MAX_TX_BUFFERS];
	bus_addr_t	tx_desc_phys;   /* PA of tx descriptors */
	eth_tx_desc_t   *tx_descs;	/* VA of tx descriptors */
	int		txhead;		/* Next TX map/desc index to use */
	int		txtail;		/* Oldest outstanding TX map/desc index */
	struct mbuf	*sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
	void		*intrhand;	/* Interrupt handle */
	int		flags;
	int		if_flags;
	int		use_rmii;
	int		is_emacb;	/* SAM9x hardware version */
};

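/*
 * Trivial register accessors; every EMAC register is 32 bits wide.
 */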
static inline uint32_t
RD4(struct ate_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}

static inline void
WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}

static inline void
BARRIER(struct ate_softc *sc, bus_size_t off, bus_size_t len, int flags)
{

	bus_barrier(sc->mem_res, off, len, flags);
}

#define	ATE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ATE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	ATE_LOCK_INIT(_sc)					\
	mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->dev),	\
	    MTX_NETWORK_LOCK, MTX_DEF)
#define	ATE_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx)
#define	ATE_ASSERT_LOCKED(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define	ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)

static devclass_t ate_devclass;

/*
 * ifnet entry points.
 */
static void	ateinit_locked(void *);
static void	atestart_locked(struct ifnet *);

static void	ateinit(void *);
static void	atestart(struct ifnet *);
static void	atestop(struct ate_softc *);
static int	ateioctl(struct ifnet *, u_long, caddr_t);

/*
 * Bus entry points.
 */
static int	ate_probe(device_t dev);
static int	ate_attach(device_t dev);
static int	ate_detach(device_t dev);
static void	ate_intr(void *);

/*
 * Helper routines.
 */
static int	ate_activate(device_t dev);
static void	ate_deactivate(struct ate_softc *sc);
static int	ate_ifmedia_upd(struct ifnet *ifp);
static void	ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	ate_get_mac(struct ate_softc *sc, u_char *eaddr);
static void	ate_set_mac(struct ate_softc *sc, u_char *eaddr);
static void	ate_rxfilter(struct ate_softc *sc);

static int	ate_miibus_readreg(device_t dev, int phy, int reg);
static int	ate_miibus_writereg(device_t dev, int phy, int reg, int data);

/*
 * The AT91 family of products has an Ethernet interface called EMAC.
 * However, it isn't self-identifying.  It is anticipated that the parent
 * bus code will take care to add ate devices only where they really are.
 * As such, we do nothing here to identify the device and just set its name.
 */
static int
ate_probe(device_t dev)
{

	device_set_desc(dev, "EMAC");
	return (0);
}

static int
ate_attach(device_t dev)
{
	struct ate_softc *sc;
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	u_char eaddr[ETHER_ADDR_LEN];
	uint32_t rnd;
	int rid, err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	ATE_LOCK_INIT(sc);

	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		err = ENOMEM;
		goto out;
	}
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resources.\n");
		err = ENOMEM;
		goto out;
	}

	/* New or old version; this chooses the buffer size. */
	sc->is_emacb    = at91_is_sam9();
	sc->rx_buf_size = RX_BUF_SIZE(sc);

	err = ate_activate(dev);
	if (err)
		goto out;

	/* Default to what the boot ROM did. */
	if (!sc->is_emacb)
		sc->use_rmii =
		    (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;
	else
		sc->use_rmii =
		    (RD4(sc, ETHB_UIO) & ETHB_UIO_RMII) == ETHB_UIO_RMII;

#ifdef AT91_ATE_USE_RMII
	/* Compile-time override */
	sc->use_rmii = 1;
#endif
	/* Sysctls */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
	    CTLFLAG_RW, &sc->use_rmii, 0, "rmii in use");

	/* Calling atestop before ifp is set is OK. */
	ATE_LOCK(sc);
	atestop(sc);
	ATE_UNLOCK(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	if ((err = ate_get_mac(sc, eaddr)) != 0) {
		/* No MAC address configured.  Generate a random one. */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		rnd = arc4random();

		/*
		 * Set OUI to convenient locally assigned address.  'b'
		 * is 0x62, which has the locally assigned bit set, and
		 * the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >>  8) & 0xff;
		eaddr[5] = (rnd >>  0) & 0xff;
	}

	sc->ifp = ifp = if_alloc(IFT_ETHER);
	err = mii_attach(dev, &sc->miibus, ifp, ate_ifmedia_upd,
	    ate_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto out;
	}
	/*
	 * XXX: Clear the isolate bit, or the link won't come up,
	 * at least on the HL201.
	 */
	ate_miibus_writereg(dev, 0, 0, 0x3000);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;	/* The hw bits are already set. */
	ifp->if_start = atestart;
	ifp->if_ioctl = ateioctl;
	ifp->if_init = ateinit;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
	sc->if_flags = ifp->if_flags;

	ether_ifattach(ifp, eaddr);

	/* Activate the interrupt. */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ate_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not establish interrupt handler.\n");
		ether_ifdetach(ifp);
		goto out;
	}

out:
	if (err)
		ate_detach(dev);
	return (err);
}

static int
ate_detach(device_t dev)
{
	struct ate_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		ATE_LOCK(sc);
		sc->flags |= ATE_FLAG_DETACHING;
		atestop(sc);
		ATE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		ether_ifdetach(ifp);
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ate_deactivate(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}
	ATE_LOCK_DESTROY(sc);
	return (0);
}

static void
ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static void
ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;

	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	sc->rx_descs[sc->rxhead].addr = segs[0].ds_addr;
	sc->rx_descs[sc->rxhead].status = 0;
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
}

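/*
 * The EMAC hashes a multicast address down to a 6-bit index into the
 * 64-bit hash register pair (ETH_HSL/ETH_HSH).  Index bit j is the XOR
 * of destination-address bits j, j+6, j+12, ..., j+42; the loop below
 * folds the 48 address bits one at a time.
 */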
static uint32_t
ate_mac_hash(const uint8_t *buf)
{
	uint32_t index = 0;
	int i;

	for (i = 0; i < 48; i++)
		index ^= ((buf[i >> 3] >> (i & 7)) & 1) << (i % 6);
	return (index);
}

/*
 * Compute the multicast filter for this device.
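 * Returns nonzero if the caller should enable multicast hash matching
 * (ETH_CFG_MTI), zero if the hash filter should be left off.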
 */
static int
ate_setmcast(struct ate_softc *sc)
{
	uint32_t index;
	uint32_t mcaf[2];
	u_char *af = (u_char *) mcaf;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;

	ifp = sc->ifp;

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		return (0);
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		WR4(sc, ETH_HSL, 0xffffffff);
		WR4(sc, ETH_HSH, 0xffffffff);
		return (1);
	}

	/* Compute the multicast hash. */
	mcaf[0] = 0;
	mcaf[1] = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		index = ate_mac_hash(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr));
		af[index >> 3] |= 1 << (index & 7);
	}
	if_maddr_runlock(ifp);

	/*
	 * Write the hash to the hash register.  This card can also
	 * match unicast as well as multicast addresses using this
	 * register for easier bridging operations, but we don't take
	 * advantage of that.  Locks here are to avoid LOR with the
	 * if_maddr_rlock, but might not be strictly necessary.
	 */
	WR4(sc, ETH_HSL, mcaf[0]);
	WR4(sc, ETH_HSH, mcaf[1]);
	return (mcaf[0] || mcaf[1]);
}

static int
ate_activate(device_t dev)
{
	struct ate_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* Allocate DMA tags and maps for TX mbufs. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->mtag))
		goto errout;
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		if (bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]))
			goto errout;
	}

	/* DMA tag and map for the RX descriptors. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), sizeof(eth_rx_desc_t),
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ATE_NUM_RX_DESCR * sizeof(eth_rx_desc_t), 1,
	    ATE_NUM_RX_DESCR * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
	    &sc->sc_mtx, &sc->rx_desc_tag))
		goto errout;
	if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
		goto errout;
	if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
	    sc->rx_descs, ATE_NUM_RX_DESCR * sizeof(eth_rx_desc_t),
	    ate_getaddr, &sc->rx_desc_phys, 0) != 0)
		goto errout;

	/* Allocate DMA tags and maps for RX buffers. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sc->rx_buf_size, 1, sc->rx_buf_size, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->rx_tag))
		goto errout;

	/*
	 * Allocate our RX buffers.
	 * This chip has a RX structure that's filled in.
	 * XXX On MACB (SAM9 part) we should receive directly into mbufs
	 * to avoid the copy.  XXX
	 */
	for (sc->rxhead = 0; sc->rxhead < ATE_RX_MEMORY / sc->rx_buf_size;
	    sc->rxhead++) {
		if (bus_dmamem_alloc(sc->rx_tag,
		    (void **)&sc->rx_buf[sc->rxhead], BUS_DMA_NOWAIT,
		    &sc->rx_map[sc->rxhead]) != 0)
			goto errout;

		if (bus_dmamap_load(sc->rx_tag, sc->rx_map[sc->rxhead],
		    sc->rx_buf[sc->rxhead], sc->rx_buf_size,
		    ate_load_rx_buf, sc, 0) != 0) {
			printf("bus_dmamap_load failed\n");
			goto errout;
		}
		bus_dmamap_sync(sc->rx_tag, sc->rx_map[sc->rxhead],
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * For the last buffer, set the wrap bit so the controller
	 * restarts from the first descriptor.
	 */
	sc->rx_descs[--sc->rxhead].addr |= ETH_WRAP_BIT;
	sc->rxhead = 0;

	/* Flush the memory for the EMAC rx descriptors. */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);

	/* Write the descriptor queue address. */
	WR4(sc, ETH_RBQP, sc->rx_desc_phys);

	/*
	 * DMA tag and map for the TX descriptors.
	 * XXX Old EMAC (not EMACB) doesn't really need DMA'able
	 * memory.  We could just malloc it. gja XXX
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), sizeof(eth_tx_desc_t),
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ATE_MAX_TX_BUFFERS * sizeof(eth_tx_desc_t), 1,
	    ATE_MAX_TX_BUFFERS * sizeof(eth_tx_desc_t), 0, busdma_lock_mutex,
	    &sc->sc_mtx, &sc->tx_desc_tag) != 0)
		goto errout;

	if (bus_dmamem_alloc(sc->tx_desc_tag, (void **)&sc->tx_descs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->tx_desc_map) != 0)
		goto errout;

	if (bus_dmamap_load(sc->tx_desc_tag, sc->tx_desc_map,
	    sc->tx_descs, ATE_MAX_TX_BUFFERS * sizeof(eth_tx_desc_t),
	    ate_getaddr, &sc->tx_desc_phys, 0) != 0)
		goto errout;

	/* Initialize descriptors; mark all empty. */
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		sc->tx_descs[i].addr = 0;
		sc->tx_descs[i].status = ETHB_TX_USED;
		sc->sent_mbuf[i] = NULL;
	}

	/* Mark the last entry to cause a wrap when indexing through. */
	sc->tx_descs[ATE_MAX_TX_BUFFERS - 1].status =
	    ETHB_TX_WRAP | ETHB_TX_USED;

	/* Flush the memory for the EMAC tx descriptors. */
	bus_dmamap_sync(sc->tx_desc_tag, sc->tx_desc_map, BUS_DMASYNC_PREWRITE);

	sc->txhead = sc->txtail = 0;
	if (sc->is_emacb) {
		/* Write the descriptor queue address. */
		WR4(sc, ETHB_TBQP, sc->tx_desc_phys);

		/* EMACB: Enable transceiver input clock. */
		WR4(sc, ETHB_UIO, RD4(sc, ETHB_UIO) | ETHB_UIO_CLKE);
	}

	return (0);

errout:
	return (ENOMEM);
}

static void
ate_deactivate(struct ate_softc *sc)
{
	int i;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	if (sc->mtag != NULL) {
		for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
			if (sc->sent_mbuf[i] != NULL) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[i],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
				m_freem(sc->sent_mbuf[i]);
			}
			bus_dmamap_destroy(sc->mtag, sc->tx_map[i]);
			sc->sent_mbuf[i] = NULL;
			sc->tx_map[i] = NULL;
		}
		bus_dma_tag_destroy(sc->mtag);
	}
	if (sc->rx_desc_tag != NULL && sc->rx_descs != NULL &&
	    sc->rx_desc_phys != 0) {
		bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx_desc_tag, sc->rx_desc_map);
		sc->rx_desc_phys = 0;
	}
	if (sc->rx_tag != NULL) {
		for (i = 0; sc->rx_buf[i] != NULL; i++) {
			if (sc->rx_descs[i].addr != 0) {
				bus_dmamap_sync(sc->rx_tag,
				    sc->rx_map[i],
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rx_tag,
				    sc->rx_map[i]);
				sc->rx_descs[i].addr = 0;
			}
			bus_dmamem_free(sc->rx_tag, sc->rx_buf[i],
			    sc->rx_map[i]);
			sc->rx_buf[i] = NULL;
			sc->rx_map[i] = NULL;
		}
		bus_dma_tag_destroy(sc->rx_tag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL)
			bus_dmamem_free(sc->rx_desc_tag, sc->rx_descs,
			    sc->rx_desc_map);
		bus_dma_tag_destroy(sc->rx_desc_tag);
		sc->rx_descs = NULL;
		sc->rx_desc_tag = NULL;
	}

	if (sc->is_emacb)
		WR4(sc, ETHB_UIO, RD4(sc, ETHB_UIO) & ~ETHB_UIO_CLKE);
}

/*
 * Change media according to request.
 */
static int
ate_ifmedia_upd(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_mediachg(mii);
	ATE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ATE_UNLOCK(sc);
}

static void
ate_stat_update(struct ate_softc *sc, int active)
{
	uint32_t reg;

	/*
	 * The speed and full/half-duplex state need to be reflected
	 * in the ETH_CFG register.
	 */
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_SPD | ETH_CFG_FD);
	if (IFM_SUBTYPE(active) != IFM_10_T)
		reg |= ETH_CFG_SPD;
	if (active & IFM_FDX)
		reg |= ETH_CFG_FD;
	WR4(sc, ETH_CFG, reg);
}

static void
ate_tick(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	int active;
	uint32_t c;

	/*
	 * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and will ask
	 * the MII whether there's a link if this bit is clear.  Not sure
	 * if we should do the same thing here or not.
	 */
	ATE_ASSERT_LOCKED(sc);
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    active != mii->mii_media_active)
			ate_stat_update(sc, mii->mii_media_active);
	}

	/*
	 * Update the stats as best we can.  When we're done, clear
	 * the status counters and start over.  We're supposed to read these
	 * registers often enough that they won't overflow.  Hopefully
	 * once a second is often enough.  Some don't map well to
	 * the dot3Stats mib, so for those we just count them as general
	 * errors.  Stats for iframes, ibytes, oframes and obytes are
	 * collected elsewhere.  These registers zero on a read to prevent
	 * races.  For all the collision stats, also update the collision
	 * stats for the interface.
	 */
	sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
	sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
	c = RD4(sc, ETH_SCOL);
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSingleCollisionFrames += c;
	c = RD4(sc, ETH_MCOL);
	sc->mibdata.dot3StatsMultipleCollisionFrames += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
	sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
	c = RD4(sc, ETH_LCOL);
	sc->mibdata.dot3StatsLateCollisions += c;
	ifp->if_collisions += c;
	c = RD4(sc, ETH_ECOL);
	sc->mibdata.dot3StatsExcessiveCollisions += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
	sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
	sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);

	/*
	 * Not sure where to lump these, so count them against the errors
	 * for the interface.
	 */
	sc->ifp->if_oerrors += RD4(sc, ETH_TUE);
	sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
	    RD4(sc, ETH_USF);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

static void
ate_set_mac(struct ate_softc *sc, u_char *eaddr)
{

	WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
	    (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
}

static int
ate_get_mac(struct ate_softc *sc, u_char *eaddr)
{
	bus_size_t sa_low_reg[] = { ETH_SA1L, ETH_SA2L, ETH_SA3L, ETH_SA4L };
	bus_size_t sa_high_reg[] = { ETH_SA1H, ETH_SA2H, ETH_SA3H, ETH_SA4H };
	uint32_t low, high;
	int i;

	/*
	 * The boot loader may have set up the MAC with one or more
	 * addresses; grab the first one found in the SA[1-4][HL] registers.
	 */
	for (i = 0; i < 4; i++) {
		low = RD4(sc, sa_low_reg[i]);
		high = RD4(sc, sa_high_reg[i]);
		if ((low | (high & 0xffff)) != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			return (0);
		}
	}
	return (ENXIO);
}

static void
ate_intr(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mbuf *mb;
	eth_rx_desc_t	*rxdhead;
	uint32_t status, reg, idx;
	int remain, count, done;

	status = RD4(sc, ETH_ISR);
	if (status == 0)
		return;

	if (status & ETH_ISR_RCOM) {
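		/*
		 * The ETH_CPU_OWNER bit in a descriptor's address word is
		 * the ownership flag: the EMAC sets it once it has filled
		 * the buffer, and we hand the descriptor back further down
		 * by clearing it.
		 */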

		bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
		    BUS_DMASYNC_POSTREAD);

		rxdhead = &sc->rx_descs[sc->rxhead];
		while (rxdhead->addr & ETH_CPU_OWNER) {
			if (!sc->is_emacb) {
				/*
				 * Simulate SAM9 FIRST/LAST bits for RM9200.
				 * The RM9200 EMAC has only one RX buffer per
				 * packet, but sometimes we are handed a
				 * zero-length packet.
				 */
				if ((rxdhead->status & ETH_LEN_MASK) == 0)
					rxdhead->status = 0; /* Mark error */
				else
					rxdhead->status |= ETH_BUF_FIRST |
					    ETH_BUF_LAST;
			}

			if ((rxdhead->status & ETH_BUF_FIRST) == 0) {
				/*
				 * Something went wrong during RX, so release
				 * back to the EMAC all buffers of invalid
				 * packets.
				 */
				rxdhead->status = 0;
				rxdhead->addr &= ~ETH_CPU_OWNER;
				sc->rxhead = NEXT_RX_IDX(sc, sc->rxhead);
				rxdhead = &sc->rx_descs[sc->rxhead];
				continue;
			}

			/* Find the end of the packet or the start of the next. */
			idx = sc->rxhead;
			if ((sc->rx_descs[idx].status & ETH_BUF_LAST) == 0) {
				idx = NEXT_RX_IDX(sc, idx);

				while ((sc->rx_descs[idx].addr & ETH_CPU_OWNER) &&
				    ((sc->rx_descs[idx].status &
				    (ETH_BUF_FIRST | ETH_BUF_LAST)) == 0))
					idx = NEXT_RX_IDX(sc, idx);
			}

			/* Packet NOT yet completely in memory; we are done. */
			if ((sc->rx_descs[idx].addr & ETH_CPU_OWNER) == 0 ||
			    ((sc->rx_descs[idx].status &
			    (ETH_BUF_FIRST | ETH_BUF_LAST)) == 0))
				break;

			/* Packets with no end descriptor are invalid. */
			if ((sc->rx_descs[idx].status & ETH_BUF_LAST) == 0) {
				rxdhead->status &= ~ETH_BUF_FIRST;
				continue;
			}

			/* The FCS is not copied into the mbuf. */
			remain = (sc->rx_descs[idx].status & ETH_LEN_MASK) - 4;

			/* Get an appropriately sized mbuf. */
			if (remain + ETHER_ALIGN >= MINCLSIZE)
				mb = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
			else
				MGETHDR(mb, M_DONTWAIT, MT_DATA);

			if (mb == NULL) {
				sc->ifp->if_iqdrops++;
				rxdhead->status = 0;
				continue;
			}
			mb->m_data += ETHER_ALIGN;
			mb->m_pkthdr.rcvif = ifp;

			WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));	/* Reset status */

			/* Now process the buffers that make up the packet. */
			do {
				/*
				 * The last buffer may hold just 1-4 bytes of
				 * FCS, so remain may be zero for the last
				 * descriptor.
				 */
				if (remain > 0) {
					/* Make sure we get the current bytes. */
					bus_dmamap_sync(sc->rx_tag,
					    sc->rx_map[sc->rxhead],
					    BUS_DMASYNC_POSTREAD);

					count = MIN(remain, sc->rx_buf_size);

					/*
					 * XXX Performance robbing copy.  Could
					 * receive directly into mbufs if not an
					 * RM9200.  XXX
					 */
					m_append(mb, count,
					    sc->rx_buf[sc->rxhead]);
					remain -= count;
				}

				done = (rxdhead->status & ETH_BUF_LAST) != 0;

				/* Return the descriptor to the EMAC. */
				rxdhead->status = 0;
				rxdhead->addr &= ~ETH_CPU_OWNER;
				bus_dmamap_sync(sc->rx_desc_tag,
				    sc->rx_desc_map, BUS_DMASYNC_PREWRITE);

				/* Move on to the next descriptor, with wrap. */
				sc->rxhead = NEXT_RX_IDX(sc, sc->rxhead);
				rxdhead = &sc->rx_descs[sc->rxhead];
			} while (!done);

			if (mb != NULL) {
				ifp->if_ipackets++;
				(*ifp->if_input)(ifp, mb);
			}
		}
	}

	if (status & ETH_ISR_TCOM) {
		bus_dmamap_sync(sc->tx_desc_tag, sc->tx_desc_map,
		    BUS_DMASYNC_POSTREAD);

		ATE_LOCK(sc);
		/* XXX The TSR register should be cleared. */
		if (!sc->is_emacb) {
			/* Simulate a transmit descriptor table. */

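			/*
			 * The RM9200 EMAC has no descriptor ring in memory,
			 * just two on-chip transmit slots (the ping-pong
			 * buffers noted above).  Fold the hardware's TSR
			 * state back onto the first two software descriptors
			 * so the cleanup loop below can treat both EMAC
			 * flavors the same way.
			 */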
			/* First packet done */
			if (sc->txtail < sc->txhead)
				sc->tx_descs[sc->txtail].status |= ETHB_TX_USED;

			/* Second packet done */
			if (sc->txtail + 1 < sc->txhead &&
			    RD4(sc, ETH_TSR) & ETH_TSR_IDLE)
				sc->tx_descs[sc->txtail + 1].status |=
				    ETHB_TX_USED;
		}

		while (sc->txtail != sc->txhead &&
		    sc->tx_descs[sc->txtail].status & ETHB_TX_USED) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txtail],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[sc->txtail]);
			m_freem(sc->sent_mbuf[sc->txtail]);
			sc->tx_descs[sc->txtail].addr = 0;
			sc->sent_mbuf[sc->txtail] = NULL;

			ifp->if_opackets++;
			sc->txtail = NEXT_TX_IDX(sc, sc->txtail);
		}

		/* Flush descriptors to the EMAC. */
		bus_dmamap_sync(sc->tx_desc_tag, sc->tx_desc_map,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * We're no longer busy, so clear the busy flag and call the
		 * start routine to xmit more packets.
		 */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		atestart_locked(sc->ifp);
		ATE_UNLOCK(sc);
	}

	if (status & ETH_ISR_RBNA) {
		/* Workaround for RM9200 Errata #11 */
		if (bootverbose)
			device_printf(sc->dev, "RBNA workaround\n");
		reg = RD4(sc, ETH_CTL);
		WR4(sc, ETH_CTL, reg & ~ETH_CTL_RE);
		BARRIER(sc, ETH_CTL, 4, BUS_SPACE_BARRIER_WRITE);
		WR4(sc, ETH_CTL, reg | ETH_CTL_RE);
	}
}

/*
 * Reset and initialize the chip.
 */
static void
ateinit_locked(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t reg;

	ATE_ASSERT_LOCKED(sc);

	/*
	 * XXX TODO(3)
	 * We need to turn on the EMAC clock in the pmc.  With the
	 * default boot loader, this is already turned on.  However, we
	 * need to think about how best to turn it on/off as the interface
	 * is brought up/down, as well as dealing with the mii bus...
	 *
	 * We also need to multiplex the pins correctly (in board_xxx.c).
	 */

	/*
	 * There are two different ways that the mii bus is connected
	 * to this chip: MII or RMII.
	 */
	if (!sc->is_emacb) {
		/* RM9200 */
		reg = RD4(sc, ETH_CFG);
		if (sc->use_rmii)
			reg |= ETH_CFG_RMII;
		else
			reg &= ~ETH_CFG_RMII;
		WR4(sc, ETH_CFG, reg);
	} else {
		/* SAM9 */
		reg = ETHB_UIO_CLKE;
		reg |= (sc->use_rmii) ? ETHB_UIO_RMII : 0;
		WR4(sc, ETHB_UIO, reg);
	}

	ate_rxfilter(sc);

	/*
	 * Set the chip MAC address.
	 */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	ate_set_mac(sc, eaddr);

	/* Make sure we know the state of the TX queue. */
	sc->txhead = sc->txtail = 0;
	if (sc->is_emacb) {
		/* Write the descriptor queue address. */
		WR4(sc, ETHB_TBQP, sc->tx_desc_phys);
	}

	/*
	 * Turn on MACs and interrupt processing.
	 */
	WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
	WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);

	/* Enable big packets. */
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);

	/*
	 * Set the 'running' flag, clear the output active flag,
	 * and attempt to start the output.
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ate_stat_update(sc, mii->mii_media_active);
	atestart_locked(ifp);

	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

/*
 * Dequeue packets and transmit.
 */
static void
atestart_locked(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mbuf *m, *mdefrag;
	bus_dma_segment_t segs[1];
	int nseg, e;

	ATE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_descs[sc->txhead].status & ETHB_TX_USED) {
		/*
		 * Check to see if there's room to put another packet into the
		 * xmit queue.  The old EMAC version has a ping-pong buffer for
		 * xmit packets.  We use OACTIVE to indicate "we can stuff more
		 * into our buffers (clear) or not (set)."
		 */
		if (!sc->is_emacb) {
			/* The RM9200 has only two hardware entries. */
			if ((RD4(sc, ETH_TSR) & ETH_TSR_BNQ) == 0) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				return;
			}
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txhead],
		    m, segs, &nseg, 0);
		if (e == EFBIG) {
			mdefrag = m_defrag(m, M_DONTWAIT);
			if (mdefrag == NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				return;
			}
			m = mdefrag;
			e = bus_dmamap_load_mbuf_sg(sc->mtag,
			    sc->tx_map[sc->txhead], m, segs, &nseg, 0);
		}
		if (e != 0) {
			m_freem(m);
			continue;
		}
		sc->sent_mbuf[sc->txhead] = m;

		bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txhead],
		    BUS_DMASYNC_PREWRITE);

		/* Tell the hardware to xmit the packet. */
		if (!sc->is_emacb) {
			WR4(sc, ETH_TAR, segs[0].ds_addr);
			BARRIER(sc, ETH_TAR, 4, BUS_SPACE_BARRIER_WRITE);
			WR4(sc, ETH_TCR, segs[0].ds_len);
		} else {
			bus_dmamap_sync(sc->tx_desc_tag, sc->tx_desc_map,
			    BUS_DMASYNC_POSTWRITE);
			sc->tx_descs[sc->txhead].addr = segs[0].ds_addr;
			sc->tx_descs[sc->txhead].status = segs[0].ds_len |
			    (sc->tx_descs[sc->txhead].status & ETHB_TX_WRAP) |
			    ETHB_TX_BUF_LAST;
			bus_dmamap_sync(sc->tx_desc_tag, sc->tx_desc_map,
			    BUS_DMASYNC_PREWRITE);
			WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETHB_CTL_TGO);
		}
		sc->txhead = NEXT_TX_IDX(sc, sc->txhead);

		/* Tap off here if there is a bpf listener. */
		BPF_MTAP(ifp, m);
	}

	if ((sc->tx_descs[sc->txhead].status & ETHB_TX_USED) == 0)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
ateinit(void *xsc)
{
	struct ate_softc *sc = xsc;

	ATE_LOCK(sc);
	ateinit_locked(sc);
	ATE_UNLOCK(sc);
}

static void
atestart(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;

	ATE_LOCK(sc);
	atestart_locked(ifp);
	ATE_UNLOCK(sc);
}

/*
 * Turn off interrupts, and stop the NIC.  Can be called with sc->ifp NULL,
 * so be careful.
 */
static void
atestop(struct ate_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;
	if (ifp)
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	/*
	 * Enable some parts of the MAC that are needed always (like the
	 * MII bus).  This turns off the RE and TE bits, which will remain
	 * off until ateinit() is called to turn them on.  With RE and TE
	 * turned off, there's no DMA to worry about after this write.
	 */
	WR4(sc, ETH_CTL, ETH_CTL_MPE);

	/*
	 * Turn off all the configured options and revert to defaults.
	 */

	/*
	 * Make sure that the MDIO clock is less than 2.5 MHz.  We can no
	 * longer default to /32 since the SAM9 family may have MCK > 80 MHz.
	 */
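	/*
	 * With MDC = MCK / divisor, the 2.5 MHz ceiling works out to:
	 * MCK <= 20 MHz -> /8, <= 40 MHz -> /16, <= 80 MHz -> /32,
	 * and /64 otherwise.
	 */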
	if (at91_master_clock <= 20000000)
		WR4(sc, ETH_CFG, ETH_CFG_CLK_8);
	else if (at91_master_clock <= 40000000)
		WR4(sc, ETH_CFG, ETH_CFG_CLK_16);
	else if (at91_master_clock <= 80000000)
		WR4(sc, ETH_CFG, ETH_CFG_CLK_32);
	else
		WR4(sc, ETH_CFG, ETH_CFG_CLK_64);

	/*
	 * Turn off all the interrupts, and ack any pending ones by reading
	 * the ISR.
	 */
	WR4(sc, ETH_IDR, 0xffffffff);
	RD4(sc, ETH_ISR);

	/*
	 * Clear out the Transmit and Receive Status registers of any
	 * errors they may be reporting.
	 */
	WR4(sc, ETH_TSR, 0xffffffff);
	WR4(sc, ETH_RSR, 0xffffffff);

	/* Release TX resources. */
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		if (sc->sent_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
			m_freem(sc->sent_mbuf[i]);
			sc->sent_mbuf[i] = NULL;
		}
	}

	/* Turn off the transceiver input clock. */
	if (sc->is_emacb)
		WR4(sc, ETHB_UIO, RD4(sc, ETHB_UIO) & ~ETHB_UIO_CLKE);

	/*
	 * XXX we should power down the EMAC if it isn't in use, after
	 * putting it into loopback mode.  This saves about 400uA according
	 * to the datasheet.
	 */
}

static void
ate_rxfilter(struct ate_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;
	int enabled;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;

	/* Wipe out old filter settings. */
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_CAF | ETH_CFG_MTI | ETH_CFG_UNI);
	reg |= ETH_CFG_NBC;
	sc->flags &= ~ATE_FLAG_MULTICAST;

	/* Set new parameters. */
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		reg &= ~ETH_CFG_NBC;
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		reg |= ETH_CFG_CAF;
	} else {
		enabled = ate_setmcast(sc);
		if (enabled != 0) {
			reg |= ETH_CFG_MTI;
			sc->flags |= ATE_FLAG_MULTICAST;
		}
	}
	WR4(sc, ETH_CFG, reg);
}

static int
ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int drv_flags, flags;
	int mask, error, enabled;

	error = 0;
	flags = ifp->if_flags;
	drv_flags = ifp->if_drv_flags;
	switch (cmd) {
	case SIOCSIFFLAGS:
		ATE_LOCK(sc);
		if ((flags & IFF_UP) != 0) {
			if ((drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ate_rxfilter(sc);
			} else {
				if ((sc->flags & ATE_FLAG_DETACHING) == 0)
					ateinit_locked(sc);
			}
		} else if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			atestop(sc);
		}
		sc->if_flags = flags;
		ATE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ATE_LOCK(sc);
			enabled = ate_setmcast(sc);
			if (enabled != (sc->flags & ATE_FLAG_MULTICAST))
				ate_rxfilter(sc);
			ATE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			ATE_LOCK(sc);
			if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
				WR4(sc, ETH_CFG,
				    RD4(sc, ETH_CFG) | ETH_CFG_BIG);
				ifp->if_capenable |= IFCAP_VLAN_MTU;
			} else {
				WR4(sc, ETH_CFG,
				    RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
				ifp->if_capenable &= ~IFCAP_VLAN_MTU;
			}
			ATE_UNLOCK(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ate_child_detached(device_t dev, device_t child)
{
	struct ate_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

/*
 * MII bus support routines.
 */
static int
ate_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ate_softc *sc;
	int val;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here.
	 */

	sc = device_get_softc(dev);
	DELAY(1);	/* Hangs w/o this delay really 30.5us atm */
	WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
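	/* Busy-wait until the MDIO shift completes; IDLE is set when done. */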
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;

	return (val);
}

static int
ate_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ate_softc *sc;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here.
	 */

	sc = device_get_softc(dev);
	WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	return (0);
}

1431
1432static device_method_t ate_methods[] = {
1433	/* Device interface */
1434	DEVMETHOD(device_probe,		ate_probe),
1435	DEVMETHOD(device_attach,	ate_attach),
1436	DEVMETHOD(device_detach,	ate_detach),
1437
1438	/* Bus interface */
1439	DEVMETHOD(bus_child_detached,	ate_child_detached),
1440
1441	/* MII interface */
1442	DEVMETHOD(miibus_readreg,	ate_miibus_readreg),
1443	DEVMETHOD(miibus_writereg,	ate_miibus_writereg),
1444
1445	DEVMETHOD_END
1446};
1447
1448static driver_t ate_driver = {
1449	"ate",
1450	ate_methods,
1451	sizeof(struct ate_softc),
1452};
1453
1454DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, NULL, NULL);
1455DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, NULL, NULL);
1456MODULE_DEPEND(ate, miibus, 1, 1, 1);
1457MODULE_DEPEND(ate, ether, 1, 1, 1);
1458