/*-
 * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
 * Copyright (c) 2009 Greg Ansley.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* TODO
 *
 * 1) Turn on the clock in pmc?  Turn off?
 * 2) GPIO initialization in board setup code.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>
#include <net/if_var.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include "opt_at91.h"
#include <arm/at91/at91reg.h>
#include <arm/at91/at91var.h>
#include <arm/at91/if_atereg.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include "miibus_if.h"

/*
 * Driver-specific flags.
 */
#define	ATE_FLAG_DETACHING	0x01
#define	ATE_FLAG_MULTICAST	0x02

/*
 * The old EMAC assumes the whole packet fits in one buffer;
 * the new EMACB assumes all receive buffers are 128 bytes.
 */
#define	RX_BUF_SIZE(sc)	((sc)->is_emacb ? 128 : MCLBYTES)
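/*
 * For example, on EMACB a maximum-size 1518-byte frame is scattered
 * across ceil(1518 / 128) = 12 chained receive buffers, while the
 * RM9200 EMAC receives the whole frame into a single MCLBYTES
 * (2048-byte) cluster.
 */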

/*
 * The EMACB has an 11-bit counter for RX/TX descriptors,
 * for a maximum total of 1024 descriptors each.
 */
#define	ATE_MAX_RX_DESCR	1024
#define	ATE_MAX_TX_DESCR	1024

/* How many buffers to allocate */
#define	ATE_MAX_TX_BUFFERS	4	/* We have ping-pong tx buffers */

/* How much memory to use for rx buffers */
#define	ATE_RX_MEMORY		(ATE_MAX_RX_DESCR * 128)

/* Actual number of descriptors we allocate */
#define	ATE_NUM_RX_DESCR	ATE_MAX_RX_DESCR
#define	ATE_NUM_TX_DESCR	ATE_MAX_TX_BUFFERS

#if ATE_NUM_TX_DESCR > ATE_MAX_TX_DESCR
#error "Can't have more TX buffers than descriptors"
#endif
#if ATE_NUM_RX_DESCR > ATE_MAX_RX_DESCR
#error "Can't have more RX buffers than descriptors"
#endif

/* Wrap indexes the same way the hardware does */
#define	NEXT_RX_IDX(sc, cur)	\
    (((sc)->rx_descs[cur].addr & ETH_WRAP_BIT) ? 0 : ((cur) + 1))

#define	NEXT_TX_IDX(sc, cur)	\
    (((sc)->tx_descs[cur].status & ETHB_TX_WRAP) ? 0 : ((cur) + 1))
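/*
 * E.g., with ATE_MAX_TX_BUFFERS == 4 the TX indices cycle
 * 0, 1, 2, 3, 0, ... because ate_activate() sets ETHB_TX_WRAP on the
 * last descriptor.
 */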

struct ate_softc
{
	struct ifnet	*ifp;		/* ifnet pointer */
	struct mtx	sc_mtx;		/* Basically a perimeter lock */
	device_t	dev;		/* Myself */
	device_t	miibus;		/* My child miibus */
	struct resource *irq_res;	/* IRQ resource */
	struct resource	*mem_res;	/* Memory resource */
	struct callout  tick_ch;	/* Tick callout */
	struct ifmib_iso_8802_3 mibdata; /* Stuff for network mgmt */
	bus_dma_tag_t   mtag;		/* bus dma tag for mbufs */
	bus_dma_tag_t   rx_tag;
	bus_dma_tag_t   rx_desc_tag;
	bus_dmamap_t    rx_desc_map;
	bus_dmamap_t    rx_map[ATE_MAX_RX_DESCR];
	bus_addr_t	rx_desc_phys;   /* PA of rx descriptors */
	eth_rx_desc_t   *rx_descs;	/* VA of rx descriptors */
	void		*rx_buf[ATE_NUM_RX_DESCR]; /* RX buffer space */
	int		rxhead;		/* Current RX map/desc index */
	uint32_t	rx_buf_size;    /* Size of Rx buffers */

	bus_dma_tag_t   tx_desc_tag;
	bus_dmamap_t    tx_desc_map;
	bus_dmamap_t    tx_map[ATE_MAX_TX_BUFFERS];
	bus_addr_t	tx_desc_phys;   /* PA of tx descriptors */
	eth_tx_desc_t   *tx_descs;	/* VA of tx descriptors */
	int		txhead;		/* Next TX map/desc index to use */
	int		txtail;		/* Oldest unreclaimed TX map/desc index */
	struct mbuf	*sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
	void		*intrhand;	/* Interrupt handle */
	int		flags;
	int		if_flags;
	int		use_rmii;
	int		is_emacb;	/* SAM9x hardware version */
};

static inline uint32_t
RD4(struct ate_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}

static inline void
WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}

static inline void
BARRIER(struct ate_softc *sc, bus_size_t off, bus_size_t len, int flags)
{

	bus_barrier(sc->mem_res, off, len, flags);
}

#define	ATE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ATE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	ATE_LOCK_INIT(_sc)					\
	mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->dev),	\
	    MTX_NETWORK_LOCK, MTX_DEF)
#define	ATE_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx)
#define	ATE_ASSERT_LOCKED(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define	ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)

static devclass_t ate_devclass;

/*
 * ifnet entry points.
 */
static void	ateinit_locked(void *);
static void	atestart_locked(struct ifnet *);

static void	ateinit(void *);
static void	atestart(struct ifnet *);
static void	atestop(struct ate_softc *);
static int	ateioctl(struct ifnet *ifp, u_long, caddr_t);

/*
 * Bus entry points.
 */
static int	ate_probe(device_t dev);
static int	ate_attach(device_t dev);
static int	ate_detach(device_t dev);
static void	ate_intr(void *);

/*
 * Helper routines.
 */
static int	ate_activate(device_t dev);
static void	ate_deactivate(struct ate_softc *sc);
static int	ate_ifmedia_upd(struct ifnet *ifp);
static void	ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	ate_get_mac(struct ate_softc *sc, u_char *eaddr);
static void	ate_set_mac(struct ate_softc *sc, u_char *eaddr);
static void	ate_rxfilter(struct ate_softc *sc);

static int	ate_miibus_readreg(device_t dev, int phy, int reg);
static int	ate_miibus_writereg(device_t dev, int phy, int reg, int data);

/*
 * The AT91 family of products has an ethernet interface called EMAC.
 * However, it isn't self-identifying.  Without FDT, we count on the
 * parent bus code to add ate devices only where they really exist, so
 * we do nothing here to identify the device and just set its name.
 * With FDT, the compatible property makes the device self-identifying.
 */
static int
ate_probe(device_t dev)
{
#ifdef FDT
	if (!ofw_bus_is_compatible(dev, "cdns,at91rm9200-emac") &&
	    !ofw_bus_is_compatible(dev, "cdns,emac") &&
	    !ofw_bus_is_compatible(dev, "cdns,at32ap7000-macb"))
		return (ENXIO);
#endif
	device_set_desc(dev, "EMAC");
	return (0);
}

#ifdef FDT
/*
 * We have to know whether we're using MII or RMII attachment for the
 * MACB to talk to the PHY correctly.  With FDT, we use RMII if there is
 * a "phy-mode" property equal to "rmii"; otherwise, MII mode is used.
 */
static void
ate_set_rmii(struct ate_softc *sc)
{
	phandle_t node;
	char prop[10];
	ssize_t len;

	node = ofw_bus_get_node(sc->dev);
	memset(prop, 0, sizeof(prop));
	len = OF_getproplen(node, "phy-mode");
	if (len != 4)
		return;
	if (OF_getprop(node, "phy-mode", prop, len) != len)
		return;
	if (strncmp(prop, "rmii", 4) == 0)
		sc->use_rmii = 1;
}
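
/*
 * A hypothetical device-tree fragment that would select RMII here (the
 * node name and address are illustrative only):
 *
 *	macb0: ethernet@fffc4000 {
 *		compatible = "cdns,at32ap7000-macb";
 *		phy-mode = "rmii";
 *	};
 */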

#else
/*
 * We have to know whether we're using MII or RMII attachment for the
 * MACB to talk to the PHY correctly.  Without FDT, there's no good way
 * to do this.  So, if the config file has 'option AT91_ATE_USE_RMII',
 * we'll force RMII.  Otherwise, we'll use whatever the boot loader set
 * up.  Either it set up RMII or MII, in which case we'll get it right,
 * or it did nothing, in which case we fall back to MII, with the option
 * overriding if present.
 */
static void
ate_set_rmii(struct ate_softc *sc)
{

	/* Default to what the boot ROM did */
	if (!sc->is_emacb)
		sc->use_rmii =
		    (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;
	else
		sc->use_rmii =
		    (RD4(sc, ETHB_UIO) & ETHB_UIO_RMII) == ETHB_UIO_RMII;

#ifdef AT91_ATE_USE_RMII
	/* Compile-time override */
	sc->use_rmii = 1;
#endif
}
#endif

static int
ate_attach(device_t dev)
{
	struct ate_softc *sc;
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	u_char eaddr[ETHER_ADDR_LEN];
	uint32_t rnd;
	int rid, err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	ATE_LOCK_INIT(sc);

	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		err = ENOMEM;
		goto out;
	}
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resources.\n");
		err = ENOMEM;
		goto out;
	}

	/* New or old version; this chooses the buffer size. */
#ifdef FDT
	sc->is_emacb = ofw_bus_is_compatible(dev, "cdns,at32ap7000-macb");
#else
	sc->is_emacb = at91_is_sam9() || at91_is_sam9xe();
#endif
	sc->rx_buf_size = RX_BUF_SIZE(sc);

	err = ate_activate(dev);
	if (err)
		goto out;

	ate_set_rmii(sc);

	/* Sysctls */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
	    CTLFLAG_RW, &sc->use_rmii, 0, "rmii in use");

	/* Calling atestop before ifp is set is OK. */
	ATE_LOCK(sc);
	atestop(sc);
	ATE_UNLOCK(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	if ((err = ate_get_mac(sc, eaddr)) != 0) {
		/* No MAC address configured.  Generate a random one. */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		rnd = arc4random();

		/*
		 * Set the OUI to a convenient locally assigned address.
		 * 'b' is 0x62, which has the locally assigned bit set
		 * and the broadcast/multicast bit clear.
		 */
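		/*
		 * In bits: 0x62 == 0110 0010b, so bit 0 (the I/G,
		 * multicast bit) is clear and bit 1 (the U/L, locally
		 * administered bit) is set.
		 */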
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >>  8) & 0xff;
		eaddr[5] = (rnd >>  0) & 0xff;
	}

	sc->ifp = ifp = if_alloc(IFT_ETHER);
	err = mii_attach(dev, &sc->miibus, ifp, ate_ifmedia_upd,
	    ate_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto out;
	}
	/*
	 * XXX: Clear the isolate bit, or we won't get up,
	 * at least on the HL201.
	 */
	ate_miibus_writereg(dev, 0, 0, 0x3000);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;	/* The hw bits are already set. */
	ifp->if_start = atestart;
	ifp->if_ioctl = ateioctl;
	ifp->if_init = ateinit;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
	sc->if_flags = ifp->if_flags;

	ether_ifattach(ifp, eaddr);

	/* Activate the interrupt. */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ate_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not establish interrupt handler.\n");
		ether_ifdetach(ifp);
		goto out;
	}

out:
	if (err)
		ate_detach(dev);
	return (err);
}

static int
ate_detach(device_t dev)
{
	struct ate_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		ATE_LOCK(sc);
		sc->flags |= ATE_FLAG_DETACHING;
		atestop(sc);
		ATE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		ether_ifdetach(ifp);
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ate_deactivate(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}
	ATE_LOCK_DESTROY(sc);
	return (0);
}

static void
ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static void
ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;

	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	sc->rx_descs[sc->rxhead].addr = segs[0].ds_addr;
	sc->rx_descs[sc->rxhead].status = 0;
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
}

static uint32_t
ate_mac_hash(const uint8_t *buf)
{
	uint32_t index = 0;

	/* XOR-fold the 48 address bits into a 6-bit hash index. */
	for (int i = 0; i < 48; i++)
		index ^= ((buf[i >> 3] >> (i & 7)) & 1) << (i % 6);
	return (index);
}
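
/*
 * Worked example: for the multicast address 01:00:5e:00:00:01,
 * XOR-folding the 48 bits (LSB of each octet first, matching the
 * on-wire bit order) yields index 38, so ate_setmcast() below sets
 * bit 38 of the 64-bit hash filter, i.e. bit 6 of ETH_HSH.
 */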

/*
 * Compute the multicast filter for this device.
 */
static int
ate_setmcast(struct ate_softc *sc)
{
	uint32_t index;
	uint32_t mcaf[2];
	u_char *af = (u_char *)mcaf;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;

	ifp = sc->ifp;

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		return (0);
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		WR4(sc, ETH_HSL, 0xffffffff);
		WR4(sc, ETH_HSH, 0xffffffff);
		return (1);
	}

	/* Compute the multicast hash. */
	mcaf[0] = 0;
	mcaf[1] = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		index = ate_mac_hash(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr));
		af[index >> 3] |= 1 << (index & 7);
	}
	if_maddr_runlock(ifp);

	/*
	 * Write the hash to the hash register.  This card can also
	 * accept unicast packets as well as multicast packets using this
	 * register for easier bridging operations, but we don't take
	 * advantage of that.  Locks here are to avoid LOR with the
	 * if_maddr_rlock, but might not be strictly necessary.
	 */
	WR4(sc, ETH_HSL, mcaf[0]);
	WR4(sc, ETH_HSH, mcaf[1]);
	return (mcaf[0] || mcaf[1]);
}

static int
ate_activate(device_t dev)
{
	struct ate_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* Allocate DMA tags and maps for TX mbufs. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->mtag))
		goto errout;
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		if (bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]))
			goto errout;
	}

	/* DMA tag and map for the RX descriptors. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), sizeof(eth_rx_desc_t),
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ATE_NUM_RX_DESCR * sizeof(eth_rx_desc_t), 1,
	    ATE_NUM_RX_DESCR * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
	    &sc->sc_mtx, &sc->rx_desc_tag))
		goto errout;
	if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
		goto errout;
	if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
	    sc->rx_descs, ATE_NUM_RX_DESCR * sizeof(eth_rx_desc_t),
	    ate_getaddr, &sc->rx_desc_phys, 0) != 0)
		goto errout;

	/* Allocate DMA tags and maps for RX buffers. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sc->rx_buf_size, 1, sc->rx_buf_size, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->rx_tag))
		goto errout;

	/*
	 * Allocate our RX buffers.
	 * This chip has an RX descriptor ring that we fill in here.
	 * XXX On MACB (SAM9 part) we should receive directly into mbufs
	 * to avoid the copy.  XXX
	 */
	sc->rxhead = 0;
	for (sc->rxhead = 0; sc->rxhead < ATE_RX_MEMORY / sc->rx_buf_size;
	    sc->rxhead++) {
		if (bus_dmamem_alloc(sc->rx_tag,
		    (void **)&sc->rx_buf[sc->rxhead], BUS_DMA_NOWAIT,
		    &sc->rx_map[sc->rxhead]) != 0)
			goto errout;

		if (bus_dmamap_load(sc->rx_tag, sc->rx_map[sc->rxhead],
		    sc->rx_buf[sc->rxhead], sc->rx_buf_size,
		    ate_load_rx_buf, sc, 0) != 0) {
			printf("bus_dmamap_load failed\n");
			goto errout;
		}
		bus_dmamap_sync(sc->rx_tag, sc->rx_map[sc->rxhead],
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * For the last buffer, set the wrap bit so the controller
	 * restarts from the first descriptor.
	 */
	sc->rx_descs[--sc->rxhead].addr |= ETH_WRAP_BIT;
	sc->rxhead = 0;

	/* Flush the memory for the EMAC rx descriptor. */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);

	/* Write the descriptor queue address. */
	WR4(sc, ETH_RBQP, sc->rx_desc_phys);

	/*
	 * DMA tag and map for the TX descriptors.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), sizeof(eth_tx_desc_t),
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ATE_MAX_TX_BUFFERS * sizeof(eth_tx_desc_t), 1,
	    ATE_MAX_TX_BUFFERS * sizeof(eth_tx_desc_t), 0, busdma_lock_mutex,
	    &sc->sc_mtx, &sc->tx_desc_tag) != 0)
		goto errout;

	if (bus_dmamem_alloc(sc->tx_desc_tag, (void **)&sc->tx_descs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->tx_desc_map) != 0)
		goto errout;

	if (bus_dmamap_load(sc->tx_desc_tag, sc->tx_desc_map,
	    sc->tx_descs, ATE_MAX_TX_BUFFERS * sizeof(eth_tx_desc_t),
	    ate_getaddr, &sc->tx_desc_phys, 0) != 0)
		goto errout;

	/* Initialize descriptors; mark all empty. */
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		sc->tx_descs[i].addr = 0;
		sc->tx_descs[i].status = ETHB_TX_USED;
		sc->sent_mbuf[i] = NULL;
	}

	/* Mark the last entry to cause a wrap when indexing through. */
	sc->tx_descs[ATE_MAX_TX_BUFFERS - 1].status =
	    ETHB_TX_WRAP | ETHB_TX_USED;

	/* Flush the memory for the EMAC tx descriptor. */
	bus_dmamap_sync(sc->tx_desc_tag, sc->tx_desc_map, BUS_DMASYNC_PREWRITE);

	sc->txhead = sc->txtail = 0;
	if (sc->is_emacb) {
		/* Write the descriptor queue address. */
		WR4(sc, ETHB_TBQP, sc->tx_desc_phys);

		/* EMACB: Enable the transceiver input clock. */
		WR4(sc, ETHB_UIO, RD4(sc, ETHB_UIO) | ETHB_UIO_CLKE);
	}

	return (0);

errout:
	return (ENOMEM);
}

static void
ate_deactivate(struct ate_softc *sc)
{
	int i;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	if (sc->mtag != NULL) {
		for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
			if (sc->sent_mbuf[i] != NULL) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[i],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
				m_freem(sc->sent_mbuf[i]);
			}
			bus_dmamap_destroy(sc->mtag, sc->tx_map[i]);
			sc->sent_mbuf[i] = NULL;
			sc->tx_map[i] = NULL;
		}
		bus_dma_tag_destroy(sc->mtag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL) {
			if (sc->rx_desc_phys != 0) {
				bus_dmamap_sync(sc->rx_desc_tag,
				    sc->rx_desc_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rx_desc_tag,
				    sc->rx_desc_map);
				sc->rx_desc_phys = 0;
			}
		}
	}
	if (sc->rx_tag != NULL) {
		for (i = 0; i < ATE_NUM_RX_DESCR && sc->rx_buf[i] != NULL;
		    i++) {
			if (sc->rx_descs[i].addr != 0) {
				bus_dmamap_sync(sc->rx_tag,
				    sc->rx_map[i],
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rx_tag,
				    sc->rx_map[i]);
				sc->rx_descs[i].addr = 0;
			}
			bus_dmamem_free(sc->rx_tag, sc->rx_buf[i],
			    sc->rx_map[i]);
			sc->rx_buf[i] = NULL;
		}
		bus_dma_tag_destroy(sc->rx_tag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL)
			bus_dmamem_free(sc->rx_desc_tag, sc->rx_descs,
			    sc->rx_desc_map);
		bus_dma_tag_destroy(sc->rx_desc_tag);
		sc->rx_descs = NULL;
		sc->rx_desc_tag = NULL;
	}

	if (sc->is_emacb)
		WR4(sc, ETHB_UIO, RD4(sc, ETHB_UIO) & ~ETHB_UIO_CLKE);
}

/*
 * Change media according to request.
 */
static int
ate_ifmedia_upd(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_mediachg(mii);
	ATE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ATE_UNLOCK(sc);
}

static void
ate_stat_update(struct ate_softc *sc, int active)
{
	uint32_t reg;

	/*
	 * The speed and full/half-duplex state needs to be reflected
	 * in the ETH_CFG register.
	 */
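	/*
	 * E.g., 100baseTX sets ETH_CFG_SPD and full duplex sets
	 * ETH_CFG_FD, so 10baseT half-duplex leaves both bits clear.
	 */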
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_SPD | ETH_CFG_FD);
	if (IFM_SUBTYPE(active) != IFM_10_T)
		reg |= ETH_CFG_SPD;
	if (active & IFM_FDX)
		reg |= ETH_CFG_FD;
	WR4(sc, ETH_CFG, reg);
}

static void
ate_tick(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	int active;
	uint32_t c;

	/*
	 * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and will ask
	 * the MII whether there's a link if this bit is clear.  Not sure
	 * if we should do the same thing here or not.
	 */
	ATE_ASSERT_LOCKED(sc);
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    active != mii->mii_media_active)
			ate_stat_update(sc, mii->mii_media_active);
	}

	/*
	 * Update the stats as best we can.  When we're done, clear
	 * the status counters and start over.  We're supposed to read these
	 * registers often enough that they won't overflow.  Hopefully
	 * once a second is often enough.  Some don't map well to
	 * the dot3Stats mib, so for those we just count them as general
	 * errors.  Stats for iframes, ibytes, oframes and obytes are
	 * collected elsewhere.  These registers zero on a read to prevent
	 * races.  For all the collision stats, also update the collision
	 * stats for the interface.
	 */
	sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
	sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
	c = RD4(sc, ETH_SCOL);
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, c);
	sc->mibdata.dot3StatsSingleCollisionFrames += c;
	c = RD4(sc, ETH_MCOL);
	sc->mibdata.dot3StatsMultipleCollisionFrames += c;
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, c);
	sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
	sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
	c = RD4(sc, ETH_LCOL);
	sc->mibdata.dot3StatsLateCollisions += c;
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, c);
	c = RD4(sc, ETH_ECOL);
	sc->mibdata.dot3StatsExcessiveCollisions += c;
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, c);
	sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
	sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
	sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);

	/*
	 * Not sure where to lump these, so count them against the errors
	 * for the interface.
	 */
	if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, RD4(sc, ETH_TUE));
	if_inc_counter(sc->ifp, IFCOUNTER_IERRORS,
	    RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) + RD4(sc, ETH_USF));

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

static void
ate_set_mac(struct ate_softc *sc, u_char *eaddr)
{

	WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
	    (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
}

static int
ate_get_mac(struct ate_softc *sc, u_char *eaddr)
{
	bus_size_t sa_low_reg[] = { ETH_SA1L, ETH_SA2L, ETH_SA3L, ETH_SA4L };
	bus_size_t sa_high_reg[] = { ETH_SA1H, ETH_SA2H, ETH_SA3H, ETH_SA4H };
	uint32_t low, high;
	int i;

	/*
	 * The boot loader may have set up the MAC with an address or
	 * addresses; grab the first MAC address from the SA[1-4][HL]
	 * registers.
	 */
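	/*
	 * The layout mirrors ate_set_mac() above: SA1L holds the first
	 * four octets, lowest-numbered octet in the low byte, and SA1H
	 * the last two.  E.g., 00:11:22:33:44:55 reads back as
	 * SA1L == 0x33221100 and SA1H == 0x00005544.
	 */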
	for (i = 0; i < 4; i++) {
		low = RD4(sc, sa_low_reg[i]);
		high = RD4(sc, sa_high_reg[i]);
		if ((low | (high & 0xffff)) != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			return (0);
		}
	}
	return (ENXIO);
}

static void
ate_intr(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mbuf *mb;
	eth_rx_desc_t *rxdhead;
	uint32_t status, reg, idx;
	int remain, count, done;

	status = RD4(sc, ETH_ISR);
	if (status == 0)
		return;

	if (status & ETH_ISR_RCOM) {
		bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
		    BUS_DMASYNC_POSTREAD);

		rxdhead = &sc->rx_descs[sc->rxhead];
		while (rxdhead->addr & ETH_CPU_OWNER) {
			if (!sc->is_emacb) {
				/*
				 * Simulate the SAM9 FIRST/LAST bits for the
				 * RM9200.  The RM9200 EMAC has only one RX
				 * buffer per packet, but sometimes we are
				 * handed a zero-length packet.
				 */
				if ((rxdhead->status & ETH_LEN_MASK) == 0)
					rxdhead->status = 0; /* Mark error */
				else
					rxdhead->status |=
					    ETH_BUF_FIRST | ETH_BUF_LAST;
			}

			if ((rxdhead->status & ETH_BUF_FIRST) == 0) {
				/*
				 * Something went wrong during RX, so release
				 * back to the EMAC all buffers of invalid
				 * packets.
				 */
				rxdhead->status = 0;
				rxdhead->addr &= ~ETH_CPU_OWNER;
				sc->rxhead = NEXT_RX_IDX(sc, sc->rxhead);
				rxdhead = &sc->rx_descs[sc->rxhead];
				continue;
			}

			/* Find the end of packet or start of the next. */
			idx = sc->rxhead;
			if ((sc->rx_descs[idx].status & ETH_BUF_LAST) == 0) {
				idx = NEXT_RX_IDX(sc, idx);

				while ((sc->rx_descs[idx].addr &
				    ETH_CPU_OWNER) &&
				    ((sc->rx_descs[idx].status &
				    (ETH_BUF_FIRST | ETH_BUF_LAST)) == 0))
					idx = NEXT_RX_IDX(sc, idx);
			}
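			/*
			 * E.g., on EMACB a 300-byte frame spans three
			 * 128-byte buffers: FIRST is set on the first
			 * descriptor, LAST on the third, and the one in
			 * between carries neither bit, so the scan above
			 * stops at the descriptor with LAST set (or at one
			 * the CPU doesn't own yet, meaning the rest of the
			 * packet hasn't landed).
			 */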

			/*
			 * Packet is NOT yet completely in memory; we are
			 * done.
			 */
			if ((sc->rx_descs[idx].addr & ETH_CPU_OWNER) == 0 ||
			    ((sc->rx_descs[idx].status &
			    (ETH_BUF_FIRST | ETH_BUF_LAST)) == 0))
				break;

			/* Packets with no end descriptor are invalid. */
			if ((sc->rx_descs[idx].status & ETH_BUF_LAST) == 0) {
				rxdhead->status &= ~ETH_BUF_FIRST;
				continue;
			}

			/* The FCS is not copied into the mbuf. */
			remain = (sc->rx_descs[idx].status & ETH_LEN_MASK) - 4;

			/* Get an appropriately sized mbuf. */
			mb = m_get2(remain + ETHER_ALIGN, M_NOWAIT, MT_DATA,
			    M_PKTHDR);
			if (mb == NULL) {
				if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
				rxdhead->status = 0;
				continue;
			}
			mb->m_data += ETHER_ALIGN;
			mb->m_pkthdr.rcvif = ifp;

			WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));	/* Reset status */

			/* Now process the buffers that make up the packet. */
			do {
				/*
				 * The last buffer may be just 1-4 bytes of
				 * FCS, so remain may be zero for the last
				 * descriptor.
				 */
				if (remain > 0) {
					/* Make sure we get the current bytes. */
					bus_dmamap_sync(sc->rx_tag,
					    sc->rx_map[sc->rxhead],
					    BUS_DMASYNC_POSTREAD);

					count = MIN(remain, sc->rx_buf_size);

					/*
					 * XXX Performance robbing copy.
					 * Could receive directly into mbufs
					 * if not an RM9200.  And even then we
					 * could likely copy just the protocol
					 * headers.  XXX
					 */
					m_append(mb, count,
					    sc->rx_buf[sc->rxhead]);
					remain -= count;
				}

				done = (rxdhead->status & ETH_BUF_LAST) != 0;

				/* Return the descriptor to the EMAC. */
				rxdhead->status = 0;
				rxdhead->addr &= ~ETH_CPU_OWNER;
				bus_dmamap_sync(sc->rx_desc_tag,
				    sc->rx_desc_map, BUS_DMASYNC_PREWRITE);

				/* Move on to the next descriptor with wrap. */
				sc->rxhead = NEXT_RX_IDX(sc, sc->rxhead);
				rxdhead = &sc->rx_descs[sc->rxhead];
			} while (!done);

			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
			(*ifp->if_input)(ifp, mb);
		}
	}

	if (status & ETH_ISR_TCOM) {
		bus_dmamap_sync(sc->tx_desc_tag, sc->tx_desc_map,
		    BUS_DMASYNC_POSTREAD);

		ATE_LOCK(sc);
		/* XXX The TSR register should be cleared. */
		if (!sc->is_emacb) {
			/* Simulate a transmit descriptor table. */

			/* First packet done */
			if (sc->txtail < sc->txhead)
				sc->tx_descs[sc->txtail].status |= ETHB_TX_USED;

			/* Second packet done */
			if (sc->txtail + 1 < sc->txhead &&
			    RD4(sc, ETH_TSR) & ETH_TSR_IDLE)
				sc->tx_descs[sc->txtail + 1].status |=
				    ETHB_TX_USED;
		}

		while ((sc->tx_descs[sc->txtail].status & ETHB_TX_USED) &&
		    sc->sent_mbuf[sc->txtail] != NULL) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txtail],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[sc->txtail]);
			m_freem(sc->sent_mbuf[sc->txtail]);
			sc->tx_descs[sc->txtail].addr = 0;
			sc->sent_mbuf[sc->txtail] = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			sc->txtail = NEXT_TX_IDX(sc, sc->txtail);
		}

		/* Flush descriptors to the EMAC. */
		bus_dmamap_sync(sc->tx_desc_tag, sc->tx_desc_map,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * We're no longer busy, so clear the busy flag and call the
		 * start routine to xmit more packets.
		 */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		atestart_locked(sc->ifp);
		ATE_UNLOCK(sc);
	}

	if (status & ETH_ISR_RBNA) {
		/* Workaround for RM9200 Errata #11 */
		if (bootverbose)
			device_printf(sc->dev, "RBNA workaround\n");
		reg = RD4(sc, ETH_CTL);
		WR4(sc, ETH_CTL, reg & ~ETH_CTL_RE);
		BARRIER(sc, ETH_CTL, 4, BUS_SPACE_BARRIER_WRITE);
		WR4(sc, ETH_CTL, reg | ETH_CTL_RE);
	}

	/*
	 * XXX Need to work around SAM9260 errata 43.2.4.1:
	 * disable the MAC, reset the TX buffer, enable the MAC on TUND.
	 */
}

/*
 * Reset and initialize the chip.
 */
static void
ateinit_locked(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t reg;

	ATE_ASSERT_LOCKED(sc);

	/*
	 * XXX TODO(3)
	 * We need to turn on the EMAC clock in the pmc.  With the
	 * default boot loader, this is already turned on.  However, we
	 * need to think about how best to turn it on/off as the interface
	 * is brought up/down, as well as dealing with the mii bus...
	 *
	 * We also need to multiplex the pins correctly (in board_xxx.c).
	 */

	/*
	 * There are two different ways the MII bus can be connected
	 * to this chip: MII or RMII.
	 */
	if (!sc->is_emacb) {
		/* RM9200 */
		reg = RD4(sc, ETH_CFG);
		if (sc->use_rmii)
			reg |= ETH_CFG_RMII;
		else
			reg &= ~ETH_CFG_RMII;
		WR4(sc, ETH_CFG, reg);
	} else {
		/* SAM9 */
		reg = ETHB_UIO_CLKE;
		reg |= (sc->use_rmii) ? ETHB_UIO_RMII : 0;
		WR4(sc, ETHB_UIO, reg);
	}

	ate_rxfilter(sc);

	/*
	 * Set the chip MAC address.
	 */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	ate_set_mac(sc, eaddr);

	/* Make sure we know the state of the TX queue. */
	sc->txhead = sc->txtail = 0;
	if (sc->is_emacb) {
		/* Write the descriptor queue address. */
		WR4(sc, ETHB_TBQP, sc->tx_desc_phys);
	}

	/*
	 * Turn on the MAC and interrupt processing.
	 */
	WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
	WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);

	/* Enable big packets. */
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);

	/*
	 * Set the 'running' flag, clear the output active flag,
	 * and attempt to start the output.
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ate_stat_update(sc, mii->mii_media_active);
	atestart_locked(ifp);

	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

/*
 * Dequeue packets and transmit.
 */
static void
atestart_locked(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mbuf *m, *mdefrag;
	bus_dma_segment_t segs[1];
	int nseg, e;

	ATE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_descs[sc->txhead].status & ETHB_TX_USED) {
		/*
		 * Check to see if there's room to put another packet into the
		 * xmit queue.  The old EMAC version has a ping-pong buffer
		 * for xmit packets.  We use OACTIVE to indicate "we can stuff
		 * more into our buffers (clear) or not (set)."
		 */
		/* RM9200 has only two hardware entries. */
		if (!sc->is_emacb && (RD4(sc, ETH_TSR) & ETH_TSR_BNQ) == 0) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * There's a small race between the loop in ate_intr finishing
		 * and the check above to see if the packet was finished, as
		 * well as when atestart gets called via other paths.  Lose
		 * the race gracefully and free any mbuf still occupying this
		 * slot, before its DMA map is reloaded below.
		 */
		if (sc->sent_mbuf[sc->txhead] != NULL) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txhead],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[sc->txhead]);
			m_freem(sc->sent_mbuf[sc->txhead]);
			sc->sent_mbuf[sc->txhead] = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txhead], m,
		    segs, &nseg, 0);
		if (e == EFBIG) {
			mdefrag = m_defrag(m, M_NOWAIT);
			if (mdefrag == NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				return;
			}
			m = mdefrag;
			e = bus_dmamap_load_mbuf_sg(sc->mtag,
			    sc->tx_map[sc->txhead], m, segs, &nseg, 0);
		}
		if (e != 0) {
			m_freem(m);
			continue;
		}

		sc->sent_mbuf[sc->txhead] = m;

		bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txhead],
		    BUS_DMASYNC_PREWRITE);

		/* Tell the hardware to xmit the packet. */
		if (!sc->is_emacb) {
			WR4(sc, ETH_TAR, segs[0].ds_addr);
			BARRIER(sc, ETH_TAR, 4, BUS_SPACE_BARRIER_WRITE);
			WR4(sc, ETH_TCR, segs[0].ds_len);
		} else {
			bus_dmamap_sync(sc->tx_desc_tag, sc->tx_desc_map,
			    BUS_DMASYNC_POSTWRITE);
			sc->tx_descs[sc->txhead].addr = segs[0].ds_addr;
			sc->tx_descs[sc->txhead].status = segs[0].ds_len |
			    (sc->tx_descs[sc->txhead].status & ETHB_TX_WRAP) |
			    ETHB_TX_BUF_LAST;
			bus_dmamap_sync(sc->tx_desc_tag, sc->tx_desc_map,
			    BUS_DMASYNC_PREWRITE);
			WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETHB_CTL_TGO);
		}
		sc->txhead = NEXT_TX_IDX(sc, sc->txhead);

		/* Tap off here if there is a bpf listener. */
		BPF_MTAP(ifp, m);
	}

	if ((sc->tx_descs[sc->txhead].status & ETHB_TX_USED) == 0)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
ateinit(void *xsc)
{
	struct ate_softc *sc = xsc;

	ATE_LOCK(sc);
	ateinit_locked(sc);
	ATE_UNLOCK(sc);
}

static void
atestart(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;

	ATE_LOCK(sc);
	atestart_locked(ifp);
	ATE_UNLOCK(sc);
}

/*
 * Turn off interrupts, and stop the NIC.  Can be called with sc->ifp NULL,
 * so be careful.
 */
static void
atestop(struct ate_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;
	if (ifp) {
		//ifp->if_timer = 0;
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	}

	callout_stop(&sc->tick_ch);

	/*
	 * Enable some parts of the MAC that are needed always (like the
	 * MII bus).  This turns off the RE and TE bits, which will remain
	 * off until ateinit() is called to turn them on.  With RE and TE
	 * turned off, there's no DMA to worry about after this write.
	 */
	WR4(sc, ETH_CTL, ETH_CTL_MPE);

	/*
	 * Turn off all the configured options and revert to defaults.
	 */

	/*
	 * Make sure that the MDIO clock is less than 2.5 MHz.  We can no
	 * longer default to /32, since the SAM9 family may have
	 * MCK > 80 MHz.
	 */
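	/*
	 * MDC = MCK / divisor, so /8 keeps MDC below 2.5 MHz for
	 * MCK <= 20 MHz, /16 up to 40 MHz, /32 up to 80 MHz, and /64
	 * up to 160 MHz.  E.g., a SAM9 with MCK at 100 MHz needs /64,
	 * giving an MDC of about 1.56 MHz.
	 */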
	if (at91_master_clock <= 20000000)
		WR4(sc, ETH_CFG, ETH_CFG_CLK_8);
	else if (at91_master_clock <= 40000000)
		WR4(sc, ETH_CFG, ETH_CFG_CLK_16);
	else if (at91_master_clock <= 80000000)
		WR4(sc, ETH_CFG, ETH_CFG_CLK_32);
	else
		WR4(sc, ETH_CFG, ETH_CFG_CLK_64);

	/*
	 * Turn off all the interrupts, and ack any pending ones by reading
	 * the ISR.
	 */
	WR4(sc, ETH_IDR, 0xffffffff);
	RD4(sc, ETH_ISR);

	/*
	 * Clear out the Transmit and Receive Status registers of any
	 * errors they may be reporting.
	 */
	WR4(sc, ETH_TSR, 0xffffffff);
	WR4(sc, ETH_RSR, 0xffffffff);

	/* Release TX resources. */
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		if (sc->sent_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
			m_freem(sc->sent_mbuf[i]);
			sc->sent_mbuf[i] = NULL;
		}
	}

	/* Turn off the transceiver input clock. */
	if (sc->is_emacb)
		WR4(sc, ETHB_UIO, RD4(sc, ETHB_UIO) & ~ETHB_UIO_CLKE);

	/*
	 * XXX we should power down the EMAC if it isn't in use, after
	 * putting it into loopback mode.  This saves about 400uA according
	 * to the datasheet.
	 */
}

static void
ate_rxfilter(struct ate_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;
	int enabled;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;

	/* Wipe out old filter settings. */
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_CAF | ETH_CFG_MTI | ETH_CFG_UNI);
	reg |= ETH_CFG_NBC;
	sc->flags &= ~ATE_FLAG_MULTICAST;

	/* Set new parameters. */
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		reg &= ~ETH_CFG_NBC;
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		reg |= ETH_CFG_CAF;
	} else {
		enabled = ate_setmcast(sc);
		if (enabled != 0) {
			reg |= ETH_CFG_MTI;
			sc->flags |= ATE_FLAG_MULTICAST;
		}
	}
	WR4(sc, ETH_CFG, reg);
}

static int
ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int drv_flags, flags;
	int mask, error, enabled;

	error = 0;
	flags = ifp->if_flags;
	drv_flags = ifp->if_drv_flags;
	switch (cmd) {
	case SIOCSIFFLAGS:
		ATE_LOCK(sc);
		if ((flags & IFF_UP) != 0) {
			if ((drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ate_rxfilter(sc);
			} else {
				if ((sc->flags & ATE_FLAG_DETACHING) == 0)
					ateinit_locked(sc);
			}
		} else if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			atestop(sc);
		}
		sc->if_flags = flags;
		ATE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ATE_LOCK(sc);
			enabled = ate_setmcast(sc);
			if (enabled != (sc->flags & ATE_FLAG_MULTICAST))
				ate_rxfilter(sc);
			ATE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			ATE_LOCK(sc);
			if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
				WR4(sc, ETH_CFG,
				    RD4(sc, ETH_CFG) | ETH_CFG_BIG);
				ifp->if_capenable |= IFCAP_VLAN_MTU;
			} else {
				WR4(sc, ETH_CFG,
				    RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
				ifp->if_capenable &= ~IFCAP_VLAN_MTU;
			}
			ATE_UNLOCK(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ate_child_detached(device_t dev, device_t child)
{
	struct ate_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

/*
 * MII bus support routines.
 */
static int
ate_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ate_softc *sc;
	int val;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here.
	 */

	sc = device_get_softc(dev);
	DELAY(1);	/* Hangs without this delay; really 30.5 us atm. */
	WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;

	return (val);
}

static int
ate_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ate_softc *sc;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the emac is on here.
	 */

	sc = device_get_softc(dev);
	WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	return (0);
}

static device_method_t ate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ate_probe),
	DEVMETHOD(device_attach,	ate_attach),
	DEVMETHOD(device_detach,	ate_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	ate_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ate_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ate_miibus_writereg),

	DEVMETHOD_END
};

static driver_t ate_driver = {
	"ate",
	ate_methods,
	sizeof(struct ate_softc),
};

#ifdef FDT
DRIVER_MODULE(ate, simplebus, ate_driver, ate_devclass, NULL, NULL);
#else
DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, NULL, NULL);
#endif
DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(ate, miibus, 1, 1, 1);
MODULE_DEPEND(ate, ether, 1, 1, 1);