/*-
 * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* TODO
 *
 * 1) Turn on the clock in pmc?  Turn off?
 * 2) GPIO initialization in board setup code.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/at91/if_ate.c 192028 2009-05-12 21:28:41Z stas $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/at91/if_atereg.h>

#include "miibus_if.h"

#define ATE_MAX_TX_BUFFERS 2		/* We have ping-pong tx buffers */
#define ATE_MAX_RX_BUFFERS 64

/*
 * Driver-specific flags.
 */
#define	ATE_FLAG_DETACHING	0x01
#define	ATE_FLAG_MULTICAST	0x02

struct ate_softc
{
	struct ifnet *ifp;		/* ifnet pointer */
	struct mtx sc_mtx;		/* basically a perimeter lock */
	device_t dev;			/* Myself */
	device_t miibus;		/* My child miibus */
	void *intrhand;			/* Interrupt handle */
	struct resource *irq_res;	/* IRQ resource */
	struct resource	*mem_res;	/* Memory resource */
	struct callout tick_ch;		/* Tick callout */
	bus_dma_tag_t mtag;		/* bus dma tag for mbufs */
	bus_dmamap_t tx_map[ATE_MAX_TX_BUFFERS];
	struct mbuf *sent_mbuf[ATE_MAX_TX_BUFFERS]; /* Sent mbufs */
	bus_dma_tag_t rxtag;
	bus_dmamap_t rx_map[ATE_MAX_RX_BUFFERS];
	void *rx_buf[ATE_MAX_RX_BUFFERS]; /* RX buffer space */
	int rx_buf_ptr;
	bus_dma_tag_t rx_desc_tag;
	bus_dmamap_t rx_desc_map;
	int txcur;			/* current tx map pointer */
	bus_addr_t rx_desc_phys;
	eth_rx_desc_t *rx_descs;
	int use_rmii;
	struct	ifmib_iso_8802_3 mibdata; /* stuff for network mgmt */
	int	flags;
	int	if_flags;
};

static inline uint32_t
RD4(struct ate_softc *sc, bus_size_t off)
{
	return bus_read_4(sc->mem_res, off);
}

static inline void
WR4(struct ate_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->mem_res, off, val);
}

static inline void
BARRIER(struct ate_softc *sc, bus_size_t off, bus_size_t len, int flags)
{

	bus_barrier(sc->mem_res, off, len, flags);
}

#define ATE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ATE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define ATE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define ATE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define ATE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define ATE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t ate_devclass;

/* ifnet entry points */

static void ateinit_locked(void *);
static void atestart_locked(struct ifnet *);

static void ateinit(void *);
static void atestart(struct ifnet *);
static void atestop(struct ate_softc *);
static int ateioctl(struct ifnet * ifp, u_long, caddr_t);

/* bus entry points */

static int ate_probe(device_t dev);
static int ate_attach(device_t dev);
static int ate_detach(device_t dev);
static void ate_intr(void *);

/* helper routines */
static int ate_activate(device_t dev);
static void ate_deactivate(struct ate_softc *sc);
static int ate_ifmedia_upd(struct ifnet *ifp);
static void ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int ate_get_mac(struct ate_softc *sc, u_char *eaddr);
static void ate_set_mac(struct ate_softc *sc, u_char *eaddr);
static void ate_rxfilter(struct ate_softc *sc);

/*
 * The AT91 family of products has an ethernet controller called EMAC.
 * However, it isn't self-identifying, so it is anticipated that the parent
 * bus code will take care to add ate devices only where they really are.
 * As such, we do nothing here to identify the device beyond setting its
 * description.
 */
static int
ate_probe(device_t dev)
{
	device_set_desc(dev, "EMAC");
	return (0);
}

static int
ate_attach(device_t dev)
{
	struct ate_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	u_char eaddr[ETHER_ADDR_LEN];
	uint32_t rnd;
	int rid, err;

	sc->dev = dev;
	ATE_LOCK_INIT(sc);

	/*
	 * Allocate resources.
	 */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		err = ENOMEM;
		goto out;
	}
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resources.\n");
		err = ENOMEM;
		goto out;
	}

	err = ate_activate(dev);
	if (err)
		goto out;

	sc->use_rmii = (RD4(sc, ETH_CFG) & ETH_CFG_RMII) == ETH_CFG_RMII;

	/* Sysctls */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "rmii",
	    CTLFLAG_RD, &sc->use_rmii, 0, "rmii in use");

	/* Calling atestop() before ifp is set is OK. */
	ATE_LOCK(sc);
	atestop(sc);
	ATE_UNLOCK(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	if ((err = ate_get_mac(sc, eaddr)) != 0) {
		/*
		 * No MAC address configured.  Generate a random one.
		 */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		rnd = arc4random();

		/*
		 * Set the OUI to a convenient locally administered address.
		 * 'b' is 0x62, which has the locally administered bit set
		 * and the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;
	}
	ate_set_mac(sc, eaddr);

	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (mii_phy_probe(dev, &sc->miibus, ate_ifmedia_upd, ate_ifmedia_sts)) {
		device_printf(dev, "Cannot find my PHY.\n");
		err = ENXIO;
		goto out;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits are already set */
	ifp->if_start = atestart;
	ifp->if_ioctl = ateioctl;
	ifp->if_init = ateinit;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_timer = 0;
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS;
	sc->if_flags = ifp->if_flags;

	ether_ifattach(ifp, eaddr);

	/*
	 * Activate the interrupt.
	 */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ate_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not establish interrupt handler.\n");
		ether_ifdetach(ifp);
		goto out;
	}

out:
	if (err)
		ate_detach(dev);
	return (err);
}

static int
ate_detach(device_t dev)
{
	struct ate_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ate: %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		ATE_LOCK(sc);
			sc->flags |= ATE_FLAG_DETACHING;
			atestop(sc);
		ATE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		ether_ifdetach(ifp);
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ate_deactivate(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}
	ATE_LOCK_DESTROY(sc);
	return (0);
}

static void
ate_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;
	sc->rx_desc_phys = segs[0].ds_addr;
}

static void
ate_load_rx_buf(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ate_softc *sc;
	int i;

	if (error != 0)
		return;
	sc = (struct ate_softc *)arg;
	i = sc->rx_buf_ptr;

	/*
	 * For the last buffer, set the wrap bit so the controller
	 * restarts from the first descriptor.
	 */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	if (i == ATE_MAX_RX_BUFFERS - 1)
		sc->rx_descs[i].addr = segs[0].ds_addr | ETH_WRAP_BIT;
	else
		sc->rx_descs[i].addr = segs[0].ds_addr;
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
	sc->rx_descs[i].status = 0;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(sc->rxtag, sc->rx_map[i], BUS_DMASYNC_PREREAD);
}

/*
 * Compute the multicast filter for this device using the standard
 * hash algorithm.  It is a bit of a wonder that this isn't in the
 * ether code somewhere, since a lot of different MAC chips use this
 * method (or the reverse-the-bits variant).
 */
static int
ate_setmcast(struct ate_softc *sc)
{
	uint32_t index;
	uint32_t mcaf[2];
	u_char *af = (u_char *) mcaf;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;

	ifp = sc->ifp;

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		return (0);
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		WR4(sc, ETH_HSL, 0xffffffff);
		WR4(sc, ETH_HSH, 0xffffffff);
		return (1);
	}

	/*
	 * Compute the multicast hash.
	 */
	mcaf[0] = 0;
	mcaf[1] = 0;
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		af[index >> 3] |= 1 << (index & 7);
	}
	IF_ADDR_UNLOCK(ifp);

	/*
	 * Write the hash to the hash registers.  This chip can also match
	 * unicast packets against these registers for easier bridging
	 * operations, but we don't take advantage of that.  The writes are
	 * done outside the IF_ADDR_LOCK to avoid a LOR, though that may
	 * not be strictly necessary.
	 */
	WR4(sc, ETH_HSL, mcaf[0]);
	WR4(sc, ETH_HSH, mcaf[1]);
	return (mcaf[0] || mcaf[1]);
}
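
/*
 * Illustrative sketch only (nothing here is compiled): on a little-endian
 * host, the byte-wise update in ate_setmcast() above is equivalent to the
 * following per-address mapping of the 6-bit CRC index onto the two 32-bit
 * hash registers:
 *
 *	index = ether_crc32_be(lladdr, ETHER_ADDR_LEN) >> 26;
 *	if (index < 32)
 *		mcaf[0] |= 1u << index;		(written to ETH_HSL)
 *	else
 *		mcaf[1] |= 1u << (index - 32);	(written to ETH_HSH)
 */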

static int
ate_activate(device_t dev)
{
	struct ate_softc *sc;
	int err, i;

	sc = device_get_softc(dev);

	/*
	 * Allocate DMA tags and maps for the transmit mbufs.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->mtag);
	if (err != 0)
		goto errout;
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		err = bus_dmamap_create(sc->mtag, 0, &sc->tx_map[i]);
		if (err != 0)
			goto errout;
	}

	/*
	 * Allocate DMA tags and maps for the RX buffers.  This chip reads
	 * incoming frames into a ring of buffers that we hand it here.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->rxtag);
	if (err != 0)
		goto errout;

	/* DMA tag and map for the RX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(dev), sizeof(eth_rx_desc_t),
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 1,
	    ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t), 0, busdma_lock_mutex,
	    &sc->sc_mtx, &sc->rx_desc_tag);
	if (err != 0)
		goto errout;
	if (bus_dmamem_alloc(sc->rx_desc_tag, (void **)&sc->rx_descs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->rx_desc_map) != 0)
		goto errout;
	if (bus_dmamap_load(sc->rx_desc_tag, sc->rx_desc_map,
	    sc->rx_descs, ATE_MAX_RX_BUFFERS * sizeof(eth_rx_desc_t),
	    ate_getaddr, sc, 0) != 0)
		goto errout;
	for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
		sc->rx_buf_ptr = i;
		if (bus_dmamem_alloc(sc->rxtag, (void **)&sc->rx_buf[i],
		    BUS_DMA_NOWAIT, &sc->rx_map[i]) != 0)
			goto errout;
		if (bus_dmamap_load(sc->rxtag, sc->rx_map[i], sc->rx_buf[i],
		    MCLBYTES, ate_load_rx_buf, sc, 0) != 0)
			goto errout;
	}
	sc->rx_buf_ptr = 0;
	/* Flush the memory for the EMAC rx descriptors. */
	bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
	/* Write the descriptor queue address. */
	WR4(sc, ETH_RBQP, sc->rx_desc_phys);
	return (0);

errout:
	return (ENOMEM);
}
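
/*
 * Rough picture of what ate_activate() just set up, for reference only
 * (this is a descriptive sketch, not new code):
 *
 *	ETH_RBQP ---> rx_descs[0]  { addr -> rx_buf[0],  status }
 *	              rx_descs[1]  { addr -> rx_buf[1],  status }
 *	              ...
 *	              rx_descs[ATE_MAX_RX_BUFFERS - 1]
 *	                           { addr -> rx_buf[...], ETH_WRAP_BIT set }
 *
 * The EMAC owns each descriptor until it stores a frame and sets the
 * ETH_CPU_OWNER bit in addr; ate_intr() clears that bit to hand the
 * buffer back to the hardware (see the RCOM handling below).
 */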

static void
ate_deactivate(struct ate_softc *sc)
{
	int i;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	if (sc->mtag != NULL) {
		for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
			if (sc->sent_mbuf[i] != NULL) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[i],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
				m_freem(sc->sent_mbuf[i]);
			}
			bus_dmamap_destroy(sc->mtag, sc->tx_map[i]);
			sc->sent_mbuf[i] = NULL;
			sc->tx_map[i] = NULL;
		}
		bus_dma_tag_destroy(sc->mtag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL) {
			if (sc->rx_desc_phys != 0) {
				bus_dmamap_sync(sc->rx_desc_tag,
				    sc->rx_desc_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rx_desc_tag,
				    sc->rx_desc_map);
				sc->rx_desc_phys = 0;
			}
		}
	}
	if (sc->rxtag != NULL) {
		for (i = 0; i < ATE_MAX_RX_BUFFERS; i++) {
			if (sc->rx_buf[i] != NULL) {
				if (sc->rx_descs[i].addr != 0) {
					bus_dmamap_sync(sc->rxtag,
					    sc->rx_map[i],
					    BUS_DMASYNC_POSTREAD);
					bus_dmamap_unload(sc->rxtag,
					    sc->rx_map[i]);
					sc->rx_descs[i].addr = 0;
				}
				bus_dmamem_free(sc->rxtag, sc->rx_buf[i],
				    sc->rx_map[i]);
				sc->rx_buf[i] = NULL;
				sc->rx_map[i] = NULL;
			}
		}
		bus_dma_tag_destroy(sc->rxtag);
	}
	if (sc->rx_desc_tag != NULL) {
		if (sc->rx_descs != NULL)
			bus_dmamem_free(sc->rx_desc_tag, sc->rx_descs,
			    sc->rx_desc_map);
		bus_dma_tag_destroy(sc->rx_desc_tag);
		sc->rx_descs = NULL;
		sc->rx_desc_tag = NULL;
	}
}

/*
 * Change media according to request.
 */
static int
ate_ifmedia_upd(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_mediachg(mii);
	ATE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
ate_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ATE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ATE_UNLOCK(sc);
}

static void
ate_stat_update(struct ate_softc *sc, int active)
{
	uint32_t reg;

	/*
	 * The speed and full/half-duplex state need to be reflected
	 * in the ETH_CFG register.
	 */
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_SPD | ETH_CFG_FD);
	if (IFM_SUBTYPE(active) != IFM_10_T)
		reg |= ETH_CFG_SPD;
	if (active & IFM_FDX)
		reg |= ETH_CFG_FD;
	WR4(sc, ETH_CFG, reg);
}

static void
ate_tick(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	int active;
	uint32_t c;

	/*
	 * The KB920x boot loader tests ETH_SR & ETH_SR_LINK and, when that
	 * bit is clear, asks the MII whether there's a link.  Not sure if
	 * we should do the same thing here or not.
	 */
	ATE_ASSERT_LOCKED(sc);
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    active != mii->mii_media_active)
			ate_stat_update(sc, mii->mii_media_active);
	}

	/*
	 * Update the stats as best we can.  When we're done, clear
	 * the status counters and start over.  We're supposed to read these
	 * registers often enough that they won't overflow.  Hopefully
	 * once a second is often enough.  Some don't map well to
	 * the dot3Stats mib, so for those we just count them as general
	 * errors.  Stats for iframes, ibytes, oframes and obytes are
	 * collected elsewhere.  These registers zero on a read to prevent
	 * races.  For all the collision stats, also update the collision
	 * stats for the interface.
	 */
	sc->mibdata.dot3StatsAlignmentErrors += RD4(sc, ETH_ALE);
	sc->mibdata.dot3StatsFCSErrors += RD4(sc, ETH_SEQE);
	c = RD4(sc, ETH_SCOL);
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSingleCollisionFrames += c;
	c = RD4(sc, ETH_MCOL);
	sc->mibdata.dot3StatsMultipleCollisionFrames += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsSQETestErrors += RD4(sc, ETH_SQEE);
	sc->mibdata.dot3StatsDeferredTransmissions += RD4(sc, ETH_DTE);
	c = RD4(sc, ETH_LCOL);
	sc->mibdata.dot3StatsLateCollisions += c;
	ifp->if_collisions += c;
	c = RD4(sc, ETH_ECOL);
	sc->mibdata.dot3StatsExcessiveCollisions += c;
	ifp->if_collisions += c;
	sc->mibdata.dot3StatsCarrierSenseErrors += RD4(sc, ETH_CSE);
	sc->mibdata.dot3StatsFrameTooLongs += RD4(sc, ETH_ELR);
	sc->mibdata.dot3StatsInternalMacReceiveErrors += RD4(sc, ETH_DRFC);

	/*
	 * Not sure where to lump these, so count them against the errors
	 * for the interface.
	 */
	sc->ifp->if_oerrors += RD4(sc, ETH_TUE);
	sc->ifp->if_ierrors += RD4(sc, ETH_CDE) + RD4(sc, ETH_RJB) +
	    RD4(sc, ETH_USF);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

static void
ate_set_mac(struct ate_softc *sc, u_char *eaddr)
{
	WR4(sc, ETH_SA1L, (eaddr[3] << 24) | (eaddr[2] << 16) |
	    (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, ETH_SA1H, (eaddr[5] << 8) | (eaddr[4]));
}

static int
ate_get_mac(struct ate_softc *sc, u_char *eaddr)
{
	bus_size_t sa_low_reg[] = { ETH_SA1L, ETH_SA2L, ETH_SA3L, ETH_SA4L };
	bus_size_t sa_high_reg[] = { ETH_SA1H, ETH_SA2H, ETH_SA3H, ETH_SA4H };
	uint32_t low, high;
	int i;

	/*
	 * The boot loader may have programmed a MAC address into the chip,
	 * if one was configured.  Grab the first non-zero MAC address from
	 * the SA[1-4][HL] register pairs.
	 */
	for (i = 0; i < 4; i++) {
		low = RD4(sc, sa_low_reg[i]);
		high = RD4(sc, sa_high_reg[i]);
		if ((low | (high & 0xffff)) != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			return (0);
		}
	}
	return (ENXIO);
}

static void
ate_intr(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mbuf *mb;
	void *bp;
	uint32_t status, reg, rx_stat;
	int i;

	status = RD4(sc, ETH_ISR);
	if (status == 0)
		return;
	if (status & ETH_ISR_RCOM) {
		bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
		    BUS_DMASYNC_POSTREAD);
		while (sc->rx_descs[sc->rx_buf_ptr].addr & ETH_CPU_OWNER) {
			i = sc->rx_buf_ptr;
			sc->rx_buf_ptr = (i + 1) % ATE_MAX_RX_BUFFERS;
			bp = sc->rx_buf[i];
			rx_stat = sc->rx_descs[i].status;
			if ((rx_stat & ETH_LEN_MASK) == 0) {
				if (bootverbose)
					device_printf(sc->dev,
					    "ignoring bogus zero-length packet\n");
				bus_dmamap_sync(sc->rx_desc_tag,
				    sc->rx_desc_map, BUS_DMASYNC_PREWRITE);
				sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
				bus_dmamap_sync(sc->rx_desc_tag,
				    sc->rx_desc_map, BUS_DMASYNC_POSTWRITE);
				continue;
			}
			/* Flush memory for mbuf so we don't get stale bytes */
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_POSTREAD);
			WR4(sc, ETH_RSR, RD4(sc, ETH_RSR));

			/*
			 * The length returned by the device includes the
			 * ethernet CRC of the packet, but ifnet drivers are
			 * supposed to discard it.
			 */
			mb = m_devget(sc->rx_buf[i],
			    (rx_stat & ETH_LEN_MASK) - ETHER_CRC_LEN,
			    ETHER_ALIGN, ifp, NULL);
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_PREWRITE);
			sc->rx_descs[i].addr &= ~ETH_CPU_OWNER;
			bus_dmamap_sync(sc->rx_desc_tag, sc->rx_desc_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->rxtag, sc->rx_map[i],
			    BUS_DMASYNC_PREREAD);
			if (mb != NULL) {
				ifp->if_ipackets++;
				(*ifp->if_input)(ifp, mb);
			}
		}
	}
	if (status & ETH_ISR_TCOM) {
		ATE_LOCK(sc);
		/* XXX TSR register should be cleared */
		if (sc->sent_mbuf[0]) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[0],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[0]);
			m_freem(sc->sent_mbuf[0]);
			ifp->if_opackets++;
			sc->sent_mbuf[0] = NULL;
		}
		if (sc->sent_mbuf[1]) {
			if (RD4(sc, ETH_TSR) & ETH_TSR_IDLE) {
				bus_dmamap_sync(sc->mtag, sc->tx_map[1],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->mtag, sc->tx_map[1]);
				m_freem(sc->sent_mbuf[1]);
				ifp->if_opackets++;
				sc->txcur = 0;
				sc->sent_mbuf[0] = sc->sent_mbuf[1] = NULL;
			} else {
				sc->sent_mbuf[0] = sc->sent_mbuf[1];
				sc->sent_mbuf[1] = NULL;
				sc->txcur = 1;
			}
		} else {
			sc->sent_mbuf[0] = NULL;
			sc->txcur = 0;
		}
		/*
		 * We're no longer busy, so clear the busy flag and call the
		 * start routine to xmit more packets.
		 */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		atestart_locked(sc->ifp);
		ATE_UNLOCK(sc);
	}
	if (status & ETH_ISR_RBNA) {
		/* Work around Errata #11. */
		if (bootverbose)
			device_printf(sc->dev, "RBNA workaround\n");
		reg = RD4(sc, ETH_CTL);
		WR4(sc, ETH_CTL, reg & ~ETH_CTL_RE);
		BARRIER(sc, ETH_CTL, 4, BUS_SPACE_BARRIER_WRITE);
		WR4(sc, ETH_CTL, reg | ETH_CTL_RE);
	}
}

/*
 * Reset and initialize the chip.
 */
static void
ateinit_locked(void *xsc)
{
	struct ate_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	uint32_t reg;

	ATE_ASSERT_LOCKED(sc);

	/*
	 * XXX TODO (see the list at the top of the file)
	 * We need to turn on the EMAC clock in the pmc.  With the
	 * default boot loader, this is already turned on.  However, we
	 * need to think about how best to turn it on/off as the interface
	 * is brought up/down, as well as dealing with the mii bus...
	 *
	 * We also need to multiplex the pins correctly.
	 */

	/*
	 * There are two different ways that the mii bus is connected
	 * to this chip, MII or RMII.  Select the right one based on how
	 * ETH_CFG was already configured (latched into sc->use_rmii at
	 * attach time).
	 */
	reg = RD4(sc, ETH_CFG);
	if (sc->use_rmii)
		reg |= ETH_CFG_RMII;
	else
		reg &= ~ETH_CFG_RMII;
	WR4(sc, ETH_CFG, reg);

	ate_rxfilter(sc);

	/*
	 * Turn on the MAC and interrupt processing.
	 */
	WR4(sc, ETH_CTL, RD4(sc, ETH_CTL) | ETH_CTL_TE | ETH_CTL_RE);
	WR4(sc, ETH_IER, ETH_ISR_RCOM | ETH_ISR_TCOM | ETH_ISR_RBNA);

	/*
	 * The boot loader fills in the MAC address.  If that's not the case,
	 * then we should set SA1L and SA1H here to the appropriate value.
	 * Note: the byte order is big endian, not little endian, so we have
	 * some swapping to do.  Again, if we need it (which I don't think
	 * we do).
	 */

	/* Enable big packets. */
	WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);

	/*
	 * Set the 'running' flag, clear the output active flag, and attempt
	 * to start the output.
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ate_stat_update(sc, mii->mii_media_active);
	atestart_locked(ifp);

	callout_reset(&sc->tick_ch, hz, ate_tick, sc);
}

/*
 * Dequeue packets and transmit them.
 */
static void
atestart_locked(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mbuf *m, *mdefrag;
	bus_dma_segment_t segs[1];
	int nseg, e;

	ATE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->txcur < ATE_MAX_TX_BUFFERS) {
		/*
		 * Check to see if there's room to put another packet into the
		 * xmit queue.  The EMAC chip has a ping-pong buffer for xmit
		 * packets.  We use OACTIVE to indicate "we can stuff more into
		 * our buffers (clear) or not (set)."
		 */
		if (!(RD4(sc, ETH_TSR) & ETH_TSR_BNQ)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			return;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		e = bus_dmamap_load_mbuf_sg(sc->mtag, sc->tx_map[sc->txcur], m,
		    segs, &nseg, 0);
		if (e == EFBIG) {
			mdefrag = m_defrag(m, M_DONTWAIT);
			if (mdefrag == NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				return;
			}
			m = mdefrag;
			e = bus_dmamap_load_mbuf_sg(sc->mtag,
			    sc->tx_map[sc->txcur], m, segs, &nseg, 0);
		}
		if (e != 0) {
			m_freem(m);
			continue;
		}
		bus_dmamap_sync(sc->mtag, sc->tx_map[sc->txcur],
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Tell the hardware to xmit the packet.
		 */
		WR4(sc, ETH_TAR, segs[0].ds_addr);
		BARRIER(sc, ETH_TAR, 8, BUS_SPACE_BARRIER_WRITE);
		WR4(sc, ETH_TCR, segs[0].ds_len);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		sc->sent_mbuf[sc->txcur] = m;
		sc->txcur++;
	}
}
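
/*
 * A sketch of the ping-pong transmit handshake, pieced together from
 * atestart_locked() above and the ETH_ISR_TCOM handling in ate_intr()
 * (illustrative summary only, not new code):
 *
 *	start:	while txcur < ATE_MAX_TX_BUFFERS and ETH_TSR_BNQ is set,
 *		write the buffer address to ETH_TAR and its length to
 *		ETH_TCR, remember the mbuf in sent_mbuf[txcur], txcur++.
 *		If BNQ is clear, both slots are busy: set IFF_DRV_OACTIVE.
 *
 *	intr:	on TCOM, free sent_mbuf[0]; if sent_mbuf[1] is still
 *		pending, it either completed too (ETH_TSR_IDLE) or slides
 *		down to slot 0.  Clear IFF_DRV_OACTIVE and restart output.
 */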

static void
ateinit(void *xsc)
{
	struct ate_softc *sc = xsc;

	ATE_LOCK(sc);
	ateinit_locked(sc);
	ATE_UNLOCK(sc);
}

static void
atestart(struct ifnet *ifp)
{
	struct ate_softc *sc = ifp->if_softc;

	ATE_LOCK(sc);
	atestart_locked(ifp);
	ATE_UNLOCK(sc);
}

/*
 * Turn off interrupts, and stop the NIC.  Can be called with sc->ifp NULL,
 * so be careful.
 */
static void
atestop(struct ate_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;
	if (ifp) {
		ifp->if_timer = 0;
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	}

	callout_stop(&sc->tick_ch);

	/*
	 * Enable only the parts of the MAC that are always needed (like the
	 * MII bus).  This turns off the RE and TE bits, which will remain
	 * off until ateinit() is called to turn them on.  With RE and TE
	 * turned off, there's no DMA to worry about after this write.
	 */
	WR4(sc, ETH_CTL, ETH_CTL_MPE);

	/*
	 * Turn off all the configured options and revert to defaults.
	 */
	WR4(sc, ETH_CFG, ETH_CFG_CLK_32);

	/*
	 * Turn off all the interrupts, and ack any pending ones by reading
	 * the ISR.
	 */
	WR4(sc, ETH_IDR, 0xffffffff);
	RD4(sc, ETH_ISR);

	/*
	 * Clear out the Transmit and Receive Status registers of any
	 * errors they may be reporting.
	 */
	WR4(sc, ETH_TSR, 0xffffffff);
	WR4(sc, ETH_RSR, 0xffffffff);

	/*
	 * Release TX resources.
	 */
	for (i = 0; i < ATE_MAX_TX_BUFFERS; i++) {
		if (sc->sent_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->mtag, sc->tx_map[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->mtag, sc->tx_map[i]);
			m_freem(sc->sent_mbuf[i]);
			sc->sent_mbuf[i] = NULL;
		}
	}

	/*
	 * XXX we should power down the EMAC if it isn't in use, after
	 * putting it into loopback mode.  This saves about 400uA according
	 * to the datasheet.
	 */
}

static void
ate_rxfilter(struct ate_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;
	int enabled;

	KASSERT(sc != NULL, ("[ate, %d]: sc is NULL!", __LINE__));
	ATE_ASSERT_LOCKED(sc);
	ifp = sc->ifp;

	/*
	 * Wipe out old filter settings.
	 */
	reg = RD4(sc, ETH_CFG);
	reg &= ~(ETH_CFG_CAF | ETH_CFG_MTI | ETH_CFG_UNI);
	reg |= ETH_CFG_NBC;
	sc->flags &= ~ATE_FLAG_MULTICAST;

	/*
	 * Set new parameters.
	 */
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		reg &= ~ETH_CFG_NBC;
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		reg |= ETH_CFG_CAF;
	} else {
		enabled = ate_setmcast(sc);
		if (enabled != 0) {
			reg |= ETH_CFG_MTI;
			sc->flags |= ATE_FLAG_MULTICAST;
		}
	}
	WR4(sc, ETH_CFG, reg);
}

static int
ateioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ate_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int drv_flags, flags;
	int mask, error, enabled;

	error = 0;
	flags = ifp->if_flags;
	drv_flags = ifp->if_drv_flags;
	switch (cmd) {
	case SIOCSIFFLAGS:
		ATE_LOCK(sc);
		if ((flags & IFF_UP) != 0) {
			if ((drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ate_rxfilter(sc);
			} else {
				if ((sc->flags & ATE_FLAG_DETACHING) == 0)
					ateinit_locked(sc);
			}
		} else if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			atestop(sc);
		}
		sc->if_flags = flags;
		ATE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((drv_flags & IFF_DRV_RUNNING) != 0) {
			ATE_LOCK(sc);
			enabled = ate_setmcast(sc);
			if (enabled != (sc->flags & ATE_FLAG_MULTICAST))
				ate_rxfilter(sc);
			ATE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			ATE_LOCK(sc);
			if (ifr->ifr_reqcap & IFCAP_VLAN_MTU) {
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) | ETH_CFG_BIG);
				ifp->if_capenable |= IFCAP_VLAN_MTU;
			} else {
				WR4(sc, ETH_CFG, RD4(sc, ETH_CFG) & ~ETH_CFG_BIG);
				ifp->if_capenable &= ~IFCAP_VLAN_MTU;
			}
			ATE_UNLOCK(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ate_child_detached(device_t dev, device_t child)
{
	struct ate_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

/*
 * MII bus support routines.
 */
static int
ate_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ate_softc *sc;
	int val;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the EMAC is on here.
	 */

	sc = device_get_softc(dev);
	DELAY(1);	/* Hangs w/o this delay; really 30.5us atm */
	WR4(sc, ETH_MAN, ETH_MAN_REG_RD(phy, reg));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
	val = RD4(sc, ETH_MAN) & ETH_MAN_VALUE_MASK;

	return (val);
}

static void
ate_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ate_softc *sc;

	/*
	 * XXX if we implement aggressive power savings, then we need
	 * XXX to make sure that the clock to the EMAC is on here.
	 */

	sc = device_get_softc(dev);
	WR4(sc, ETH_MAN, ETH_MAN_REG_WR(phy, reg, data));
	while ((RD4(sc, ETH_SR) & ETH_SR_IDLE) == 0)
		continue;
}

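/*
 * For reference: each register access above shifts a standard IEEE 802.3
 * clause 22 management frame out the MDIO pins.  A hedged sketch of what
 * the ETH_MAN_REG_RD/WR macros (defined in if_atereg.h) pack into the
 * ETH_MAN register, assuming the usual EMAC layout (the field names here
 * are this comment's shorthand, not real macros):
 *
 *	WR4(sc, ETH_MAN, start_of_frame | read_or_write_op |
 *	    phy << PHY_ADDR_SHIFT | reg << REG_ADDR_SHIFT |
 *	    turnaround | data);
 *
 * The exact field positions live in if_atereg.h; the loops above simply
 * spin until ETH_SR_IDLE indicates the shift has completed.
 */
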
static device_method_t ate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ate_probe),
	DEVMETHOD(device_attach,	ate_attach),
	DEVMETHOD(device_detach,	ate_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	ate_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ate_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ate_miibus_writereg),

	{ 0, 0 }
};

static driver_t ate_driver = {
	"ate",
	ate_methods,
	sizeof(struct ate_softc),
};

DRIVER_MODULE(ate, atmelarm, ate_driver, ate_devclass, 0, 0);
DRIVER_MODULE(miibus, ate, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ate, miibus, 1, 1, 1);
MODULE_DEPEND(ate, ether, 1, 1, 1);