/* if_macb.c, FreeBSD revision 210040 */
1/*-
2 * Copyright (c) 2010 Yohanes Nugroho <yohanes@gmail.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/arm/at91/if_macb.c 210040 2010-07-14 00:48:53Z cognet $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/kernel.h>
34#include <sys/lock.h>
35#include <sys/mbuf.h>
36#include <sys/malloc.h>
37#include <sys/module.h>
38#include <sys/rman.h>
39#include <sys/socket.h>
40#include <sys/sockio.h>
41#include <sys/sysctl.h>
42#include <sys/taskqueue.h>
43
44#include <net/ethernet.h>
45#include <net/if.h>
46#include <net/if_arp.h>
47#include <net/if_dl.h>
48#include <net/if_media.h>
49#include <net/if_types.h>
50#include <net/if_vlan_var.h>
51
52#ifdef INET
53#include <netinet/in.h>
54#include <netinet/in_systm.h>
55#include <netinet/in_var.h>
56#include <netinet/ip.h>
57#endif
58
59#include <net/bpf.h>
60#include <net/bpfdesc.h>
61
62#include <dev/mii/mii.h>
63#include <dev/mii/miivar.h>
64
65#include <arm/at91/at91_pmcvar.h>
66#include <arm/at91/if_macbreg.h>
67#include <arm/at91/if_macbvar.h>
68#include <arm/at91/at91_piovar.h>
69
70#include <arm/at91/at91_pio_sam9.h>
71#include <arm/at91/at91sam9g20reg.h>
72
73#include <machine/bus.h>
74#include <machine/intr.h>
75
76/* "device miibus" required.  See GENERIC if you get errors here. */
77#include "miibus_if.h"
78
79
80#define	MACB_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
81#define	MACB_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
82#define	MACB_LOCK_INIT(_sc)					\
83	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
84	    MTX_NETWORK_LOCK, MTX_DEF)
85#define	MACB_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
86#define	MACB_LOCK_ASSERT(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
87#define	MACB_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
88
89
/* Read a 32-bit EMAC register at byte offset 'off'. */
static inline uint32_t
read_4(struct macb_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}
96
/* Write 'val' to the 32-bit EMAC register at byte offset 'off'. */
static inline void
write_4(struct macb_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}
103
104
105static devclass_t macb_devclass;
106
107/* ifnet entry points */
108
109static void	macbinit_locked(void *);
110static void	macbstart_locked(struct ifnet *);
111
112static void	macbinit(void *);
113static void	macbstart(struct ifnet *);
114static void	macbstop(struct macb_softc *);
115static int	macbioctl(struct ifnet * ifp, u_long, caddr_t);
116
117/* bus entry points */
118
119static int	macb_probe(device_t dev);
120static int	macb_attach(device_t dev);
121static int	macb_detach(device_t dev);
122
123/* helper functions */
124static int
125macb_new_rxbuf(struct macb_softc *sc, int index);
126
127static void
128macb_free_desc_dma_tx(struct macb_softc *sc);
129
130static void
131macb_free_desc_dma_rx(struct macb_softc *sc);
132
133static void
134macb_init_desc_dma_tx(struct macb_softc *sc);
135
136static void
137macb_watchdog(struct macb_softc *sc);
138
139static int macb_intr_rx_locked(struct macb_softc *sc, int count);
140static void macb_intr_task(void *arg, int pending __unused);
141static void	macb_tx_task(void *arg, int pending __unused);
142static void macb_intr(void *xsc);
143
144static void
145macb_tx_cleanup(struct macb_softc *sc);
146
147static inline int
148phy_write(struct macb_softc *sc, int phy, int reg, int data);
149
150static void	macb_reset(struct macb_softc *sc);
151
/* Release the TX and RX descriptor-ring DMA resources for this device. */
static void
macb_deactivate(device_t dev)
{
	struct macb_softc *sc;

	sc = device_get_softc(dev);

	macb_free_desc_dma_tx(sc);
	macb_free_desc_dma_rx(sc);

}
163
164static void
165macb_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
166{
167	bus_addr_t *paddr;
168
169	KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
170	paddr = arg;
171	*paddr = segs->ds_addr;
172}
173
/*
 * Create the busdma tags, allocate and load the TX descriptor ring, and
 * create one dmamap per TX slot for later mbuf loads.  Returns 0 or an
 * errno; partial allocations are left in the softc for
 * macb_free_desc_dma_tx() to reclaim.
 */
static int
macb_alloc_desc_dma_tx(struct macb_softc *sc)
{
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS, /* max size */
	    1,				/* nsegments */
	    sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_tx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Couldn't create TX descriptor dma tag\n");
		return (error);
	}
	/* Allocate memory for TX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_tx,
	    (void**)&(sc->desc_tx), BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT, &sc->dmamap_ring_tx);
	if (error != 0) {
		device_printf(sc->dev, "failed to allocate TX dma memory\n");
		return (error);
	}
	/* Load Ring DMA: records the ring's bus address in ring_paddr_tx. */
	error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    sc->desc_tx, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
	    macb_getaddr, &sc->ring_paddr_tx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "can't load TX descriptor dma map\n");
		return (error);
	}
	/* Allocate a busdma tag for mbufs. No alignment restriction applies. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES * MAX_FRAGMENT,	/* maxsize */
	    MAX_FRAGMENT,		/* nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_tx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev, "failed to create TX mbuf dma tag\n");
		return (error);
	}

	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
		/* Create dma map for each descriptor. */
		error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
		    &sc->tx_desc[i].dmamap);
		if (error != 0) {
			device_printf(sc->dev,
			    "failed to create TX mbuf dma map\n");
			return (error);
		}
	}
	return (0);
}
240
241static void
242macb_free_desc_dma_tx(struct macb_softc *sc)
243{
244	struct tx_desc_info *td;
245	int i;
246
247	/* TX buffers. */
248	if (sc->dmatag_ring_tx != NULL) {
249		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
250			td = &sc->tx_desc[i];
251			if (td->dmamap != NULL) {
252				bus_dmamap_destroy(sc->dmatag_ring_tx,
253				    td->dmamap);
254				td->dmamap = NULL;
255			}
256		}
257		bus_dma_tag_destroy(sc->dmatag_ring_tx);
258		sc->dmatag_ring_tx = NULL;
259	}
260
261	/* TX descriptor ring. */
262	if (sc->dmatag_data_tx != NULL) {
263		if (sc->dmamap_ring_tx != NULL)
264			bus_dmamap_unload(sc->dmatag_data_tx,
265			    sc->dmamap_ring_tx);
266		if (sc->dmamap_ring_tx != NULL && sc->desc_tx != NULL)
267			bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
268			    sc->dmamap_ring_tx);
269		sc->dmamap_ring_tx = NULL;
270		sc->dmamap_ring_tx = NULL;
271		bus_dma_tag_destroy(sc->dmatag_data_tx);
272		sc->dmatag_data_tx = NULL;
273	}
274}
275
276static void
277macb_init_desc_dma_tx(struct macb_softc *sc)
278{
279	struct eth_tx_desc *desc;
280	int i;
281
282	MACB_LOCK_ASSERT(sc);
283
284	sc->tx_prod = 0;
285	sc->tx_cons = 0;
286	sc->tx_cnt = 0;
287
288	desc = &sc->desc_tx[0];
289	bzero(desc, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS);
290
291	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
292		desc = &sc->desc_tx[i];
293		if (i == MACB_MAX_TX_BUFFERS - 1)
294			desc->flags = TD_OWN | TD_WRAP_MASK;
295		else
296			desc->flags = TD_OWN;
297	}
298
299	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
300	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
301}
302
/*
 * Create the busdma tags, allocate and load the RX descriptor ring, and
 * create one dmamap per RX slot for the cluster mbufs.  Returns 0 or an
 * errno; partial allocations are left for macb_free_desc_dma_rx().
 */
static int
macb_alloc_desc_dma_rx(struct macb_softc *sc)
{
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    /* maxsize, nsegments */
	    sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 1,
	    /* maxsegsz, flags */
	    sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 0,
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_rx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Couldn't create RX descriptor dma tag\n");
		return (error);
	}
	/* Allocate RX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_rx, (void**)&(sc->desc_rx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dmamap_ring_rx);
	if (error != 0) {
		device_printf(sc->dev,
		    "failed to allocate RX descriptor dma memory\n");
		return (error);
	}

	/* Load dmamap: records the ring's bus address in ring_paddr_rx. */
	error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
	    sc->desc_rx, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS,
	    macb_getaddr, &sc->ring_paddr_rx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "can't load RX descriptor dma map\n");
		return (error);
	}

	/* Allocate a busdma tag for mbufs: one cluster per segment. */
	error = bus_dma_tag_create(sc->sc_parent_tag,/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_rx);	/* dmat */

	if (error != 0) {
		device_printf(sc->dev, "failed to create RX mbuf dma tag\n");
		return (error);
	}

	/* One dmamap per RX ring slot. */
	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
		error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
		    &sc->rx_desc[i].dmamap);
		if (error != 0) {
			device_printf(sc->dev,
			    "failed to create RX mbuf dmamap\n");
			return (error);
		}
	}

	return (0);
}
372
373static void
374macb_free_desc_dma_rx(struct macb_softc *sc)
375{
376	struct rx_desc_info *rd;
377	int i;
378
379	/* RX buffers. */
380	if (sc->dmatag_ring_rx != NULL) {
381		for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
382			rd = &sc->rx_desc[i];
383			if (rd->dmamap != NULL) {
384				bus_dmamap_destroy(sc->dmatag_ring_rx,
385				    rd->dmamap);
386				rd->dmamap = NULL;
387			}
388		}
389		bus_dma_tag_destroy(sc->dmatag_ring_rx);
390		sc->dmatag_ring_rx = NULL;
391	}
392	/* RX descriptor ring. */
393	if (sc->dmatag_data_rx != NULL) {
394		if (sc->dmamap_ring_rx != NULL)
395			bus_dmamap_unload(sc->dmatag_data_rx,
396			    sc->dmamap_ring_rx);
397		if (sc->dmamap_ring_rx != NULL &&
398		    sc->desc_rx != NULL)
399			bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
400			    sc->dmamap_ring_rx);
401		sc->desc_rx = NULL;
402		sc->dmamap_ring_rx = NULL;
403		bus_dma_tag_destroy(sc->dmatag_data_rx);
404		sc->dmatag_data_rx = NULL;
405	}
406}
407
408static int
409macb_init_desc_dma_rx(struct macb_softc *sc)
410{
411	struct eth_rx_desc *desc;
412	struct rx_desc_info *rd;
413	int i;
414
415	MACB_LOCK_ASSERT(sc);
416
417	sc->rx_cons = 0;
418	desc = &sc->desc_rx[0];
419	bzero(desc, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS);
420	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
421		rd = &sc->rx_desc[i];
422		rd->buff = NULL;
423		if (macb_new_rxbuf(sc, i) != 0)
424			return (ENOBUFS);
425	}
426	bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
427	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
428	return (0);
429}
430
431static int
432macb_new_rxbuf(struct macb_softc *sc, int index)
433{
434	struct rx_desc_info *rd;
435	struct eth_rx_desc *desc;
436	struct mbuf *m;
437	bus_dma_segment_t seg[1];
438	int error, nsegs;
439
440	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
441	if (m == NULL)
442		return (ENOBUFS);
443	m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
444	rd = &sc->rx_desc[index];
445	bus_dmamap_unload(sc->dmatag_ring_rx, rd->dmamap);
446	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_rx, rd->dmamap, m,
447	    seg, &nsegs, 0);
448	KASSERT(nsegs == 1, ("Too many segments returned!"));
449	if (error != 0) {
450		m_free(m);
451		return (error);
452	}
453
454	bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap, BUS_DMASYNC_PREREAD);
455	rd->buff = m;
456
457	desc = &sc->desc_rx[index];
458	desc->addr = seg[0].ds_addr;
459
460	desc->flags = DATA_SIZE;
461
462	if (index == MACB_MAX_RX_BUFFERS - 1)
463		desc->addr |= RD_WRAP_MASK;
464
465	return (0);
466}
467
468static int
469macb_allocate_dma(struct macb_softc *sc)
470{
471	int error;
472
473	/* Create parent tag for tx and rx */
474	error = bus_dma_tag_create(
475	    bus_get_dma_tag(sc->dev),	/* parent */
476	    1, 0,			/* alignment, boundary */
477	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
478	    BUS_SPACE_MAXADDR,		/* highaddr */
479	    NULL, NULL,			/* filter, filterarg */
480	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
481	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
482	    0,				/* flags */
483	    NULL, NULL,		/* lockfunc, lockarg */
484	    &sc->sc_parent_tag);
485	if (error != 0) {
486		device_printf(sc->dev, "Couldn't create parent DMA tag\n");
487		return (error);
488	}
489
490	if ((error = macb_alloc_desc_dma_tx(sc)) != 0)
491		return (error);
492	if ((error = macb_alloc_desc_dma_rx(sc)) != 0)
493		return (error);
494	return (0);
495}
496
497
/*
 * Periodic (1 Hz) callout: poll the PHY via mii_tick() and run the TX
 * watchdog, then reschedule.  NOTE(review): macb_watchdog() asserts the
 * driver lock, so this callout is presumably initialized with the softc
 * mutex — confirm at attach (outside this view).
 */
static void
macb_tick(void *xsc)
{
	struct macb_softc *sc;
	struct mii_data *mii;

	sc = xsc;
	mii = device_get_softc(sc->miibus);
	mii_tick(mii);
	macb_watchdog(sc);
	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->tick_ch, hz, macb_tick, sc);
}
513
514
515static void
516macb_watchdog(struct macb_softc *sc)
517{
518	struct ifnet *ifp;
519
520	MACB_LOCK_ASSERT(sc);
521
522	if (sc->macb_watchdog_timer == 0 || --sc->macb_watchdog_timer)
523		return;
524
525	ifp = sc->ifp;
526	if ((sc->flags & MACB_FLAG_LINK) == 0) {
527		if_printf(ifp, "watchdog timeout (missed link)\n");
528		ifp->if_oerrors++;
529		return;
530	}
531
532	if_printf(ifp, "watchdog timeout\n");
533	ifp->if_oerrors++;
534	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
535	macbinit_locked(sc);
536	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
537	taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
538}
539
540
541
/*
 * Bring the interface up with the driver lock held: rebuild both
 * descriptor rings, program the EMAC configuration and ring base
 * registers, enable TX/RX and interrupts, and start the 1 Hz tick.
 */
static void
macbinit_locked(void *xsc)
{
	struct macb_softc *sc;
	struct ifnet *ifp;
	int err;
	uint32_t config;
	struct mii_data *mii;

	sc = xsc;
	ifp = sc->ifp;

	MACB_LOCK_ASSERT(sc);

	/* Already up: nothing to do. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	if ((err = macb_init_desc_dma_rx(sc)) != 0) {
		device_printf(sc->dev, "no memory for RX buffers\n");
		//ecestop(sc);
		return;
	}
	macb_init_desc_dma_tx(sc);

	config = read_4(sc, EMAC_NCFGR) | (sc->clock << 10); /*set clock*/
	config |= CFG_PAE;		/* PAuse Enable */
	config |= CFG_DRFCS;		/* Discard Rx FCS */
	config |= CFG_SPD;		/* 100 mbps*/
	//config |= CFG_CAF;
	config |= CFG_FD;		/* full duplex */

	config |= CFG_RBOF_2; /*offset +2: aligns the IP header (see macb_rx) */

	write_4(sc, EMAC_NCFGR, config);

	/* Point the hardware at the TX and RX descriptor rings. */
	write_4(sc, EMAC_RBQP, sc->ring_paddr_rx);
	write_4(sc, EMAC_TBQP, sc->ring_paddr_tx);

	/* Enable TX and RX */
	write_4(sc, EMAC_NCR, RX_ENABLE | TX_ENABLE | MPE_ENABLE);


	/* Enable interrupts */
	write_4(sc, EMAC_IER, (RCOMP_INTERRUPT |
			       RXUBR_INTERRUPT |
			       TUND_INTERRUPT |
			       RLE_INTERRUPT |
			       TXERR_INTERRUPT |
			       ROVR_INTERRUPT |
			       HRESP_INTERRUPT|
			       TCOMP_INTERRUPT
			));

	/*
	 * Set 'running' flag, and clear output active flag
	 * and attempt to start the output
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);

	/* Mark the link up so transmit is allowed (macbstart_locked checks). */
	sc->flags |= MACB_FLAG_LINK;

	mii_mediachg(mii);

	callout_reset(&sc->tick_ch, hz, macb_tick, sc);
}
611
612
613static void
614macb_tx_cleanup(struct macb_softc *sc)
615{
616	struct ifnet *ifp;
617	struct eth_tx_desc *desc;
618	struct tx_desc_info *td;
619	int flags;
620	int status;
621	int i;
622
623	MACB_LOCK_ASSERT(sc);
624
625	status = read_4(sc, EMAC_TSR);
626
627	write_4(sc, EMAC_TSR, status);
628
629	/*buffer underrun*/
630	if ((status & TSR_UND) != 0) {
631		/*reset buffers*/
632		printf("underrun\n");
633		bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
634		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
635		sc->tx_cons = sc->tx_prod = 0;
636		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
637			desc = &sc->desc_tx[i];
638			desc->flags = TD_OWN;
639		}
640
641		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
642			td = &sc->tx_desc[i];
643			if (td->buff != NULL) {
644				/* We are finished with this descriptor. */
645				bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
646						BUS_DMASYNC_POSTWRITE);
647				/* ... and unload, so we can reuse. */
648				bus_dmamap_unload(sc->dmatag_data_tx,
649						  td->dmamap);
650				m_freem(td->buff);
651				td->buff = NULL;
652			}
653		}
654	}
655
656	if ((status & TSR_COMP) == 0)
657		return;
658
659
660	if (sc->tx_cons == sc->tx_prod)
661		return;
662
663	ifp = sc->ifp;
664
665	/* Prepare to read the ring (owner bit). */
666	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
667	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
668	while (sc->tx_cons != sc->tx_prod) {
669		desc = &sc->desc_tx[sc->tx_cons];
670		if ((desc->flags & TD_OWN) == 0)
671			break;
672
673		td = &sc->tx_desc[sc->tx_cons];
674		if (td->buff != NULL) {
675			/* We are finished with this descriptor. */
676			bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
677					BUS_DMASYNC_POSTWRITE);
678			/* ... and unload, so we can reuse. */
679			bus_dmamap_unload(sc->dmatag_data_tx,
680					  td->dmamap);
681			m_freem(td->buff);
682			td->buff = NULL;
683			ifp->if_opackets++;
684		}
685
686		do {
687			sc->tx_cnt--;
688			MACB_DESC_INC(sc->tx_cons, MACB_MAX_TX_BUFFERS);
689			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
690			flags = desc->flags;
691			desc->flags = TD_OWN;
692			desc = &sc->desc_tx[sc->tx_cons];
693			if (flags & TD_LAST) {
694				break;
695			}
696		} while (sc->tx_cons != sc->tx_prod);
697	}
698
699	/* Unarm watchog timer when there is no pending descriptors in queue. */
700	if (sc->tx_cnt == 0)
701		sc->macb_watchdog_timer = 0;
702}
703
/*
 * Drain completed RX descriptors: chain multi-descriptor frames into a
 * single mbuf chain, hand complete frames to the stack (lock dropped
 * around if_input) and replenish each slot with a fresh cluster.
 * Re-enables the RX interrupts that macb_intr() masked.
 */
static void
macb_rx(struct macb_softc *sc)
{
	struct eth_rx_desc	*rxdesc;
	struct ifnet *ifp;
	struct mbuf *m;
	int rxbytes;
	int flags;
	int nsegs;
	int first;

	rxdesc = &(sc->desc_rx[sc->rx_cons]);

	ifp = sc->ifp;

	bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	nsegs = 0;
	/* RD_OWN set in the address word means the slot holds data. */
	while (rxdesc->addr & RD_OWN) {

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		flags = rxdesc->flags;

		rxbytes = flags & RD_LEN_MASK;

		m = sc->rx_desc[sc->rx_cons].buff;

		bus_dmamap_sync(sc->dmatag_ring_rx,
		    sc->rx_desc[sc->rx_cons].dmamap, BUS_DMASYNC_POSTREAD);
		if (macb_new_rxbuf(sc, sc->rx_cons) != 0) {
			/*
			 * No replacement cluster: drop this frame, recycle
			 * its descriptors and discard any partial chain.
			 */
			ifp->if_iqdrops++;
			first = sc->rx_cons;

			do  {
				rxdesc->flags = DATA_SIZE;
				MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);
				if ((rxdesc->flags & RD_EOF) != 0)
					break;
				rxdesc = &(sc->desc_rx[sc->rx_cons]);
			} while (sc->rx_cons != first);

			if (sc->macb_cdata.rxhead != NULL) {
				m_freem(sc->macb_cdata.rxhead);
				sc->macb_cdata.rxhead = NULL;
				sc->macb_cdata.rxtail = NULL;
			}

			break;
		}

		nsegs++;

		/* Chain received mbufs. */
		if (sc->macb_cdata.rxhead == NULL) {
			/* First segment: skip the 2-byte RBOF offset pad. */
			m->m_data += 2;
			sc->macb_cdata.rxhead = m;
			sc->macb_cdata.rxtail = m;
			if (flags & RD_EOF)
				m->m_len = rxbytes;
			else
				m->m_len = DATA_SIZE - 2;
		} else {
			m->m_flags &= ~M_PKTHDR;
			m->m_len = DATA_SIZE;
			sc->macb_cdata.rxtail->m_next = m;
			sc->macb_cdata.rxtail = m;
		}

		if (flags & RD_EOF) {
			/* Last segment: fix its length, then hand off. */
			if (nsegs > 1) {
				sc->macb_cdata.rxtail->m_len = (rxbytes -
				    ((nsegs - 1) * DATA_SIZE)) + 2;
			}

			m = sc->macb_cdata.rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = rxbytes;
			m->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;

			nsegs = 0;
			/* Drop the lock across the stack input call. */
			MACB_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			MACB_LOCK(sc);
			sc->macb_cdata.rxhead = NULL;
			sc->macb_cdata.rxtail = NULL;

		}

		/* Give the slot back to the hardware. */
		rxdesc->addr &= ~RD_OWN;

		MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);

		rxdesc = &(sc->desc_rx[sc->rx_cons]);
	}

	/* Re-enable the RX interrupts masked in macb_intr(). */
	write_4(sc, EMAC_IER, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));

}
808
/*
 * Process received frames with the driver lock held.  'count' is
 * currently unused (macb_rx drains everything); always returns 0.
 */
static int
macb_intr_rx_locked(struct macb_softc *sc, int count)
{
	macb_rx(sc);
	return (0);
}
815
/*
 * Deferred RX handler, run from the taskqueue that macb_intr() queued;
 * takes the driver lock and drains the RX ring.
 */
static void
macb_intr_task(void *arg, int pending __unused)
{
	struct macb_softc *sc;

	sc = arg;
	MACB_LOCK(sc);
	macb_intr_rx_locked(sc, -1);
	MACB_UNLOCK(sc);
}
826
827static void
828macb_intr(void *xsc)
829{
830	struct macb_softc *sc;
831	struct ifnet *ifp;
832	uint32_t status;
833
834	sc = xsc;
835	ifp = sc->ifp;
836	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
837		printf("not running\n");
838		return;
839	}
840
841	status = read_4(sc, EMAC_ISR);
842
843	while (status) {
844		if (status & RCOMP_INTERRUPT) {
845			write_4(sc, EMAC_IDR, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));
846			taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
847		}
848
849		if (status & TCOMP_INTERRUPT) {
850			MACB_LOCK(sc);
851			macb_tx_cleanup(sc);
852			MACB_UNLOCK(sc);
853		}
854
855		status = read_4(sc, EMAC_ISR);
856	}
857
858	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
859	taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
860}
861
/*
 * Load the mbuf chain *m_head into TX descriptors starting at tx_prod.
 * A chain with more than MAX_FRAGMENT segments is collapsed and retried
 * once.  On failure *m_head may be freed and set to NULL.  Returns 0 on
 * success, ENOBUFS when the ring lacks room, or another errno.
 */
static inline int
macb_encap(struct macb_softc *sc, struct mbuf **m_head)
{
	struct eth_tx_desc *desc;
	struct tx_desc_info *txd, *txd_last;
	struct mbuf *m;
	bus_dma_segment_t segs[MAX_FRAGMENT];
	bus_dmamap_t map;
	uint32_t csum_flags;
	int error, i, nsegs, prod, si;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->tx_prod;

	m = *m_head;

	txd = txd_last = &sc->tx_desc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
	    *m_head, segs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments: squeeze the chain and retry once. */
		m = m_collapse(*m_head, M_DONTWAIT, MAX_FRAGMENT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
		    *m_head, segs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0) {
		return (error);
	}
	/* Check for TX descriptor overruns. */
	if (sc->tx_cnt + nsegs > MACB_MAX_TX_BUFFERS - 1) {
		bus_dmamap_unload(sc->dmatag_ring_tx, txd->dmamap);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->dmatag_ring_tx, txd->dmamap, BUS_DMASYNC_PREWRITE);
	m = *m_head;

	/* TODO: VLAN hardware tag insertion. */

	csum_flags = 0;
	si = prod;
	desc = NULL;

	/*
	 * Fill one descriptor per segment.  TD_OWN is kept set on the
	 * first descriptor while the rest are built; it is cleared last
	 * (below) so the hardware never sees a partial packet.
	 */
	for (i = 0; i < nsegs; i++) {
		desc = &sc->desc_tx[prod];
		desc->addr = segs[i].ds_addr;

		if (i == 0 ) {
			desc->flags = segs[i].ds_len | TD_OWN;
		} else {
			desc->flags = segs[i].ds_len;
		}

		if (prod == MACB_MAX_TX_BUFFERS - 1)
			desc->flags |= TD_WRAP_MASK;

		sc->tx_cnt++;
		MACB_DESC_INC(prod, MACB_MAX_TX_BUFFERS);
	}
	/*
	 * Set EOP on the last fragment.
	 */

	desc->flags |= TD_LAST;
	/* Clearing TD_OWN on the first fragment hands the packet over. */
	desc = &sc->desc_tx[si];
	desc->flags &= ~TD_OWN;

	sc->tx_prod = prod;

	/*
	 * Swap the first dma map and the last.  NOTE(review): txd is
	 * never advanced, so txd == txd_last and this swap is a no-op;
	 * the mbuf is stored at the packet's first slot, which matches
	 * where macb_tx_cleanup() frees it.
	 */
	map = txd_last->dmamap;
	txd_last->dmamap = txd->dmamap;
	txd->dmamap = map;
	txd->buff = m;

	return (0);
}
948
949
950static void
951macbstart_locked(struct ifnet *ifp)
952{
953
954
955
956	struct macb_softc *sc;
957	struct mbuf *m0;
958#if 0
959	struct mbuf *m_new;
960#endif
961	int queued = 0;
962
963	sc = ifp->if_softc;
964
965	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
966	    IFF_DRV_RUNNING || (sc->flags & MACB_FLAG_LINK) == 0) {
967		return;
968	}
969
970	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
971		/* Get packet from the queue */
972		IF_DEQUEUE(&ifp->if_snd, m0);
973		if (m0 == NULL)
974			break;
975#if 0
976		if (m0->m_next != NULL) {
977			/* Fragmented mbuf chain, collapse it. */
978			m_new = m_defrag(m0, M_DONTWAIT);
979			if (m_new != NULL) {
980				/* Original frame freed. */
981				m0 = m_new;
982			} else {
983				/* Defragmentation failed, just use the chain. */
984			}
985		}
986#endif
987		if (macb_encap(sc, &m0)) {
988			if (m0 == NULL)
989				break;
990			IF_PREPEND(&ifp->if_snd, m0);
991			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
992			break;
993		}
994		queued++;
995		BPF_MTAP(ifp, m0);
996	}
997	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
998		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
999	if (queued) {
1000		bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
1001		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1002		write_4(sc, EMAC_NCR, read_4(sc, EMAC_NCR) | TRANSMIT_START);
1003		sc->macb_watchdog_timer = MACB_TIMEOUT;
1004	}
1005}
1006
/* if_init entry point: run macbinit_locked() under the driver lock. */
static void
macbinit(void *xsc)
{
	struct macb_softc *sc = xsc;

	MACB_LOCK(sc);
	macbinit_locked(sc);
	MACB_UNLOCK(sc);
}
1016
/* if_start entry point: serialize transmission on the driver lock. */
static void
macbstart(struct ifnet *ifp)
{
	struct macb_softc *sc = ifp->if_softc;
	MACB_ASSERT_UNLOCKED(sc);
	MACB_LOCK(sc);
	macbstart_locked(ifp);
	MACB_UNLOCK(sc);

}
1027
1028
/* Deferred transmit task: restart output from taskqueue context. */
static void
macb_tx_task(void *arg, int pending __unused)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	macbstart(ifp);
}
1037
1038
1039static void
1040macbstop(struct macb_softc *sc)
1041{
1042	struct ifnet *ifp = sc->ifp;
1043	struct rx_desc_info *rd;
1044	struct tx_desc_info *td;
1045	int i;
1046
1047	ifp = sc->ifp;
1048
1049	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1050
1051	macb_reset(sc);
1052
1053	sc->flags &= ~MACB_FLAG_LINK;
1054	callout_stop(&sc->tick_ch);
1055	sc->macb_watchdog_timer = 0;
1056
1057	/* Free TX/RX mbufs still in the queues. */
1058	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
1059		td = &sc->tx_desc[i];
1060		if (td->buff != NULL) {
1061			bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
1062			    BUS_DMASYNC_POSTWRITE);
1063			bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
1064			m_freem(td->buff);
1065			td->buff = NULL;
1066		}
1067	}
1068	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
1069		rd = &sc->rx_desc[i];
1070		if (rd->buff != NULL) {
1071			bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap,
1072			    BUS_DMASYNC_POSTREAD);
1073			bus_dmamap_unload(sc->dmatag_data_rx, rd->dmamap);
1074			m_freem(rd->buff);
1075			rd->buff = NULL;
1076		}
1077	}
1078}
1079
/*
 * Compute the 6-bit multicast hash index used by the EMAC hash
 * registers: bit i of the index is the XOR of MAC-address bits
 * i, i+6, i+12, ..., i+42.
 */
static int
get_hash_index(uint8_t *mac)
{
	int i, j, k;
	int result;
	int bit;

	result = 0;
	for (i = 0; i < 6; i++) {
		bit = 0;
		for (j = 0; j < 8;  j++) {
			k = j * 6 + i;
			bit ^= (mac[k/8] & (1 << (k % 8)) ) != 0;
		}
		/*
		 * Place the computed bit at position i.  The original
		 * OR-ed every bit into position 0, collapsing the index
		 * to 0 or 1 and breaking multicast hash filtering.
		 */
		result |= bit << i;
	}
	return (result);
}
1098
/*
 * Set the hash-table bit for 'mac' in the 64-bit filter image
 * (filter[0] -> HRB bits 0-31, filter[1] -> HRT bits 32-63).
 */
static void
set_mac_filter(uint32_t *filter, uint8_t *mac)
{
	int bits;

	bits = get_hash_index(mac);
	filter[bits >> 5] |= 1 << (bits & 31);
}
1107
1108static void
1109set_filter(struct macb_softc *sc)
1110{
1111	struct ifnet *ifp;
1112	struct ifmultiaddr *ifma;
1113	int config;
1114	int count;
1115	uint32_t multicast_filter[2];
1116
1117	ifp = sc->ifp;
1118
1119	config = read_4(sc, EMAC_NCFGR);
1120
1121	config &= ~(CFG_CAF | CFG_MTI);
1122	write_4(sc, EMAC_HRB, 0);
1123	write_4(sc, EMAC_HRT, 0);
1124
1125	if ((ifp->if_flags & (IFF_ALLMULTI |IFF_PROMISC)) != 0){
1126		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
1127			write_4(sc, EMAC_HRB, ~0);
1128			write_4(sc, EMAC_HRT, ~0);
1129			config |= CFG_MTI;
1130		}
1131		if ((ifp->if_flags & IFF_PROMISC) != 0) {
1132			config |= CFG_CAF;
1133		}
1134		write_4(sc, EMAC_NCFGR, config);
1135		return;
1136	}
1137
1138	if_maddr_rlock(ifp);
1139	count = 0;
1140	multicast_filter[0] = 0;
1141	multicast_filter[1] = 0;
1142
1143	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1144		if (ifma->ifma_addr->sa_family != AF_LINK)
1145			continue;
1146		count++;
1147		set_mac_filter(multicast_filter,
1148			   LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
1149	}
1150	if (count) {
1151		write_4(sc, EMAC_HRB, multicast_filter[0]);
1152		write_4(sc, EMAC_HRT, multicast_filter[1]);
1153		write_4(sc, EMAC_NCFGR, config|CFG_MTI);
1154	}
1155	if_maddr_runlock(ifp);
1156}
1157
/*
 * ioctl handler: interface up/down and flag changes, multicast list
 * updates and media requests; everything else goes to ether_ioctl().
 */
static int
macbioctl(struct ifnet * ifp, u_long cmd, caddr_t data)
{

	struct macb_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;

	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MACB_LOCK(sc);

		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/* Refilter only when PROMISC/ALLMULTI flip. */
				if (((ifp->if_flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					set_filter(sc);
			} else {
				macbinit_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			macbstop(sc);
		}
		/* Remember flags to detect the next filter-relevant change. */
		sc->if_flags = ifp->if_flags;
		MACB_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MACB_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			set_filter(sc);

		MACB_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Delegate media selection/reporting to the MII layer. */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);

}
1206
1207/* bus entry points */
1208
/*
 * Device probe: unconditionally claim the device and set its
 * description.  Returns 0 (success).
 */
static int
macb_probe(device_t dev)
{
	device_set_desc(dev, "macb");
	return (0);
}
1215
/*
 * Change media according to request (ifmedia callback); delegates to
 * mii_mediachg() under the driver lock.
 */
static int
macb_ifmedia_upd(struct ifnet *ifp)
{
	struct macb_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	MACB_LOCK(sc);
	mii_mediachg(mii);
	MACB_UNLOCK(sc);
	return (0);
}
1231
1232/*
1233 * Notify the world which media we're using.
1234 */
1235static void
1236macb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1237{
1238	struct macb_softc *sc = ifp->if_softc;
1239	struct mii_data *mii;
1240
1241	mii = device_get_softc(sc->miibus);
1242
1243	MACB_LOCK(sc);
1244	/* Don't report link state if driver is not running. */
1245	if ((ifp->if_flags & IFF_UP) == 0) {
1246		MACB_UNLOCK(sc);
1247		return;
1248	}
1249	mii_pollstat(mii);
1250	ifmr->ifm_active = mii->mii_media_active;
1251	ifmr->ifm_status = mii->mii_media_status;
1252	MACB_UNLOCK(sc);
1253}
1254
/*
 * Put the EMAC into a known quiescent state: stop RX/TX, clear the
 * statistics counters and status registers, and mask all interrupts.
 * The register write order is deliberate; do not reorder.
 */
static void
macb_reset(struct macb_softc *sc)
{
	/*
	 * Disable RX and TX
	 */
	write_4(sc, EMAC_NCR, 0);

	/* Zero the hardware statistics counters. */
	write_4(sc, EMAC_NCR, CLEAR_STAT);

	/* Clear all status flags */
	write_4(sc, EMAC_TSR, ~0UL);
	write_4(sc, EMAC_RSR, ~0UL);

	/* Disable all interrupts */
	write_4(sc, EMAC_IDR, ~0UL);
	/* Read ISR to clear any latched interrupt status. */
	read_4(sc, EMAC_ISR);

}
1274
1275
1276static int
1277macb_get_mac(struct macb_softc *sc, u_char *eaddr)
1278{
1279	uint32_t bottom;
1280	uint16_t top;
1281
1282	bottom = read_4(sc, EMAC_SA1B);
1283	top = read_4(sc, EMAC_SA1T);
1284
1285	eaddr[0] = bottom & 0xff;
1286	eaddr[1] = (bottom >> 8) & 0xff;
1287	eaddr[2] = (bottom >> 16) & 0xff;
1288	eaddr[3] = (bottom >> 24) & 0xff;
1289	eaddr[4] = top & 0xff;
1290	eaddr[5] = (top >> 8) & 0xff;
1291
1292	return (0);
1293}
1294
1295
1296static int
1297macb_attach(device_t dev)
1298{
1299	struct macb_softc *sc;
1300	struct ifnet *ifp = NULL;
1301	struct sysctl_ctx_list *sctx;
1302	struct sysctl_oid *soid;
1303	int pclk_hz;
1304	u_char eaddr[ETHER_ADDR_LEN];
1305	int rid;
1306	int err;
1307	struct at91_pmc_clock *master;
1308
1309
1310	err = 0;
1311
1312	sc = device_get_softc(dev);
1313	sc->dev = dev;
1314
1315	MACB_LOCK_INIT(sc);
1316
1317	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1318
1319	/*
1320	 * Allocate resources.
1321	 */
1322	rid = 0;
1323	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1324	    RF_ACTIVE);
1325	if (sc->mem_res == NULL) {
1326		device_printf(dev, "could not allocate memory resources.\n");
1327		err = ENOMEM;
1328		goto out;
1329	}
1330	rid = 0;
1331	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1332	    RF_ACTIVE);
1333	if (sc->irq_res == NULL) {
1334		device_printf(dev, "could not allocate interrupt resources.\n");
1335		err = ENOMEM;
1336		goto out;
1337	}
1338
1339	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, 1<<7, 0);
1340
1341	at91_pio_gpio_input(AT91SAM9G20_PIOA_BASE, 1<<7);
1342	at91_pio_gpio_set_deglitch(AT91SAM9G20_PIOA_BASE, 1<<7, 1);
1343
1344	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA19, 0);	/* ETXCK_EREFCK */
1345	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA17, 0);	/* ERXDV */
1346	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA14, 0);	/* ERX0 */
1347	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA15, 0);	/* ERX1 */
1348	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA18, 0);	/* ERXER */
1349	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA16, 0);	/* ETXEN */
1350	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA12, 0);	/* ETX0 */
1351	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA13, 0);	/* ETX1 */
1352	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA21, 0);	/* EMDIO */
1353	at91_pio_use_periph_a(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA20, 0);	/* EMDC */
1354
1355	at91_pio_use_periph_b(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA28, 0);	/* ECRS */
1356	at91_pio_use_periph_b(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA29, 0);	/* ECOL */
1357	at91_pio_use_periph_b(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA25, 0);	/* ERX2 */
1358	at91_pio_use_periph_b(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA26, 0);	/* ERX3 */
1359	at91_pio_use_periph_b(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA27, 0);	/* ERXCK */
1360	at91_pio_use_periph_b(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA23, 0);	/* ETX2 */
1361	at91_pio_use_periph_b(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA24, 0);	/* ETX3 */
1362	at91_pio_use_periph_b(AT91SAM9G20_PIOA_BASE, AT91C_PIO_PA22, 0);	/* ETXER */
1363
1364
1365	/*setup clock*/
1366	sc->clk = at91_pmc_clock_ref("macb_clk");
1367	at91_pmc_clock_enable(sc->clk);
1368
1369	macb_reset(sc);
1370	macb_get_mac(sc, eaddr);
1371
1372	master = at91_pmc_clock_ref("mck");
1373
1374	pclk_hz = master->hz;
1375
1376	sc->clock = CFG_CLK_8;
1377	if (pclk_hz <= 20000000)
1378		sc->clock = CFG_CLK_8;
1379	else if (pclk_hz <= 40000000)
1380		sc->clock = CFG_CLK_16;
1381	else if (pclk_hz <= 80000000)
1382		sc->clock = CFG_CLK_32;
1383	else
1384		sc->clock = CFG_CLK_64;
1385
1386	sc->clock = sc->clock << 10;
1387
1388	write_4(sc, EMAC_NCFGR, sc->clock);
1389	write_4(sc, EMAC_USRIO, USRIO_CLOCK);       //enable clock
1390
1391	write_4(sc, EMAC_NCR, MPE_ENABLE); //enable MPE
1392
1393	sc->ifp = ifp = if_alloc(IFT_ETHER);
1394	if (mii_phy_probe(dev, &sc->miibus, macb_ifmedia_upd, macb_ifmedia_sts)) {
1395		device_printf(dev, "Cannot find my PHY.\n");
1396		err = ENXIO;
1397		goto out;
1398	}
1399
1400	if (macb_allocate_dma(sc) != 0)
1401		goto out;
1402
1403	/* Sysctls */
1404	sctx = device_get_sysctl_ctx(dev);
1405	soid = device_get_sysctl_tree(dev);
1406
1407	ifp->if_softc = sc;
1408	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1409	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1410	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1411	ifp->if_capenable |= IFCAP_VLAN_MTU;	/* The hw bits already set. */
1412	ifp->if_start = macbstart;
1413	ifp->if_ioctl = macbioctl;
1414	ifp->if_init = macbinit;
1415	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1416	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
1417	IFQ_SET_READY(&ifp->if_snd);
1418	sc->if_flags = ifp->if_flags;
1419
1420	TASK_INIT(&sc->sc_intr_task, 0, macb_intr_task, sc);
1421	TASK_INIT(&sc->sc_tx_task, 0, macb_tx_task, ifp);
1422
1423	sc->sc_tq = taskqueue_create_fast("macb_taskq", M_WAITOK,
1424	    taskqueue_thread_enqueue, &sc->sc_tq);
1425	if (sc->sc_tq == NULL) {
1426		device_printf(sc->dev, "could not create taskqueue\n");
1427		goto out;
1428	}
1429
1430	ether_ifattach(ifp, eaddr);
1431
1432	/*
1433	 * Activate the interrupt.
1434	 */
1435	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
1436	    NULL, macb_intr, sc, &sc->intrhand);
1437	if (err) {
1438		device_printf(dev, "could not establish interrupt handler.\n");
1439		ether_ifdetach(ifp);
1440		goto out;
1441	}
1442
1443	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
1444	    device_get_nameunit(sc->dev));
1445
1446	sc->macb_cdata.rxhead = 0;
1447	sc->macb_cdata.rxtail = 0;
1448
1449	phy_write(sc, 0, 0, 0x3300); //force autoneg
1450
1451	return (0);
1452out:
1453
1454	return (err);
1455}
1456
1457static int
1458macb_detach(device_t dev)
1459{
1460	struct macb_softc *sc;
1461
1462	sc = device_get_softc(dev);
1463	macbstop(sc);
1464	macb_deactivate(dev);
1465
1466	return (0);
1467}
1468
1469/*PHY related functions*/
1470static inline int
1471phy_read(struct macb_softc *sc, int phy, int reg)
1472{
1473	int val;
1474
1475	write_4(sc, EMAC_MAN, EMAC_MAN_REG_RD(phy, reg));
1476	while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1477		continue;
1478	val = read_4(sc, EMAC_MAN) & EMAC_MAN_VALUE_MASK;
1479
1480	return (val);
1481}
1482
1483static inline int
1484phy_write(struct macb_softc *sc, int phy, int reg, int data)
1485{
1486
1487	write_4(sc, EMAC_MAN, EMAC_MAN_REG_WR(phy, reg, data));
1488	while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1489		continue;
1490
1491	return (0);
1492}
1493
1494/*
1495 * MII bus support routines.
1496 */
1497static int
1498macb_miibus_readreg(device_t dev, int phy, int reg)
1499{
1500	struct macb_softc *sc;
1501	sc = device_get_softc(dev);
1502	return (phy_read(sc, phy, reg));
1503}
1504
1505static int
1506macb_miibus_writereg(device_t dev, int phy, int reg, int data)
1507{
1508	struct macb_softc *sc;
1509	sc = device_get_softc(dev);
1510	return (phy_write(sc, phy, reg, data));
1511}
1512
1513static void
1514macb_child_detached(device_t dev, device_t child)
1515{
1516	struct macb_softc *sc;
1517	sc = device_get_softc(dev);
1518
1519}
1520
1521static void
1522macb_miibus_statchg(device_t dev)
1523{
1524	struct macb_softc *sc;
1525	struct mii_data *mii;
1526	int config;
1527
1528	sc = device_get_softc(dev);
1529
1530	mii = device_get_softc(sc->miibus);
1531
1532	sc->flags &= ~MACB_FLAG_LINK;
1533
1534	config = read_4(sc, EMAC_NCFGR);
1535
1536	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1537		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1538		case IFM_10_T:
1539			config &= ~(CFG_SPD);
1540			sc->flags |= MACB_FLAG_LINK;
1541			break;
1542		case IFM_100_TX:
1543			config |= CFG_SPD;
1544			sc->flags |= MACB_FLAG_LINK;
1545			break;
1546		default:
1547			break;
1548		}
1549	}
1550
1551	config |= CFG_FD;
1552	write_4(sc, EMAC_NCFGR, config);
1553}
1554
/* newbus method dispatch table for the macb driver. */
static device_method_t macb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	macb_probe),
	DEVMETHOD(device_attach,	macb_attach),
	DEVMETHOD(device_detach,	macb_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	macb_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	macb_miibus_readreg),
	DEVMETHOD(miibus_writereg,	macb_miibus_writereg),
	DEVMETHOD(miibus_statchg,	macb_miibus_statchg),
	{ 0, 0 }	/* sentinel: terminates the method list */
};
1570
/* Driver descriptor: name, method table, and per-device softc size. */
static driver_t macb_driver = {
	"macb",
	macb_methods,
	sizeof(struct macb_softc),
};
1576
1577
/* Register macb on the atmelarm bus and attach miibus beneath it. */
DRIVER_MODULE(macb, atmelarm, macb_driver, macb_devclass, 0, 0);
DRIVER_MODULE(miibus, macb, miibus_driver, miibus_devclass, 0, 0);
/* Load-order dependencies: miibus for PHY access, ether for ifnet glue. */
MODULE_DEPEND(macb, miibus, 1, 1, 1);
MODULE_DEPEND(macb, ether, 1, 1, 1);
1582