1/*-
2 * Copyright (c) 2010 Yohanes Nugroho <yohanes@gmail.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD$");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/kernel.h>
34#include <sys/lock.h>
35#include <sys/mbuf.h>
36#include <sys/malloc.h>
37#include <sys/module.h>
38#include <sys/rman.h>
39#include <sys/socket.h>
40#include <sys/sockio.h>
41#include <sys/sysctl.h>
42#include <sys/taskqueue.h>
43
44#include <net/ethernet.h>
45#include <net/if.h>
46#include <net/if_arp.h>
47#include <net/if_dl.h>
48#include <net/if_media.h>
49#include <net/if_types.h>
50#include <net/if_vlan_var.h>
51
52#ifdef INET
53#include <netinet/in.h>
54#include <netinet/in_systm.h>
55#include <netinet/in_var.h>
56#include <netinet/ip.h>
57#endif
58
59#include <net/bpf.h>
60#include <net/bpfdesc.h>
61
62#include <dev/mii/mii.h>
63#include <dev/mii/miivar.h>
64
65#include <arm/at91/at91_pmcvar.h>
66#include <arm/at91/if_macbreg.h>
67#include <arm/at91/if_macbvar.h>
68#include <arm/at91/at91_piovar.h>
69
70#include <arm/at91/at91sam9g20reg.h>
71
72#include <machine/bus.h>
73#include <machine/intr.h>
74
75/* "device miibus" required.  See GENERIC if you get errors here. */
76#include "miibus_if.h"
77
78
79#define	MACB_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
80#define	MACB_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
81#define	MACB_LOCK_INIT(_sc)					\
82	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
83	    MTX_NETWORK_LOCK, MTX_DEF)
84#define	MACB_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
85#define	MACB_LOCK_ASSERT(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
86#define	MACB_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
87
88
89static inline uint32_t
90read_4(struct macb_softc *sc, bus_size_t off)
91{
92
93	return (bus_read_4(sc->mem_res, off));
94}
95
96static inline void
97write_4(struct macb_softc *sc, bus_size_t off, uint32_t val)
98{
99
100	bus_write_4(sc->mem_res, off, val);
101}
102
103
104static devclass_t macb_devclass;
105
106/* ifnet entry points */
107
108static void	macbinit_locked(void *);
109static void	macbstart_locked(struct ifnet *);
110
111static void	macbinit(void *);
112static void	macbstart(struct ifnet *);
113static void	macbstop(struct macb_softc *);
114static int	macbioctl(struct ifnet * ifp, u_long, caddr_t);
115
116/* bus entry points */
117
118static int	macb_probe(device_t dev);
119static int	macb_attach(device_t dev);
120static int	macb_detach(device_t dev);
121
122/* helper functions */
123static int
124macb_new_rxbuf(struct macb_softc *sc, int index);
125
126static void
127macb_free_desc_dma_tx(struct macb_softc *sc);
128
129static void
130macb_free_desc_dma_rx(struct macb_softc *sc);
131
132static void
133macb_init_desc_dma_tx(struct macb_softc *sc);
134
135static void
136macb_watchdog(struct macb_softc *sc);
137
138static int macb_intr_rx_locked(struct macb_softc *sc, int count);
139static void macb_intr_task(void *arg, int pending __unused);
140static void macb_intr(void *xsc);
141
142static void
143macb_tx_cleanup(struct macb_softc *sc);
144
145static inline int
146phy_write(struct macb_softc *sc, int phy, int reg, int data);
147
148static void	macb_reset(struct macb_softc *sc);
149
150static void
151macb_deactivate(device_t dev)
152{
153	struct macb_softc *sc;
154
155	sc = device_get_softc(dev);
156
157	macb_free_desc_dma_tx(sc);
158	macb_free_desc_dma_rx(sc);
159
160}
161
162static void
163macb_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
164{
165	bus_addr_t *paddr;
166
167	KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
168	paddr = arg;
169	*paddr = segs->ds_addr;
170}
171
/*
 * Allocate TX DMA resources: a 16-byte-aligned, single-segment
 * descriptor ring of MACB_MAX_TX_BUFFERS eth_tx_desc entries (its bus
 * address is stored in sc->ring_paddr_tx by the load callback), plus
 * an mbuf tag and one dma map per ring slot for outgoing packets.
 * Returns 0 or a busdma errno; partially allocated resources are left
 * for macb_free_desc_dma_tx() to reclaim.
 */
static int
macb_alloc_desc_dma_tx(struct macb_softc *sc)
{
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS, /* max size */
	    1,				/* nsegments */
	    sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_tx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Couldn't create TX descriptor dma tag\n");
		return (error);
	}
	/* Allocate memory for TX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_tx,
	    (void**)&(sc->desc_tx), BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT, &sc->dmamap_ring_tx);
	if (error != 0) {
		device_printf(sc->dev, "failed to allocate TX dma memory\n");
		return (error);
	}
	/* Load Ring DMA: macb_getaddr() records the ring's bus address. */
	error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    sc->desc_tx, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
	    macb_getaddr, &sc->ring_paddr_tx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "can't load TX descriptor dma map\n");
		return (error);
	}
	/* Allocate a busdma tag for mbufs. No alignment restriction applies. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES * MAX_FRAGMENT,	/* maxsize */
	    MAX_FRAGMENT,		/* nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_tx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev, "failed to create TX mbuf dma tag\n");
		return (error);
	}

	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
		/* Create dma map for each descriptor. */
		error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
		    &sc->tx_desc[i].dmamap);
		if (error != 0) {
			device_printf(sc->dev,
			    "failed to create TX mbuf dma map\n");
			return (error);
		}
	}
	return (0);
}
238
239static void
240macb_free_desc_dma_tx(struct macb_softc *sc)
241{
242	struct tx_desc_info *td;
243	int i;
244
245	/* TX buffers. */
246	if (sc->dmatag_ring_tx != NULL) {
247		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
248			td = &sc->tx_desc[i];
249			if (td->dmamap != NULL) {
250				bus_dmamap_destroy(sc->dmatag_ring_tx,
251				    td->dmamap);
252				td->dmamap = NULL;
253			}
254		}
255		bus_dma_tag_destroy(sc->dmatag_ring_tx);
256		sc->dmatag_ring_tx = NULL;
257	}
258
259	/* TX descriptor ring. */
260	if (sc->dmatag_data_tx != NULL) {
261		if (sc->dmamap_ring_tx != NULL)
262			bus_dmamap_unload(sc->dmatag_data_tx,
263			    sc->dmamap_ring_tx);
264		if (sc->dmamap_ring_tx != NULL && sc->desc_tx != NULL)
265			bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
266			    sc->dmamap_ring_tx);
267		sc->dmamap_ring_tx = NULL;
268		sc->dmamap_ring_tx = NULL;
269		bus_dma_tag_destroy(sc->dmatag_data_tx);
270		sc->dmatag_data_tx = NULL;
271	}
272}
273
274static void
275macb_init_desc_dma_tx(struct macb_softc *sc)
276{
277	struct eth_tx_desc *desc;
278	int i;
279
280	MACB_LOCK_ASSERT(sc);
281
282	sc->tx_prod = 0;
283	sc->tx_cons = 0;
284	sc->tx_cnt = 0;
285
286	desc = &sc->desc_tx[0];
287	bzero(desc, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS);
288
289	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
290		desc = &sc->desc_tx[i];
291		if (i == MACB_MAX_TX_BUFFERS - 1)
292			desc->flags = TD_OWN | TD_WRAP_MASK;
293		else
294			desc->flags = TD_OWN;
295	}
296
297	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
298	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
299}
300
/*
 * Allocate RX DMA resources: a 16-byte-aligned, single-segment
 * descriptor ring of MACB_MAX_RX_BUFFERS eth_rx_desc entries (its bus
 * address is stored in sc->ring_paddr_rx by the load callback), plus
 * an mbuf-cluster tag and one dma map per ring slot.  Returns 0 or a
 * busdma errno; partially allocated resources are left for
 * macb_free_desc_dma_rx() to reclaim.
 */
static int
macb_alloc_desc_dma_rx(struct macb_softc *sc)
{
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    /* maxsize, nsegments */
	    sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 1,
	    /* maxsegsz, flags */
	    sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 0,
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_rx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Couldn't create RX descriptor dma tag\n");
		return (error);
	}
	/* Allocate RX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_rx, (void**)&(sc->desc_rx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dmamap_ring_rx);
	if (error != 0) {
		device_printf(sc->dev,
		    "failed to allocate RX descriptor dma memory\n");
		return (error);
	}

	/* Load dmamap: macb_getaddr() records the ring's bus address. */
	error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
	    sc->desc_rx, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS,
	    macb_getaddr, &sc->ring_paddr_rx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "can't load RX descriptor dma map\n");
		return (error);
	}

	/* Allocate a busdma tag for mbufs (one cluster per segment). */
	error = bus_dma_tag_create(sc->sc_parent_tag,/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_rx);	/* dmat */

	if (error != 0) {
		device_printf(sc->dev, "failed to create RX mbuf dma tag\n");
		return (error);
	}

	/* One dma map per RX ring slot. */
	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
		error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
		    &sc->rx_desc[i].dmamap);
		if (error != 0) {
			device_printf(sc->dev,
			    "failed to create RX mbuf dmamap\n");
			return (error);
		}
	}

	return (0);
}
370
371static void
372macb_free_desc_dma_rx(struct macb_softc *sc)
373{
374	struct rx_desc_info *rd;
375	int i;
376
377	/* RX buffers. */
378	if (sc->dmatag_ring_rx != NULL) {
379		for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
380			rd = &sc->rx_desc[i];
381			if (rd->dmamap != NULL) {
382				bus_dmamap_destroy(sc->dmatag_ring_rx,
383				    rd->dmamap);
384				rd->dmamap = NULL;
385			}
386		}
387		bus_dma_tag_destroy(sc->dmatag_ring_rx);
388		sc->dmatag_ring_rx = NULL;
389	}
390	/* RX descriptor ring. */
391	if (sc->dmatag_data_rx != NULL) {
392		if (sc->dmamap_ring_rx != NULL)
393			bus_dmamap_unload(sc->dmatag_data_rx,
394			    sc->dmamap_ring_rx);
395		if (sc->dmamap_ring_rx != NULL &&
396		    sc->desc_rx != NULL)
397			bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
398			    sc->dmamap_ring_rx);
399		sc->desc_rx = NULL;
400		sc->dmamap_ring_rx = NULL;
401		bus_dma_tag_destroy(sc->dmatag_data_rx);
402		sc->dmatag_data_rx = NULL;
403	}
404}
405
406static int
407macb_init_desc_dma_rx(struct macb_softc *sc)
408{
409	struct eth_rx_desc *desc;
410	struct rx_desc_info *rd;
411	int i;
412
413	MACB_LOCK_ASSERT(sc);
414
415	sc->rx_cons = 0;
416	desc = &sc->desc_rx[0];
417	bzero(desc, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS);
418	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
419		rd = &sc->rx_desc[i];
420		rd->buff = NULL;
421		if (macb_new_rxbuf(sc, i) != 0)
422			return (ENOBUFS);
423	}
424	bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
425	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
426	return (0);
427}
428
429static int
430macb_new_rxbuf(struct macb_softc *sc, int index)
431{
432	struct rx_desc_info *rd;
433	struct eth_rx_desc *desc;
434	struct mbuf *m;
435	bus_dma_segment_t seg[1];
436	int error, nsegs;
437
438	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
439	if (m == NULL)
440		return (ENOBUFS);
441	m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
442	rd = &sc->rx_desc[index];
443	bus_dmamap_unload(sc->dmatag_ring_rx, rd->dmamap);
444	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_rx, rd->dmamap, m,
445	    seg, &nsegs, 0);
446	KASSERT(nsegs == 1, ("Too many segments returned!"));
447	if (error != 0) {
448		m_free(m);
449		return (error);
450	}
451
452	bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap, BUS_DMASYNC_PREREAD);
453	rd->buff = m;
454
455	desc = &sc->desc_rx[index];
456	desc->addr = seg[0].ds_addr;
457
458	desc->flags = DATA_SIZE;
459
460	if (index == MACB_MAX_RX_BUFFERS - 1)
461		desc->addr |= RD_WRAP_MASK;
462
463	return (0);
464}
465
466static int
467macb_allocate_dma(struct macb_softc *sc)
468{
469	int error;
470
471	/* Create parent tag for tx and rx */
472	error = bus_dma_tag_create(
473	    bus_get_dma_tag(sc->dev),	/* parent */
474	    1, 0,			/* alignment, boundary */
475	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
476	    BUS_SPACE_MAXADDR,		/* highaddr */
477	    NULL, NULL,			/* filter, filterarg */
478	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
479	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
480	    0,				/* flags */
481	    NULL, NULL,		/* lockfunc, lockarg */
482	    &sc->sc_parent_tag);
483	if (error != 0) {
484		device_printf(sc->dev, "Couldn't create parent DMA tag\n");
485		return (error);
486	}
487
488	if ((error = macb_alloc_desc_dma_tx(sc)) != 0)
489		return (error);
490	if ((error = macb_alloc_desc_dma_rx(sc)) != 0)
491		return (error);
492	return (0);
493}
494
495
496static void
497macb_tick(void *xsc)
498{
499	struct macb_softc *sc;
500	struct mii_data *mii;
501
502	sc = xsc;
503	mii = device_get_softc(sc->miibus);
504	mii_tick(mii);
505	macb_watchdog(sc);
506	/*
507	 * Schedule another timeout one second from now.
508	 */
509	callout_reset(&sc->tick_ch, hz, macb_tick, sc);
510}
511
512
/*
 * Per-second TX watchdog, called from macb_tick() with the softc lock
 * held.  macb_watchdog_timer is armed in macbstart_locked() when
 * frames are handed to hardware and disarmed in macb_tx_cleanup()
 * once the ring drains; if it counts down to zero here, the TX path
 * is considered hung and the interface is re-initialized.
 */
static void
macb_watchdog(struct macb_softc *sc)
{
	struct ifnet *ifp;

	MACB_LOCK_ASSERT(sc);

	/* Timer not armed, or armed but not yet expired: nothing to do. */
	if (sc->macb_watchdog_timer == 0 || --sc->macb_watchdog_timer)
		return;

	ifp = sc->ifp;
	/* Without link the stall is expected; just count the error. */
	if ((sc->flags & MACB_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	/* Clear RUNNING so macbinit_locked() performs a full re-init. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	macbinit_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		macbstart_locked(ifp);
}
537
538
539
/*
 * Initialize the controller and mark the interface running; softc
 * lock must be held.  Sets up both descriptor rings, programs the
 * network configuration register, points the EMAC at the rings,
 * enables RX/TX, unmasks interrupts and starts the MII tick.
 */
static void
macbinit_locked(void *xsc)
{
	struct macb_softc *sc;
	struct ifnet *ifp;
	int err;
	uint32_t config;
	struct mii_data *mii;

	sc = xsc;
	ifp = sc->ifp;

	MACB_LOCK_ASSERT(sc);

	/* Already running: nothing to do. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	if ((err = macb_init_desc_dma_rx(sc)) != 0) {
		device_printf(sc->dev, "no memory for RX buffers\n");
		//ecestop(sc);
		return;
	}
	macb_init_desc_dma_tx(sc);

	config = read_4(sc, EMAC_NCFGR) | (sc->clock << 10); /*set clock*/
	config |= CFG_PAE;		/* PAuse Enable */
	config |= CFG_DRFCS;		/* Discard Rx FCS */
	config |= CFG_SPD;		/* 100 mbps*/
	//config |= CFG_CAF;
	config |= CFG_FD;		/* full duplex */

	config |= CFG_RBOF_2; /*offset +2, keeps the IP header aligned*/

	write_4(sc, EMAC_NCFGR, config);

	/* Initialize TX and RX buffers: hand the ring addresses to the EMAC. */
	write_4(sc, EMAC_RBQP, sc->ring_paddr_rx);
	write_4(sc, EMAC_TBQP, sc->ring_paddr_tx);

	/* Enable TX and RX */
	write_4(sc, EMAC_NCR, RX_ENABLE | TX_ENABLE | MPE_ENABLE);


	/* Enable interrupts */
	write_4(sc, EMAC_IER, (RCOMP_INTERRUPT |
			       RXUBR_INTERRUPT |
			       TUND_INTERRUPT |
			       RLE_INTERRUPT |
			       TXERR_INTERRUPT |
			       ROVR_INTERRUPT |
			       HRESP_INTERRUPT|
			       TCOMP_INTERRUPT
			));

	/*
	 * Set 'running' flag, and clear output active flag
	 * and attempt to start the output
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);

	/* Assume link is up until the MII tick says otherwise. */
	sc->flags |= MACB_FLAG_LINK;

	mii_mediachg(mii);

	callout_reset(&sc->tick_ch, hz, macb_tick, sc);
}
609
610
/*
 * Reap completed TX descriptors; softc lock held (called from
 * macb_intr()).  Reads and acknowledges the transmit status register,
 * recovers the ring after an underrun, then walks the ring from
 * tx_cons freeing mbufs for frames the hardware has sent.  TD_OWN is
 * cleared by macb_encap() when a frame is handed to hardware and set
 * by the EMAC once the frame has been transmitted.
 */
static void
macb_tx_cleanup(struct macb_softc *sc)
{
	struct ifnet *ifp;
	struct eth_tx_desc *desc;
	struct tx_desc_info *td;
	int flags;
	int status;
	int i;

	MACB_LOCK_ASSERT(sc);

	status = read_4(sc, EMAC_TSR);

	/* Write the status bits back to acknowledge them. */
	write_4(sc, EMAC_TSR, status);

	/*buffer underrun*/
	if ((status & TSR_UND) != 0) {
		/*reset buffers: return all descriptors to software ...*/
		printf("underrun\n");
		bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		sc->tx_cons = sc->tx_prod = 0;
		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
			desc = &sc->desc_tx[i];
			desc->flags = TD_OWN;
		}

		/* ... and drop every pending mbuf. */
		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
			td = &sc->tx_desc[i];
			if (td->buff != NULL) {
				/* We are finished with this descriptor. */
				bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
						BUS_DMASYNC_POSTWRITE);
				/*
				 * ... and unload, so we can reuse.
				 * NOTE(review): this map was created on
				 * dmatag_ring_tx but is unloaded via
				 * dmatag_data_tx -- looks like a tag
				 * mix-up; confirm against busdma(9).
				 */
				bus_dmamap_unload(sc->dmatag_data_tx,
						  td->dmamap);
				m_freem(td->buff);
				td->buff = NULL;
			}
		}
	}

	/* No completions reported: nothing more to reap. */
	if ((status & TSR_COMP) == 0)
		return;


	if (sc->tx_cons == sc->tx_prod)
		return;

	ifp = sc->ifp;

	/* Prepare to read the ring (owner bit). */
	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	while (sc->tx_cons != sc->tx_prod) {
		desc = &sc->desc_tx[sc->tx_cons];
		/* Stop at the first frame hardware has not sent yet. */
		if ((desc->flags & TD_OWN) == 0)
			break;

		td = &sc->tx_desc[sc->tx_cons];
		if (td->buff != NULL) {
			/* We are finished with this descriptor. */
			bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
					BUS_DMASYNC_POSTWRITE);
			/* ... and unload, so we can reuse.  (Same
			 * NOTE(review) tag mismatch as above.) */
			bus_dmamap_unload(sc->dmatag_data_tx,
					  td->dmamap);
			m_freem(td->buff);
			td->buff = NULL;
			ifp->if_opackets++;
		}

		/* Release every fragment of this frame (up to and
		 * including the TD_LAST descriptor) back to software. */
		do {
			sc->tx_cnt--;
			MACB_DESC_INC(sc->tx_cons, MACB_MAX_TX_BUFFERS);
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			flags = desc->flags;
			desc->flags = TD_OWN;
			desc = &sc->desc_tx[sc->tx_cons];
			if (flags & TD_LAST) {
				break;
			}
		} while (sc->tx_cons != sc->tx_prod);
	}

	/* Disarm the watchdog timer when no descriptors remain queued. */
	if (sc->tx_cnt == 0)
		sc->macb_watchdog_timer = 0;
}
701
/*
 * Drain the RX ring; softc lock held (dropped around if_input).  The
 * EMAC sets RD_OWN in a descriptor's address word once it has stored a
 * received buffer there; the driver clears RD_OWN to return the slot.
 * Frames may span several DATA_SIZE buffers, chained through
 * macb_cdata.rxhead/rxtail until the RD_EOF fragment arrives.  On exit
 * the RX interrupts masked by macb_intr() are re-enabled.
 */
static void
macb_rx(struct macb_softc *sc)
{
	struct eth_rx_desc	*rxdesc;
	struct ifnet *ifp;
	struct mbuf *m;
	int rxbytes;
	int flags;
	int nsegs;
	int first;

	rxdesc = &(sc->desc_rx[sc->rx_cons]);

	ifp = sc->ifp;

	bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	nsegs = 0;
	while (rxdesc->addr & RD_OWN) {

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		flags = rxdesc->flags;

		rxbytes = flags & RD_LEN_MASK;

		m = sc->rx_desc[sc->rx_cons].buff;

		bus_dmamap_sync(sc->dmatag_ring_rx,
		    sc->rx_desc[sc->rx_cons].dmamap, BUS_DMASYNC_POSTREAD);
		if (macb_new_rxbuf(sc, sc->rx_cons) != 0) {
			/*
			 * No replacement buffer: drop the in-progress
			 * frame and recycle its descriptors.
			 * NOTE(review): the loop below overwrites
			 * rxdesc->flags before testing RD_EOF, so the
			 * test sees the freshly written DATA_SIZE, not
			 * the fragment's original flags -- confirm
			 * this is intended.
			 */
			ifp->if_iqdrops++;
			first = sc->rx_cons;

			do  {
				rxdesc->flags = DATA_SIZE;
				MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);
				if ((rxdesc->flags & RD_EOF) != 0)
					break;
				rxdesc = &(sc->desc_rx[sc->rx_cons]);
			} while (sc->rx_cons != first);

			if (sc->macb_cdata.rxhead != NULL) {
				m_freem(sc->macb_cdata.rxhead);
				sc->macb_cdata.rxhead = NULL;
				sc->macb_cdata.rxtail = NULL;
			}

			break;
		}

		nsegs++;

		/* Chain received mbufs. */
		if (sc->macb_cdata.rxhead == NULL) {
			/* First fragment: skip the CFG_RBOF_2 pad bytes. */
			m->m_data += 2;
			sc->macb_cdata.rxhead = m;
			sc->macb_cdata.rxtail = m;
			if (flags & RD_EOF)
				m->m_len = rxbytes;
			else
				m->m_len = DATA_SIZE - 2;
		} else {
			m->m_flags &= ~M_PKTHDR;
			m->m_len = DATA_SIZE;
			sc->macb_cdata.rxtail->m_next = m;
			sc->macb_cdata.rxtail = m;
		}

		if (flags & RD_EOF) {
			/* Trim the tail fragment; the +2 compensates
			 * for the offset skipped on the first one. */
			if (nsegs > 1) {
				sc->macb_cdata.rxtail->m_len = (rxbytes -
				    ((nsegs - 1) * DATA_SIZE)) + 2;
			}

			m = sc->macb_cdata.rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = rxbytes;
			m->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;

			/* Pass the frame up without holding the lock. */
			nsegs = 0;
			MACB_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			MACB_LOCK(sc);
			sc->macb_cdata.rxhead = NULL;
			sc->macb_cdata.rxtail = NULL;

		}

		/* Return this slot to the hardware. */
		rxdesc->addr &= ~RD_OWN;

		MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);

		rxdesc = &(sc->desc_rx[sc->rx_cons]);
	}

	/* Re-enable the RX interrupts masked in macb_intr(). */
	write_4(sc, EMAC_IER, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));

}
806
/*
 * Process received frames with the softc lock held.  'count' is
 * accepted for interface symmetry but currently unused: macb_rx()
 * always drains the whole ring.  Always returns 0.
 */
static int
macb_intr_rx_locked(struct macb_softc *sc, int count)
{
	macb_rx(sc);
	return (0);
}
813
814static void
815macb_intr_task(void *arg, int pending __unused)
816{
817	struct macb_softc *sc;
818
819	sc = arg;
820	MACB_LOCK(sc);
821	macb_intr_rx_locked(sc, -1);
822	MACB_UNLOCK(sc);
823}
824
/*
 * Interrupt handler.  Dispatches the EMAC interrupt status: RX
 * completions are deferred to the taskqueue (with RX interrupts masked
 * until macb_rx() re-enables them), TX completions are reaped inline,
 * and pending output is kicked before returning.
 */
static void
macb_intr(void *xsc)
{
	struct macb_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = xsc;
	ifp = sc->ifp;
	/* Spurious interrupt while down: ignore. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		printf("not running\n");
		return;
	}

	MACB_LOCK(sc);
	/*
	 * NOTE(review): this loop assumes ISR is read-to-clear (cf. the
	 * discard read in macb_reset()); otherwise an unhandled status
	 * bit would keep the loop spinning -- confirm with the
	 * datasheet.
	 */
	status = read_4(sc, EMAC_ISR);

	while (status) {
		if (status & RCOMP_INTERRUPT) {
			/* Mask RX interrupts; macb_rx() unmasks them. */
			write_4(sc, EMAC_IDR, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));
			taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
		}

		if (status & TCOMP_INTERRUPT) {
			macb_tx_cleanup(sc);
		}

		status = read_4(sc, EMAC_ISR);
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		macbstart_locked(ifp);
	MACB_UNLOCK(sc);
}
859
/*
 * Map the mbuf chain '*m_head' for DMA and fill TX descriptors for it.
 * Returns 0 on success; ENOBUFS when the ring lacks room (the mbuf is
 * left intact so the caller can requeue it); other errnos on mapping
 * failure (the chain is freed and *m_head NULLed when a collapse or
 * its reload fails).
 *
 * The first descriptor keeps TD_OWN set while the rest of the frame is
 * written, and TD_OWN is cleared only at the very end -- the hardware
 * never sees a partially built frame.
 */
static inline int
macb_encap(struct macb_softc *sc, struct mbuf **m_head)
{
	struct eth_tx_desc *desc;
	struct tx_desc_info *txd, *txd_last;
	struct mbuf *m;
	bus_dma_segment_t segs[MAX_FRAGMENT];
	bus_dmamap_t map;
	uint32_t csum_flags;
	int error, i, nsegs, prod, si;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->tx_prod;

	m = *m_head;

	txd = txd_last = &sc->tx_desc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
	    *m_head, segs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments: collapse the chain and retry. */
		m = m_collapse(*m_head, M_NOWAIT, MAX_FRAGMENT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
		    *m_head, segs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0) {
		return (error);
	}
	/* Check for TX descriptor overruns (always keep one slot free). */
	if (sc->tx_cnt + nsegs > MACB_MAX_TX_BUFFERS - 1) {
		bus_dmamap_unload(sc->dmatag_ring_tx, txd->dmamap);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->dmatag_ring_tx, txd->dmamap, BUS_DMASYNC_PREWRITE);
	m = *m_head;

	/* TODO: VLAN hardware tag insertion. */

	csum_flags = 0;
	si = prod;		/* remember the frame's first slot */
	desc = NULL;

	for (i = 0; i < nsegs; i++) {
		desc = &sc->desc_tx[prod];
		desc->addr = segs[i].ds_addr;

		if (i == 0 ) {
			/* First fragment: keep TD_OWN until the end. */
			desc->flags = segs[i].ds_len | TD_OWN;
		} else {
			desc->flags = segs[i].ds_len;
		}

		if (prod == MACB_MAX_TX_BUFFERS - 1)
			desc->flags |= TD_WRAP_MASK;

		sc->tx_cnt++;
		MACB_DESC_INC(prod, MACB_MAX_TX_BUFFERS);
	}
	/*
	 * Set EOP on the last fragment.
	 */

	desc->flags |= TD_LAST;
	/* Hand the frame to hardware by clearing OWN on the first slot. */
	desc = &sc->desc_tx[si];
	desc->flags &= ~TD_OWN;

	sc->tx_prod = prod;

	/*
	 * Swap the first dma map and the last.
	 * NOTE(review): txd and txd_last are equal here (txd is never
	 * advanced), so the swap is a no-op and the mbuf is recorded on
	 * the frame's first slot -- which is where macb_tx_cleanup()
	 * frees it.
	 */
	map = txd_last->dmamap;
	txd_last->dmamap = txd->dmamap;
	txd->dmamap = map;
	txd->buff = m;

	return (0);
}
946
947
948static void
949macbstart_locked(struct ifnet *ifp)
950{
951
952
953
954	struct macb_softc *sc;
955	struct mbuf *m0;
956#if 0
957	struct mbuf *m_new;
958#endif
959	int queued = 0;
960
961	sc = ifp->if_softc;
962
963	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
964	    IFF_DRV_RUNNING || (sc->flags & MACB_FLAG_LINK) == 0) {
965		return;
966	}
967
968	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
969		/* Get packet from the queue */
970		IF_DEQUEUE(&ifp->if_snd, m0);
971		if (m0 == NULL)
972			break;
973#if 0
974		if (m0->m_next != NULL) {
975			/* Fragmented mbuf chain, collapse it. */
976			m_new = m_defrag(m0, M_NOWAIT);
977			if (m_new != NULL) {
978				/* Original frame freed. */
979				m0 = m_new;
980			} else {
981				/* Defragmentation failed, just use the chain. */
982			}
983		}
984#endif
985		if (macb_encap(sc, &m0)) {
986			if (m0 == NULL)
987				break;
988			IF_PREPEND(&ifp->if_snd, m0);
989			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
990			break;
991		}
992		queued++;
993		BPF_MTAP(ifp, m0);
994	}
995	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
996		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
997	if (queued) {
998		bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
999		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1000		write_4(sc, EMAC_NCR, read_4(sc, EMAC_NCR) | TRANSMIT_START);
1001		sc->macb_watchdog_timer = MACB_TIMEOUT;
1002	}
1003}
1004
/*
 * ifnet if_init entry point: initialize under the softc lock.
 */
static void
macbinit(void *xsc)
{
	struct macb_softc *sc = xsc;

	MACB_LOCK(sc);
	macbinit_locked(sc);
	MACB_UNLOCK(sc);
}
1014
1015static void
1016macbstart(struct ifnet *ifp)
1017{
1018	struct macb_softc *sc = ifp->if_softc;
1019	MACB_ASSERT_UNLOCKED(sc);
1020	MACB_LOCK(sc);
1021	macbstart_locked(ifp);
1022	MACB_UNLOCK(sc);
1023
1024}
1025
1026
1027static void
1028macbstop(struct macb_softc *sc)
1029{
1030	struct ifnet *ifp = sc->ifp;
1031	struct rx_desc_info *rd;
1032	struct tx_desc_info *td;
1033	int i;
1034
1035	ifp = sc->ifp;
1036
1037	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1038
1039	macb_reset(sc);
1040
1041	sc->flags &= ~MACB_FLAG_LINK;
1042	callout_stop(&sc->tick_ch);
1043	sc->macb_watchdog_timer = 0;
1044
1045	/* Free TX/RX mbufs still in the queues. */
1046	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
1047		td = &sc->tx_desc[i];
1048		if (td->buff != NULL) {
1049			bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
1050			    BUS_DMASYNC_POSTWRITE);
1051			bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
1052			m_freem(td->buff);
1053			td->buff = NULL;
1054		}
1055	}
1056	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
1057		rd = &sc->rx_desc[i];
1058		if (rd->buff != NULL) {
1059			bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap,
1060			    BUS_DMASYNC_POSTREAD);
1061			bus_dmamap_unload(sc->dmatag_data_rx, rd->dmamap);
1062			m_freem(rd->buff);
1063			rd->buff = NULL;
1064		}
1065	}
1066}
1067
/*
 * Compute the 6-bit multicast hash index for 'mac' as used by the
 * EMAC hash filter: bit i of the index is the XOR of destination
 * address bits i, i+6, i+12, ..., i+42.
 */
static int
get_hash_index(uint8_t *mac)
{
	int i, j, k;
	int result;
	int bit;

	result = 0;
	for (i = 0; i < 6; i++) {
		bit = 0;
		for (j = 0; j < 8;  j++) {
			k = j * 6 + i;
			bit ^= (mac[k/8] & (1 << (k % 8)) ) != 0;
		}
		/*
		 * Fix: pack this XOR bit into position i of the index.
		 * The old 'result |= bit;' dropped the shift, collapsing
		 * the 64-bucket hash into buckets 0 and 1, so most
		 * multicast addresses never matched the filter.
		 */
		result |= bit << i;
	}
	return result;
}
1086
/*
 * Set the hash-filter bit for 'mac' in the 64-bit filter value held as
 * filter[0] (low word, HRB) and filter[1] (high word, HRT).
 */
static void
set_mac_filter(uint32_t *filter, uint8_t *mac)
{
	int idx;

	idx = get_hash_index(mac);
	filter[idx >> 5] |= 1 << (idx & 31);
}
1095
/*
 * Program the EMAC receive address filter from interface state:
 * IFF_PROMISC sets CFG_CAF (copy all frames), IFF_ALLMULTI opens the
 * 64-bit hash filter completely, otherwise the hash registers HRB/HRT
 * are rebuilt from the multicast address list with CFG_MTI enabled.
 */
static void
set_filter(struct macb_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int config;
	int count;
	uint32_t multicast_filter[2];

	ifp = sc->ifp;

	config = read_4(sc, EMAC_NCFGR);

	/* Start from a closed filter. */
	config &= ~(CFG_CAF | CFG_MTI);
	write_4(sc, EMAC_HRB, 0);
	write_4(sc, EMAC_HRT, 0);

	if ((ifp->if_flags & (IFF_ALLMULTI |IFF_PROMISC)) != 0){
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			/* Accept every multicast: all hash bits set. */
			write_4(sc, EMAC_HRB, ~0);
			write_4(sc, EMAC_HRT, ~0);
			config |= CFG_MTI;
		}
		if ((ifp->if_flags & IFF_PROMISC) != 0) {
			config |= CFG_CAF;
		}
		write_4(sc, EMAC_NCFGR, config);
		return;
	}

	/* Rebuild the hash filter from the multicast list. */
	if_maddr_rlock(ifp);
	count = 0;
	multicast_filter[0] = 0;
	multicast_filter[1] = 0;

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		count++;
		set_mac_filter(multicast_filter,
			   LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if (count) {
		write_4(sc, EMAC_HRB, multicast_filter[0]);
		write_4(sc, EMAC_HRT, multicast_filter[1]);
		write_4(sc, EMAC_NCFGR, config|CFG_MTI);
	}
	if_maddr_runlock(ifp);
}
1145
/*
 * Handle interface ioctls.  SIOCSIFFLAGS brings the interface up/down
 * and reprograms the RX filter when PROMISC/ALLMULTI toggle;
 * SIOC{ADD,DEL}MULTI rebuilds the hash filter; media ioctls are
 * forwarded to the MII layer, everything else to ether_ioctl().
 */
static int
macbioctl(struct ifnet * ifp, u_long cmd, caddr_t data)
{

	struct macb_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;

	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MACB_LOCK(sc);

		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/* Only refilter if a relevant flag changed. */
				if (((ifp->if_flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					set_filter(sc);
			} else {
				macbinit_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			macbstop(sc);
		}
		/* Remember the flags to detect the next toggle. */
		sc->if_flags = ifp->if_flags;
		MACB_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MACB_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			set_filter(sc);

		MACB_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);

}
1194
1195/* bus entry points */
1196
/*
 * Device probe: matches unconditionally, identifying the device as
 * "macb".  Returns 0 to claim it with top priority.
 */
static int
macb_probe(device_t dev)
{
	device_set_desc(dev, "macb");
	return (0);
}
1203
1204/*
1205 * Change media according to request.
1206 */
1207static int
1208macb_ifmedia_upd(struct ifnet *ifp)
1209{
1210	struct macb_softc *sc = ifp->if_softc;
1211	struct mii_data *mii;
1212
1213	mii = device_get_softc(sc->miibus);
1214	MACB_LOCK(sc);
1215	mii_mediachg(mii);
1216	MACB_UNLOCK(sc);
1217	return (0);
1218}
1219
1220/*
1221 * Notify the world which media we're using.
1222 */
1223static void
1224macb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1225{
1226	struct macb_softc *sc = ifp->if_softc;
1227	struct mii_data *mii;
1228
1229	mii = device_get_softc(sc->miibus);
1230
1231	MACB_LOCK(sc);
1232	/* Don't report link state if driver is not running. */
1233	if ((ifp->if_flags & IFF_UP) == 0) {
1234		MACB_UNLOCK(sc);
1235		return;
1236	}
1237	mii_pollstat(mii);
1238	ifmr->ifm_active = mii->mii_media_active;
1239	ifmr->ifm_status = mii->mii_media_status;
1240	MACB_UNLOCK(sc);
1241}
1242
/*
 * Put the EMAC into a known quiescent state: RX/TX disabled,
 * statistics and status cleared, all interrupts masked.
 */
static void
macb_reset(struct macb_softc *sc)
{
	/*
	 * Disable RX and TX
	 */
	write_4(sc, EMAC_NCR, 0);

	/* Reset the hardware statistics counters. */
	write_4(sc, EMAC_NCR, CLEAR_STAT);

	/* Clear all status flags */
	write_4(sc, EMAC_TSR, ~0UL);
	write_4(sc, EMAC_RSR, ~0UL);

	/* Disable all interrupts */
	write_4(sc, EMAC_IDR, ~0UL);
	/* NOTE(review): reading ISR presumably acks any pending
	 * interrupt state — confirm against the EMAC datasheet. */
	read_4(sc, EMAC_ISR);

}
1262
1263
1264static int
1265macb_get_mac(struct macb_softc *sc, u_char *eaddr)
1266{
1267	uint32_t bottom;
1268	uint16_t top;
1269
1270	bottom = read_4(sc, EMAC_SA1B);
1271	top = read_4(sc, EMAC_SA1T);
1272
1273	eaddr[0] = bottom & 0xff;
1274	eaddr[1] = (bottom >> 8) & 0xff;
1275	eaddr[2] = (bottom >> 16) & 0xff;
1276	eaddr[3] = (bottom >> 24) & 0xff;
1277	eaddr[4] = top & 0xff;
1278	eaddr[5] = (top >> 8) & 0xff;
1279
1280	return (0);
1281}
1282
1283
1284static int
1285macb_attach(device_t dev)
1286{
1287	struct macb_softc *sc;
1288	struct ifnet *ifp = NULL;
1289	struct sysctl_ctx_list *sctx;
1290	struct sysctl_oid *soid;
1291	int pclk_hz;
1292	u_char eaddr[ETHER_ADDR_LEN];
1293	int rid;
1294	int err;
1295	struct at91_pmc_clock *master;
1296
1297
1298	err = 0;
1299
1300	sc = device_get_softc(dev);
1301	sc->dev = dev;
1302
1303	MACB_LOCK_INIT(sc);
1304
1305	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1306
1307	/*
1308	 * Allocate resources.
1309	 */
1310	rid = 0;
1311	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1312	    RF_ACTIVE);
1313	if (sc->mem_res == NULL) {
1314		device_printf(dev, "could not allocate memory resources.\n");
1315		err = ENOMEM;
1316		goto out;
1317	}
1318	rid = 0;
1319	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1320	    RF_ACTIVE);
1321	if (sc->irq_res == NULL) {
1322		device_printf(dev, "could not allocate interrupt resources.\n");
1323		err = ENOMEM;
1324		goto out;
1325	}
1326
1327	/*setup clock*/
1328	sc->clk = at91_pmc_clock_ref(device_get_nameunit(sc->dev));
1329	at91_pmc_clock_enable(sc->clk);
1330
1331	macb_reset(sc);
1332	macb_get_mac(sc, eaddr);
1333
1334	master = at91_pmc_clock_ref("mck");
1335
1336	pclk_hz = master->hz;
1337
1338	sc->clock = CFG_CLK_8;
1339	if (pclk_hz <= 20000000)
1340		sc->clock = CFG_CLK_8;
1341	else if (pclk_hz <= 40000000)
1342		sc->clock = CFG_CLK_16;
1343	else if (pclk_hz <= 80000000)
1344		sc->clock = CFG_CLK_32;
1345	else
1346		sc->clock = CFG_CLK_64;
1347
1348	sc->clock = sc->clock << 10;
1349
1350	write_4(sc, EMAC_NCFGR, sc->clock);
1351	write_4(sc, EMAC_USRIO, USRIO_CLOCK);       //enable clock
1352
1353	write_4(sc, EMAC_NCR, MPE_ENABLE); //enable MPE
1354
1355	sc->ifp = ifp = if_alloc(IFT_ETHER);
1356	err = mii_attach(dev, &sc->miibus, ifp, macb_ifmedia_upd,
1357	    macb_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1358	if (err != 0) {
1359		device_printf(dev, "attaching PHYs failed\n");
1360		goto out;
1361	}
1362
1363	if (macb_allocate_dma(sc) != 0)
1364		goto out;
1365
1366	/* Sysctls */
1367	sctx = device_get_sysctl_ctx(dev);
1368	soid = device_get_sysctl_tree(dev);
1369
1370	ifp->if_softc = sc;
1371	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1372	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1373	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1374	ifp->if_capenable |= IFCAP_VLAN_MTU;	/* The hw bits already set. */
1375	ifp->if_start = macbstart;
1376	ifp->if_ioctl = macbioctl;
1377	ifp->if_init = macbinit;
1378	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1379	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
1380	IFQ_SET_READY(&ifp->if_snd);
1381	sc->if_flags = ifp->if_flags;
1382
1383	TASK_INIT(&sc->sc_intr_task, 0, macb_intr_task, sc);
1384
1385	sc->sc_tq = taskqueue_create_fast("macb_taskq", M_WAITOK,
1386	    taskqueue_thread_enqueue, &sc->sc_tq);
1387	if (sc->sc_tq == NULL) {
1388		device_printf(sc->dev, "could not create taskqueue\n");
1389		goto out;
1390	}
1391
1392	ether_ifattach(ifp, eaddr);
1393
1394	/*
1395	 * Activate the interrupt.
1396	 */
1397	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
1398	    NULL, macb_intr, sc, &sc->intrhand);
1399	if (err) {
1400		device_printf(dev, "could not establish interrupt handler.\n");
1401		ether_ifdetach(ifp);
1402		goto out;
1403	}
1404
1405	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
1406	    device_get_nameunit(sc->dev));
1407
1408	sc->macb_cdata.rxhead = 0;
1409	sc->macb_cdata.rxtail = 0;
1410
1411	phy_write(sc, 0, 0, 0x3300); //force autoneg
1412
1413	return (0);
1414out:
1415
1416	return (err);
1417}
1418
/*
 * Detach the device: tear everything down in the reverse order of
 * attach.  The ordering matters — the interface is removed and the
 * hardware stopped before the interrupt handler and taskqueue are
 * torn down and the resources released.
 */
static int
macb_detach(device_t dev)
{
	struct macb_softc *sc;

	sc = device_get_softc(dev);
	ether_ifdetach(sc->ifp);
	MACB_LOCK(sc);
	macbstop(sc);
	MACB_UNLOCK(sc);
	/* Wait for the tick callout to finish before freeing state. */
	callout_drain(&sc->tick_ch);
	bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
	/* Flush any interrupt task still queued, then free the queue. */
	taskqueue_drain(sc->sc_tq, &sc->sc_intr_task);
	taskqueue_free(sc->sc_tq);
	macb_deactivate(dev);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
	MACB_LOCK_DESTROY(sc);

	return (0);
}
1440
1441/*PHY related functions*/
1442static inline int
1443phy_read(struct macb_softc *sc, int phy, int reg)
1444{
1445	int val;
1446
1447	write_4(sc, EMAC_MAN, EMAC_MAN_REG_RD(phy, reg));
1448	while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1449		continue;
1450	val = read_4(sc, EMAC_MAN) & EMAC_MAN_VALUE_MASK;
1451
1452	return (val);
1453}
1454
/*
 * Write @data to PHY register @reg of PHY @phy through the EMAC
 * management (MDIO) interface.  Busy-waits for completion; always
 * returns 0.
 */
static inline int
phy_write(struct macb_softc *sc, int phy, int reg, int data)
{

	write_4(sc, EMAC_MAN, EMAC_MAN_REG_WR(phy, reg, data));
	/* Spin until the PHY maintenance logic reports idle. */
	while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
		continue;

	return (0);
}
1465
1466/*
1467 * MII bus support routines.
1468 */
1469static int
1470macb_miibus_readreg(device_t dev, int phy, int reg)
1471{
1472	struct macb_softc *sc;
1473	sc = device_get_softc(dev);
1474	return (phy_read(sc, phy, reg));
1475}
1476
1477static int
1478macb_miibus_writereg(device_t dev, int phy, int reg, int data)
1479{
1480	struct macb_softc *sc;
1481	sc = device_get_softc(dev);
1482	return (phy_write(sc, phy, reg, data));
1483}
1484
1485static void
1486macb_child_detached(device_t dev, device_t child)
1487{
1488	struct macb_softc *sc;
1489	sc = device_get_softc(dev);
1490
1491}
1492
1493static void
1494macb_miibus_statchg(device_t dev)
1495{
1496	struct macb_softc *sc;
1497	struct mii_data *mii;
1498	int config;
1499
1500	sc = device_get_softc(dev);
1501
1502	mii = device_get_softc(sc->miibus);
1503
1504	sc->flags &= ~MACB_FLAG_LINK;
1505
1506	config = read_4(sc, EMAC_NCFGR);
1507
1508	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1509		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1510		case IFM_10_T:
1511			config &= ~(CFG_SPD);
1512			sc->flags |= MACB_FLAG_LINK;
1513			break;
1514		case IFM_100_TX:
1515			config |= CFG_SPD;
1516			sc->flags |= MACB_FLAG_LINK;
1517			break;
1518		default:
1519			break;
1520		}
1521	}
1522
1523	config |= CFG_FD;
1524	write_4(sc, EMAC_NCFGR, config);
1525}
1526
/*
 * newbus method table: device, bus and MII interfaces implemented
 * by this driver.
 */
static device_method_t macb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	macb_probe),
	DEVMETHOD(device_attach,	macb_attach),
	DEVMETHOD(device_detach,	macb_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	macb_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	macb_miibus_readreg),
	DEVMETHOD(miibus_writereg,	macb_miibus_writereg),
	DEVMETHOD(miibus_statchg,	macb_miibus_statchg),
	{ 0, 0 }
};
1542
/* newbus driver declaration. */
static driver_t macb_driver = {
	"macb",
	macb_methods,
	sizeof(struct macb_softc),
};


/* Register on the atmelarm bus; a miibus (PHY) child hangs below us. */
DRIVER_MODULE(macb, atmelarm, macb_driver, macb_devclass, 0, 0);
DRIVER_MODULE(miibus, macb, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(macb, miibus, 1, 1, 1);
MODULE_DEPEND(macb, ether, 1, 1, 1);
1554