if_macb.c revision 213894
1/*-
2 * Copyright (c) 2010 Yohanes Nugroho <yohanes@gmail.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/arm/at91/if_macb.c 213894 2010-10-15 15:00:30Z marius $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/kernel.h>
34#include <sys/lock.h>
35#include <sys/mbuf.h>
36#include <sys/malloc.h>
37#include <sys/module.h>
38#include <sys/rman.h>
39#include <sys/socket.h>
40#include <sys/sockio.h>
41#include <sys/sysctl.h>
42#include <sys/taskqueue.h>
43
44#include <net/ethernet.h>
45#include <net/if.h>
46#include <net/if_arp.h>
47#include <net/if_dl.h>
48#include <net/if_media.h>
49#include <net/if_types.h>
50#include <net/if_vlan_var.h>
51
52#ifdef INET
53#include <netinet/in.h>
54#include <netinet/in_systm.h>
55#include <netinet/in_var.h>
56#include <netinet/ip.h>
57#endif
58
59#include <net/bpf.h>
60#include <net/bpfdesc.h>
61
62#include <dev/mii/mii.h>
63#include <dev/mii/miivar.h>
64
65#include <arm/at91/at91_pmcvar.h>
66#include <arm/at91/if_macbreg.h>
67#include <arm/at91/if_macbvar.h>
68#include <arm/at91/at91_piovar.h>
69
70#include <arm/at91/at91sam9g20reg.h>
71
72#include <machine/bus.h>
73#include <machine/intr.h>
74
75/* "device miibus" required.  See GENERIC if you get errors here. */
76#include "miibus_if.h"
77
78
/* Mutex helpers for the per-softc driver lock (sc_mtx). */
#define	MACB_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	MACB_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
/* Initialize the softc mutex, named after the device instance. */
#define	MACB_LOCK_INIT(_sc)					\
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
	    MTX_NETWORK_LOCK, MTX_DEF)
#define	MACB_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define	MACB_LOCK_ASSERT(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	MACB_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
87
88
/* Read a 32-bit EMAC register at offset 'off'. */
static inline uint32_t
read_4(struct macb_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}
95
/* Write 'val' to the 32-bit EMAC register at offset 'off'. */
static inline void
write_4(struct macb_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}
102
103
static devclass_t macb_devclass;

/* ifnet entry points */

static void	macbinit_locked(void *);
static void	macbstart_locked(struct ifnet *);

static void	macbinit(void *);
static void	macbstart(struct ifnet *);
static void	macbstop(struct macb_softc *);
static int	macbioctl(struct ifnet * ifp, u_long, caddr_t);

/* bus entry points */

static int	macb_probe(device_t dev);
static int	macb_attach(device_t dev);
static int	macb_detach(device_t dev);

/* helper functions */
static int
macb_new_rxbuf(struct macb_softc *sc, int index);

static void
macb_free_desc_dma_tx(struct macb_softc *sc);

static void
macb_free_desc_dma_rx(struct macb_softc *sc);

static void
macb_init_desc_dma_tx(struct macb_softc *sc);

static void
macb_watchdog(struct macb_softc *sc);

/* interrupt / taskqueue plumbing */
static int macb_intr_rx_locked(struct macb_softc *sc, int count);
static void macb_intr_task(void *arg, int pending __unused);
static void	macb_tx_task(void *arg, int pending __unused);
static void macb_intr(void *xsc);

static void
macb_tx_cleanup(struct macb_softc *sc);

static inline int
phy_write(struct macb_softc *sc, int phy, int reg, int data);

static void	macb_reset(struct macb_softc *sc);
150
151static void
152macb_deactivate(device_t dev)
153{
154	struct macb_softc *sc;
155
156	sc = device_get_softc(dev);
157
158	macb_free_desc_dma_tx(sc);
159	macb_free_desc_dma_rx(sc);
160
161}
162
163static void
164macb_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
165{
166	bus_addr_t *paddr;
167
168	KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
169	paddr = arg;
170	*paddr = segs->ds_addr;
171}
172
/*
 * Allocate the TX descriptor ring (16-byte aligned, DMA-coherent),
 * load it to learn its bus address, and create one busdma map per TX
 * slot for mbuf chains.  Returns 0 or an errno value; partially
 * created resources are released later by macb_free_desc_dma_tx().
 */
static int
macb_alloc_desc_dma_tx(struct macb_softc *sc)
{
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS, /* max size */
	    1,				/* nsegments */
	    sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_tx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Couldn't create TX descriptor dma tag\n");
		return (error);
	}
	/* Allocate memory for TX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_tx,
	    (void**)&(sc->desc_tx), BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT, &sc->dmamap_ring_tx);
	if (error != 0) {
		device_printf(sc->dev, "failed to allocate TX dma memory\n");
		return (error);
	}
	/* Load ring DMA: records the ring's bus address in ring_paddr_tx. */
	error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    sc->desc_tx, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
	    macb_getaddr, &sc->ring_paddr_tx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "can't load TX descriptor dma map\n");
		return (error);
	}
	/* Allocate a busdma tag for mbufs. No alignment restriction applies. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES * MAX_FRAGMENT,	/* maxsize */
	    MAX_FRAGMENT,		/* nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_tx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev, "failed to create TX mbuf dma tag\n");
		return (error);
	}

	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
		/* Create dma map for each descriptor. */
		error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
		    &sc->tx_desc[i].dmamap);
		if (error != 0) {
			device_printf(sc->dev,
			    "failed to create TX mbuf dma map\n");
			return (error);
		}
	}
	return (0);
}
239
240static void
241macb_free_desc_dma_tx(struct macb_softc *sc)
242{
243	struct tx_desc_info *td;
244	int i;
245
246	/* TX buffers. */
247	if (sc->dmatag_ring_tx != NULL) {
248		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
249			td = &sc->tx_desc[i];
250			if (td->dmamap != NULL) {
251				bus_dmamap_destroy(sc->dmatag_ring_tx,
252				    td->dmamap);
253				td->dmamap = NULL;
254			}
255		}
256		bus_dma_tag_destroy(sc->dmatag_ring_tx);
257		sc->dmatag_ring_tx = NULL;
258	}
259
260	/* TX descriptor ring. */
261	if (sc->dmatag_data_tx != NULL) {
262		if (sc->dmamap_ring_tx != NULL)
263			bus_dmamap_unload(sc->dmatag_data_tx,
264			    sc->dmamap_ring_tx);
265		if (sc->dmamap_ring_tx != NULL && sc->desc_tx != NULL)
266			bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
267			    sc->dmamap_ring_tx);
268		sc->dmamap_ring_tx = NULL;
269		sc->dmamap_ring_tx = NULL;
270		bus_dma_tag_destroy(sc->dmatag_data_tx);
271		sc->dmatag_data_tx = NULL;
272	}
273}
274
275static void
276macb_init_desc_dma_tx(struct macb_softc *sc)
277{
278	struct eth_tx_desc *desc;
279	int i;
280
281	MACB_LOCK_ASSERT(sc);
282
283	sc->tx_prod = 0;
284	sc->tx_cons = 0;
285	sc->tx_cnt = 0;
286
287	desc = &sc->desc_tx[0];
288	bzero(desc, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS);
289
290	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
291		desc = &sc->desc_tx[i];
292		if (i == MACB_MAX_TX_BUFFERS - 1)
293			desc->flags = TD_OWN | TD_WRAP_MASK;
294		else
295			desc->flags = TD_OWN;
296	}
297
298	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
299	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
300}
301
/*
 * Allocate and load the RX descriptor ring (16-byte aligned,
 * DMA-coherent) and create one busdma map per RX slot for the mbuf
 * clusters.  Returns 0 or an errno value; cleanup on failure is done
 * by macb_free_desc_dma_rx().
 */
static int
macb_alloc_desc_dma_rx(struct macb_softc *sc)
{
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    /* maxsize, nsegments */
	    sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 1,
	    /* maxsegsz, flags */
	    sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 0,
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_rx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Couldn't create RX descriptor dma tag\n");
		return (error);
	}
	/* Allocate RX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_rx, (void**)&(sc->desc_rx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dmamap_ring_rx);
	if (error != 0) {
		device_printf(sc->dev,
		    "failed to allocate RX descriptor dma memory\n");
		return (error);
	}

	/* Load dmamap: records the ring's bus address in ring_paddr_rx. */
	error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
	    sc->desc_rx, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS,
	    macb_getaddr, &sc->ring_paddr_rx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "can't load RX descriptor dma map\n");
		return (error);
	}

	/* Allocate a busdma tag for mbufs: one cluster per descriptor. */
	error = bus_dma_tag_create(sc->sc_parent_tag,/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_rx);	/* dmat */

	if (error != 0) {
		device_printf(sc->dev, "failed to create RX mbuf dma tag\n");
		return (error);
	}

	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
		error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
		    &sc->rx_desc[i].dmamap);
		if (error != 0) {
			device_printf(sc->dev,
			    "failed to create RX mbuf dmamap\n");
			return (error);
		}
	}

	return (0);
}
371
372static void
373macb_free_desc_dma_rx(struct macb_softc *sc)
374{
375	struct rx_desc_info *rd;
376	int i;
377
378	/* RX buffers. */
379	if (sc->dmatag_ring_rx != NULL) {
380		for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
381			rd = &sc->rx_desc[i];
382			if (rd->dmamap != NULL) {
383				bus_dmamap_destroy(sc->dmatag_ring_rx,
384				    rd->dmamap);
385				rd->dmamap = NULL;
386			}
387		}
388		bus_dma_tag_destroy(sc->dmatag_ring_rx);
389		sc->dmatag_ring_rx = NULL;
390	}
391	/* RX descriptor ring. */
392	if (sc->dmatag_data_rx != NULL) {
393		if (sc->dmamap_ring_rx != NULL)
394			bus_dmamap_unload(sc->dmatag_data_rx,
395			    sc->dmamap_ring_rx);
396		if (sc->dmamap_ring_rx != NULL &&
397		    sc->desc_rx != NULL)
398			bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
399			    sc->dmamap_ring_rx);
400		sc->desc_rx = NULL;
401		sc->dmamap_ring_rx = NULL;
402		bus_dma_tag_destroy(sc->dmatag_data_rx);
403		sc->dmatag_data_rx = NULL;
404	}
405}
406
407static int
408macb_init_desc_dma_rx(struct macb_softc *sc)
409{
410	struct eth_rx_desc *desc;
411	struct rx_desc_info *rd;
412	int i;
413
414	MACB_LOCK_ASSERT(sc);
415
416	sc->rx_cons = 0;
417	desc = &sc->desc_rx[0];
418	bzero(desc, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS);
419	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
420		rd = &sc->rx_desc[i];
421		rd->buff = NULL;
422		if (macb_new_rxbuf(sc, i) != 0)
423			return (ENOBUFS);
424	}
425	bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
426	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
427	return (0);
428}
429
430static int
431macb_new_rxbuf(struct macb_softc *sc, int index)
432{
433	struct rx_desc_info *rd;
434	struct eth_rx_desc *desc;
435	struct mbuf *m;
436	bus_dma_segment_t seg[1];
437	int error, nsegs;
438
439	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
440	if (m == NULL)
441		return (ENOBUFS);
442	m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
443	rd = &sc->rx_desc[index];
444	bus_dmamap_unload(sc->dmatag_ring_rx, rd->dmamap);
445	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_rx, rd->dmamap, m,
446	    seg, &nsegs, 0);
447	KASSERT(nsegs == 1, ("Too many segments returned!"));
448	if (error != 0) {
449		m_free(m);
450		return (error);
451	}
452
453	bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap, BUS_DMASYNC_PREREAD);
454	rd->buff = m;
455
456	desc = &sc->desc_rx[index];
457	desc->addr = seg[0].ds_addr;
458
459	desc->flags = DATA_SIZE;
460
461	if (index == MACB_MAX_RX_BUFFERS - 1)
462		desc->addr |= RD_WRAP_MASK;
463
464	return (0);
465}
466
/*
 * Create the parent busdma tag (32-bit address space, no alignment
 * constraints) and then set up the TX and RX descriptor/buffer DMA
 * resources.  Returns 0 or the first errno encountered; the caller
 * tears down via macb_deactivate() on failure.
 */
static int
macb_allocate_dma(struct macb_softc *sc)
{
	int error;

	/* Create parent tag for tx and rx */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,		/* lockfunc, lockarg */
	    &sc->sc_parent_tag);
	if (error != 0) {
		device_printf(sc->dev, "Couldn't create parent DMA tag\n");
		return (error);
	}

	if ((error = macb_alloc_desc_dma_tx(sc)) != 0)
		return (error);
	if ((error = macb_alloc_desc_dma_rx(sc)) != 0)
		return (error);
	return (0);
}
495
496
497static void
498macb_tick(void *xsc)
499{
500	struct macb_softc *sc;
501	struct mii_data *mii;
502
503	sc = xsc;
504	mii = device_get_softc(sc->miibus);
505	mii_tick(mii);
506	macb_watchdog(sc);
507	/*
508	 * Schedule another timeout one second from now.
509	 */
510	callout_reset(&sc->tick_ch, hz, macb_tick, sc);
511}
512
513
/*
 * TX watchdog, run from macb_tick().  The timer is armed at transmit
 * start and disarmed by macb_tx_cleanup(); if it counts down to zero a
 * transmit has stalled, so reinitialize and restart the TX task.
 */
static void
macb_watchdog(struct macb_softc *sc)
{
	struct ifnet *ifp;

	MACB_LOCK_ASSERT(sc);

	/* Unarmed, or armed and still counting down: nothing to do. */
	if (sc->macb_watchdog_timer == 0 || --sc->macb_watchdog_timer)
		return;

	ifp = sc->ifp;
	if ((sc->flags & MACB_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	/* Clear RUNNING so macbinit_locked() performs a full re-init. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	macbinit_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}
538
539
540
/*
 * Hardware initialization with the driver lock held: set up both
 * descriptor rings, program the network configuration register,
 * enable RX/TX and interrupts, then kick off MII and the tick
 * callout.  No-op if the interface is already running.
 */
static void
macbinit_locked(void *xsc)
{
	struct macb_softc *sc;
	struct ifnet *ifp;
	int err;
	uint32_t config;
	struct mii_data *mii;

	sc = xsc;
	ifp = sc->ifp;

	MACB_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	if ((err = macb_init_desc_dma_rx(sc)) != 0) {
		device_printf(sc->dev, "no memory for RX buffers\n");
		return;
	}
	macb_init_desc_dma_tx(sc);

	config = read_4(sc, EMAC_NCFGR) | (sc->clock << 10); /*set clock*/
	config |= CFG_PAE;		/* PAuse Enable */
	config |= CFG_DRFCS;		/* Discard Rx FCS */
	config |= CFG_SPD;		/* 100 mbps*/
	config |= CFG_FD;

	config |= CFG_RBOF_2; /* RX buffer offset +2; macb_rx() skips the pad */

	write_4(sc, EMAC_NCFGR, config);

	/* Point the hardware at the RX and TX descriptor rings. */
	write_4(sc, EMAC_RBQP, sc->ring_paddr_rx);
	write_4(sc, EMAC_TBQP, sc->ring_paddr_tx);

	/* Enable TX and RX */
	write_4(sc, EMAC_NCR, RX_ENABLE | TX_ENABLE | MPE_ENABLE);


	/* Enable interrupts */
	write_4(sc, EMAC_IER, (RCOMP_INTERRUPT |
			       RXUBR_INTERRUPT |
			       TUND_INTERRUPT |
			       RLE_INTERRUPT |
			       TXERR_INTERRUPT |
			       ROVR_INTERRUPT |
			       HRESP_INTERRUPT|
			       TCOMP_INTERRUPT
			));

	/*
	 * Set 'running' flag, and clear output active flag
	 * and attempt to start the output
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);

	/* NOTE(review): link is assumed up here until MII says otherwise. */
	sc->flags |= MACB_FLAG_LINK;

	mii_mediachg(mii);

	callout_reset(&sc->tick_ch, hz, macb_tick, sc);
}
610
611
/*
 * Reap completed TX descriptors.  Handles transmit-underrun recovery
 * (full ring reset, all queued mbufs freed) and, on TSR_COMP, walks
 * the ring from tx_cons to tx_prod freeing mbufs and returning
 * descriptors to software ownership.  Called from macb_intr() with
 * the driver lock held.
 */
static void
macb_tx_cleanup(struct macb_softc *sc)
{
	struct ifnet *ifp;
	struct eth_tx_desc *desc;
	struct tx_desc_info *td;
	int flags;
	int status;
	int i;

	MACB_LOCK_ASSERT(sc);

	/* Read the transmit status and write it back to acknowledge it. */
	status = read_4(sc, EMAC_TSR);

	write_4(sc, EMAC_TSR, status);

	/*buffer underrun*/
	if ((status & TSR_UND) != 0) {
		/*reset buffers*/
		printf("underrun\n");
		bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		sc->tx_cons = sc->tx_prod = 0;
		/* Return every descriptor to software ownership... */
		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
			desc = &sc->desc_tx[i];
			desc->flags = TD_OWN;
		}

		/* ... and drop any mbufs that were still queued. */
		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
			td = &sc->tx_desc[i];
			if (td->buff != NULL) {
				/* We are finished with this descriptor. */
				bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
						BUS_DMASYNC_POSTWRITE);
				/*
				 * ... and unload, so we can reuse.
				 * NOTE(review): unload uses dmatag_data_tx but
				 * the map came from dmatag_ring_tx — confirm
				 * against busdma expectations.
				 */
				bus_dmamap_unload(sc->dmatag_data_tx,
						  td->dmamap);
				m_freem(td->buff);
				td->buff = NULL;
			}
		}
	}

	if ((status & TSR_COMP) == 0)
		return;


	if (sc->tx_cons == sc->tx_prod)
		return;

	ifp = sc->ifp;

	/* Prepare to read the ring (owner bit). */
	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	while (sc->tx_cons != sc->tx_prod) {
		desc = &sc->desc_tx[sc->tx_cons];
		/* Stop at the first descriptor the hardware still owns. */
		if ((desc->flags & TD_OWN) == 0)
			break;

		td = &sc->tx_desc[sc->tx_cons];
		if (td->buff != NULL) {
			/* We are finished with this descriptor. */
			bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
					BUS_DMASYNC_POSTWRITE);
			/* ... and unload, so we can reuse. */
			bus_dmamap_unload(sc->dmatag_data_tx,
					  td->dmamap);
			m_freem(td->buff);
			td->buff = NULL;
			ifp->if_opackets++;
		}

		/* Reclaim all descriptors of this frame up to TD_LAST. */
		do {
			sc->tx_cnt--;
			MACB_DESC_INC(sc->tx_cons, MACB_MAX_TX_BUFFERS);
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			flags = desc->flags;
			desc->flags = TD_OWN;
			desc = &sc->desc_tx[sc->tx_cons];
			if (flags & TD_LAST) {
				break;
			}
		} while (sc->tx_cons != sc->tx_prod);
	}

	/* Unarm watchdog timer when there are no pending descriptors. */
	if (sc->tx_cnt == 0)
		sc->macb_watchdog_timer = 0;
}
702
/*
 * Drain received frames from the RX ring.  Frames spanning multiple
 * descriptors are chained into one mbuf chain and passed to if_input
 * with the driver lock dropped.  Finishes by re-enabling the RX
 * interrupts that macb_intr() masked before scheduling us.
 */
static void
macb_rx(struct macb_softc *sc)
{
	struct eth_rx_desc	*rxdesc;
	struct ifnet *ifp;
	struct mbuf *m;
	int rxbytes;
	int flags;
	int nsegs;
	int first;

	rxdesc = &(sc->desc_rx[sc->rx_cons]);

	ifp = sc->ifp;

	bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	nsegs = 0;
	/* RD_OWN set means the hardware has filled this descriptor. */
	while (rxdesc->addr & RD_OWN) {

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		flags = rxdesc->flags;

		rxbytes = flags & RD_LEN_MASK;

		m = sc->rx_desc[sc->rx_cons].buff;

		bus_dmamap_sync(sc->dmatag_ring_rx,
		    sc->rx_desc[sc->rx_cons].dmamap, BUS_DMASYNC_POSTREAD);
		if (macb_new_rxbuf(sc, sc->rx_cons) != 0) {
			/*
			 * No replacement cluster: drop this frame and
			 * recycle its descriptors.
			 * NOTE(review): rxdesc->flags is overwritten with
			 * DATA_SIZE before the RD_EOF test below reads it
			 * back — verify the intended end-of-frame walk.
			 */
			ifp->if_iqdrops++;
			first = sc->rx_cons;

			do  {
				rxdesc->flags = DATA_SIZE;
				MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);
				if ((rxdesc->flags & RD_EOF) != 0)
					break;
				rxdesc = &(sc->desc_rx[sc->rx_cons]);
			} while (sc->rx_cons != first);

			/* Toss any partially assembled chain. */
			if (sc->macb_cdata.rxhead != NULL) {
				m_freem(sc->macb_cdata.rxhead);
				sc->macb_cdata.rxhead = NULL;
				sc->macb_cdata.rxtail = NULL;
			}

			break;
		}

		nsegs++;

		/* Chain received mbufs. */
		if (sc->macb_cdata.rxhead == NULL) {
			/* First segment: skip the 2-byte RBOF_2 pad. */
			m->m_data += 2;
			sc->macb_cdata.rxhead = m;
			sc->macb_cdata.rxtail = m;
			if (flags & RD_EOF)
				m->m_len = rxbytes;
			else
				m->m_len = DATA_SIZE - 2;
		} else {
			m->m_flags &= ~M_PKTHDR;
			m->m_len = DATA_SIZE;
			sc->macb_cdata.rxtail->m_next = m;
			sc->macb_cdata.rxtail = m;
		}

		if (flags & RD_EOF) {
			/* Fix up the final segment's length from the total. */
			if (nsegs > 1) {
				sc->macb_cdata.rxtail->m_len = (rxbytes -
				    ((nsegs - 1) * DATA_SIZE)) + 2;
			}

			m = sc->macb_cdata.rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = rxbytes;
			m->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;

			nsegs = 0;
			/* Hand the frame up without holding the driver lock. */
			MACB_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			MACB_LOCK(sc);
			sc->macb_cdata.rxhead = NULL;
			sc->macb_cdata.rxtail = NULL;

		}

		/* Return the descriptor to the hardware. */
		rxdesc->addr &= ~RD_OWN;

		MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);

		rxdesc = &(sc->desc_rx[sc->rx_cons]);
	}

	/* Re-enable the RX interrupts masked in macb_intr(). */
	write_4(sc, EMAC_IER, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));

}
807
/*
 * Process received frames with the driver lock held.  'count' is
 * currently unused (no RX budget limiting); always returns 0.
 */
static int
macb_intr_rx_locked(struct macb_softc *sc, int count)
{
	macb_rx(sc);
	return (0);
}
814
815static void
816macb_intr_task(void *arg, int pending __unused)
817{
818	struct macb_softc *sc;
819
820	sc = arg;
821	MACB_LOCK(sc);
822	macb_intr_rx_locked(sc, -1);
823	MACB_UNLOCK(sc);
824}
825
/*
 * Interrupt handler.  RX completions are masked and deferred to a
 * taskqueue (macb_rx() re-enables them); TX completions are reaped
 * inline under the driver lock.
 */
static void
macb_intr(void *xsc)
{
	struct macb_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = xsc;
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		printf("not running\n");
		return;
	}

	/*
	 * Loop until ISR reads back zero; ISR appears to be
	 * read-to-clear (macb_reset() also drains it with a read).
	 */
	status = read_4(sc, EMAC_ISR);

	while (status) {
		if (status & RCOMP_INTERRUPT) {
			/* Mask RX interrupts until the task has drained. */
			write_4(sc, EMAC_IDR, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));
			taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
		}

		if (status & TCOMP_INTERRUPT) {
			MACB_LOCK(sc);
			macb_tx_cleanup(sc);
			MACB_UNLOCK(sc);
		}

		status = read_4(sc, EMAC_ISR);
	}

	/* Restart transmission if packets are still queued. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}
860
/*
 * Map the mbuf chain *m_head for DMA and fill TX descriptors starting
 * at tx_prod.  On EFBIG the chain is collapsed to MAX_FRAGMENT
 * clusters and retried once (*m_head is freed and NULLed if that
 * fails).  The first descriptor's OWN bit is cleared only after all
 * the others are set up, so the hardware never sees a half-built
 * frame.  Returns 0 or an errno value.
 */
static inline int
macb_encap(struct macb_softc *sc, struct mbuf **m_head)
{
	struct eth_tx_desc *desc;
	struct tx_desc_info *txd, *txd_last;
	struct mbuf *m;
	bus_dma_segment_t segs[MAX_FRAGMENT];
	bus_dmamap_t map;
	uint32_t csum_flags;
	int error, i, nsegs, prod, si;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->tx_prod;

	m = *m_head;

	txd = txd_last = &sc->tx_desc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
	    *m_head, segs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments: collapse the chain and retry once. */
		m = m_collapse(*m_head, M_DONTWAIT, MAX_FRAGMENT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
		    *m_head, segs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0) {
		return (error);
	}
	/* Check for TX descriptor overruns. */
	if (sc->tx_cnt + nsegs > MACB_MAX_TX_BUFFERS - 1) {
		bus_dmamap_unload(sc->dmatag_ring_tx, txd->dmamap);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->dmatag_ring_tx, txd->dmamap, BUS_DMASYNC_PREWRITE);
	m = *m_head;

	/* TODO: VLAN hardware tag insertion. */

	csum_flags = 0;
	si = prod;	/* remember the first descriptor of this frame */
	desc = NULL;

	for (i = 0; i < nsegs; i++) {
		desc = &sc->desc_tx[prod];
		desc->addr = segs[i].ds_addr;

		/* Keep OWN set on the first descriptor until we finish. */
		if (i == 0 ) {
			desc->flags = segs[i].ds_len | TD_OWN;
		} else {
			desc->flags = segs[i].ds_len;
		}

		if (prod == MACB_MAX_TX_BUFFERS - 1)
			desc->flags |= TD_WRAP_MASK;

		sc->tx_cnt++;
		MACB_DESC_INC(prod, MACB_MAX_TX_BUFFERS);
	}
	/*
	 * Set EOP on the last fragment.
	 */

	desc->flags |= TD_LAST;
	/* Now hand the first descriptor to the hardware. */
	desc = &sc->desc_tx[si];
	desc->flags &= ~TD_OWN;

	sc->tx_prod = prod;

	/*
	 * Swap the first dma map and the last.
	 * NOTE(review): txd == txd_last here (neither is advanced), so
	 * this swap is a no-op and 'buff' lands on the frame's first
	 * slot — matches what macb_tx_cleanup() frees, but confirm.
	 */
	map = txd_last->dmamap;
	txd_last->dmamap = txd->dmamap;
	txd->dmamap = map;
	txd->buff = m;

	return (0);
}
947
948
949static void
950macbstart_locked(struct ifnet *ifp)
951{
952
953
954
955	struct macb_softc *sc;
956	struct mbuf *m0;
957#if 0
958	struct mbuf *m_new;
959#endif
960	int queued = 0;
961
962	sc = ifp->if_softc;
963
964	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
965	    IFF_DRV_RUNNING || (sc->flags & MACB_FLAG_LINK) == 0) {
966		return;
967	}
968
969	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
970		/* Get packet from the queue */
971		IF_DEQUEUE(&ifp->if_snd, m0);
972		if (m0 == NULL)
973			break;
974#if 0
975		if (m0->m_next != NULL) {
976			/* Fragmented mbuf chain, collapse it. */
977			m_new = m_defrag(m0, M_DONTWAIT);
978			if (m_new != NULL) {
979				/* Original frame freed. */
980				m0 = m_new;
981			} else {
982				/* Defragmentation failed, just use the chain. */
983			}
984		}
985#endif
986		if (macb_encap(sc, &m0)) {
987			if (m0 == NULL)
988				break;
989			IF_PREPEND(&ifp->if_snd, m0);
990			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
991			break;
992		}
993		queued++;
994		BPF_MTAP(ifp, m0);
995	}
996	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
997		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
998	if (queued) {
999		bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
1000		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1001		write_4(sc, EMAC_NCR, read_4(sc, EMAC_NCR) | TRANSMIT_START);
1002		sc->macb_watchdog_timer = MACB_TIMEOUT;
1003	}
1004}
1005
/* ifnet if_init entry point: run the locked initializer. */
static void
macbinit(void *xsc)
{
	struct macb_softc *sc = xsc;

	MACB_LOCK(sc);
	macbinit_locked(sc);
	MACB_UNLOCK(sc);
}
1015
1016static void
1017macbstart(struct ifnet *ifp)
1018{
1019	struct macb_softc *sc = ifp->if_softc;
1020	MACB_ASSERT_UNLOCKED(sc);
1021	MACB_LOCK(sc);
1022	macbstart_locked(ifp);
1023	MACB_UNLOCK(sc);
1024
1025}
1026
1027
1028static void
1029macb_tx_task(void *arg, int pending __unused)
1030{
1031	struct ifnet *ifp;
1032
1033	ifp = (struct ifnet *)arg;
1034	macbstart(ifp);
1035}
1036
1037
1038static void
1039macbstop(struct macb_softc *sc)
1040{
1041	struct ifnet *ifp = sc->ifp;
1042	struct rx_desc_info *rd;
1043	struct tx_desc_info *td;
1044	int i;
1045
1046	ifp = sc->ifp;
1047
1048	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1049
1050	macb_reset(sc);
1051
1052	sc->flags &= ~MACB_FLAG_LINK;
1053	callout_stop(&sc->tick_ch);
1054	sc->macb_watchdog_timer = 0;
1055
1056	/* Free TX/RX mbufs still in the queues. */
1057	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
1058		td = &sc->tx_desc[i];
1059		if (td->buff != NULL) {
1060			bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
1061			    BUS_DMASYNC_POSTWRITE);
1062			bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
1063			m_freem(td->buff);
1064			td->buff = NULL;
1065		}
1066	}
1067	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
1068		rd = &sc->rx_desc[i];
1069		if (rd->buff != NULL) {
1070			bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap,
1071			    BUS_DMASYNC_POSTREAD);
1072			bus_dmamap_unload(sc->dmatag_data_rx, rd->dmamap);
1073			m_freem(rd->buff);
1074			rd->buff = NULL;
1075		}
1076	}
1077}
1078
/*
 * Compute the 6-bit multicast hash index for an Ethernet address:
 * output bit i is the XOR of destination-address bits i, i+6, i+12,
 * ..., i+42.  The result (0-63) selects one bit of the 64-bit hash
 * filter (see set_mac_filter()).
 */
static int
get_hash_index(uint8_t *mac)
{
	int i, j, k;
	int result;
	int bit;

	result = 0;
	for (i = 0; i < 6; i++) {
		bit = 0;
		for (j = 0; j < 8;  j++) {
			k = j * 6 + i;
			bit ^= (mac[k/8] & (1 << (k % 8)) ) != 0;
		}
		/*
		 * Place each XOR'ed bit at its own position; the previous
		 * "result |= bit" collapsed every address to index 0 or 1,
		 * breaking the 0-63 range set_mac_filter() expects.
		 */
		result |= bit << i;
	}
	return result;
}
1097
/*
 * Set the bit corresponding to 'mac' in the 64-bit (2 x 32-bit)
 * multicast hash filter.
 */
static void
set_mac_filter(uint32_t *filter, uint8_t *mac)
{
	int idx;

	idx = get_hash_index(mac);
	filter[idx >> 5] |= 1 << (idx & 31);
}
1106
/*
 * Program RX address filtering from the interface flags: promiscuous
 * (CFG_CAF), all-multicast (all-ones hash filter plus CFG_MTI), or a
 * hash filter built from the joined multicast groups.
 */
static void
set_filter(struct macb_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int config;
	int count;
	uint32_t multicast_filter[2];

	ifp = sc->ifp;

	config = read_4(sc, EMAC_NCFGR);

	/* Start from a clean slate: no promisc, empty hash filter. */
	config &= ~(CFG_CAF | CFG_MTI);
	write_4(sc, EMAC_HRB, 0);
	write_4(sc, EMAC_HRT, 0);

	if ((ifp->if_flags & (IFF_ALLMULTI |IFF_PROMISC)) != 0){
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			/* Match every multicast: all-ones hash filter. */
			write_4(sc, EMAC_HRB, ~0);
			write_4(sc, EMAC_HRT, ~0);
			config |= CFG_MTI;
		}
		if ((ifp->if_flags & IFF_PROMISC) != 0) {
			config |= CFG_CAF;
		}
		write_4(sc, EMAC_NCFGR, config);
		return;
	}

	/* Build the hash filter from the multicast membership list. */
	if_maddr_rlock(ifp);
	count = 0;
	multicast_filter[0] = 0;
	multicast_filter[1] = 0;

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		count++;
		set_mac_filter(multicast_filter,
			   LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	/*
	 * NOTE(review): when the membership list is empty the cleared
	 * 'config' is never written back to EMAC_NCFGR here — confirm
	 * stale CAF/MTI bits cannot survive that path.
	 */
	if (count) {
		write_4(sc, EMAC_HRB, multicast_filter[0]);
		write_4(sc, EMAC_HRT, multicast_filter[1]);
		write_4(sc, EMAC_NCFGR, config|CFG_MTI);
	}
	if_maddr_runlock(ifp);
}
1156
/*
 * ifnet ioctl handler: interface flag changes (up/down, promisc,
 * allmulti), multicast list updates and media requests; everything
 * else is delegated to ether_ioctl().
 */
static int
macbioctl(struct ifnet * ifp, u_long cmd, caddr_t data)
{

	struct macb_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;

	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MACB_LOCK(sc);

		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/* Reprogram filters only on relevant changes. */
				if (((ifp->if_flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					set_filter(sc);
			} else {
				macbinit_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			macbstop(sc);
		}
		/* Remember the flags to detect future changes. */
		sc->if_flags = ifp->if_flags;
		MACB_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MACB_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			set_filter(sc);

		MACB_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);

}
1205
1206/* bus entry points */
1207
/* Device probe: always matches; just sets the description. */
static int
macb_probe(device_t dev)
{
	device_set_desc(dev, "macb");
	return (0);
}
1214
1215/*
1216 * Change media according to request.
1217 */
1218static int
1219macb_ifmedia_upd(struct ifnet *ifp)
1220{
1221	struct macb_softc *sc = ifp->if_softc;
1222	struct mii_data *mii;
1223
1224	mii = device_get_softc(sc->miibus);
1225	MACB_LOCK(sc);
1226	mii_mediachg(mii);
1227	MACB_UNLOCK(sc);
1228	return (0);
1229}
1230
1231/*
1232 * Notify the world which media we're using.
1233 */
1234static void
1235macb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1236{
1237	struct macb_softc *sc = ifp->if_softc;
1238	struct mii_data *mii;
1239
1240	mii = device_get_softc(sc->miibus);
1241
1242	MACB_LOCK(sc);
1243	/* Don't report link state if driver is not running. */
1244	if ((ifp->if_flags & IFF_UP) == 0) {
1245		MACB_UNLOCK(sc);
1246		return;
1247	}
1248	mii_pollstat(mii);
1249	ifmr->ifm_active = mii->mii_media_active;
1250	ifmr->ifm_status = mii->mii_media_status;
1251	MACB_UNLOCK(sc);
1252}
1253
1254static void
1255macb_reset(struct macb_softc *sc)
1256{
1257	/*
1258	 * Disable RX and TX
1259	 */
1260	write_4(sc, EMAC_NCR, 0);
1261
1262	write_4(sc, EMAC_NCR, CLEAR_STAT);
1263
1264	/* Clear all status flags */
1265	write_4(sc, EMAC_TSR, ~0UL);
1266	write_4(sc, EMAC_RSR, ~0UL);
1267
1268	/* Disable all interrupts */
1269	write_4(sc, EMAC_IDR, ~0UL);
1270	read_4(sc, EMAC_ISR);
1271
1272}
1273
1274
1275static int
1276macb_get_mac(struct macb_softc *sc, u_char *eaddr)
1277{
1278	uint32_t bottom;
1279	uint16_t top;
1280
1281	bottom = read_4(sc, EMAC_SA1B);
1282	top = read_4(sc, EMAC_SA1T);
1283
1284	eaddr[0] = bottom & 0xff;
1285	eaddr[1] = (bottom >> 8) & 0xff;
1286	eaddr[2] = (bottom >> 16) & 0xff;
1287	eaddr[3] = (bottom >> 24) & 0xff;
1288	eaddr[4] = top & 0xff;
1289	eaddr[5] = (top >> 8) & 0xff;
1290
1291	return (0);
1292}
1293
1294
1295static int
1296macb_attach(device_t dev)
1297{
1298	struct macb_softc *sc;
1299	struct ifnet *ifp = NULL;
1300	struct sysctl_ctx_list *sctx;
1301	struct sysctl_oid *soid;
1302	int pclk_hz;
1303	u_char eaddr[ETHER_ADDR_LEN];
1304	int rid;
1305	int err;
1306	struct at91_pmc_clock *master;
1307
1308
1309	err = 0;
1310
1311	sc = device_get_softc(dev);
1312	sc->dev = dev;
1313
1314	MACB_LOCK_INIT(sc);
1315
1316	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1317
1318	/*
1319	 * Allocate resources.
1320	 */
1321	rid = 0;
1322	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1323	    RF_ACTIVE);
1324	if (sc->mem_res == NULL) {
1325		device_printf(dev, "could not allocate memory resources.\n");
1326		err = ENOMEM;
1327		goto out;
1328	}
1329	rid = 0;
1330	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1331	    RF_ACTIVE);
1332	if (sc->irq_res == NULL) {
1333		device_printf(dev, "could not allocate interrupt resources.\n");
1334		err = ENOMEM;
1335		goto out;
1336	}
1337
1338	/*setup clock*/
1339	sc->clk = at91_pmc_clock_ref(device_get_nameunit(sc->dev));
1340	at91_pmc_clock_enable(sc->clk);
1341
1342	macb_reset(sc);
1343	macb_get_mac(sc, eaddr);
1344
1345	master = at91_pmc_clock_ref("mck");
1346
1347	pclk_hz = master->hz;
1348
1349	sc->clock = CFG_CLK_8;
1350	if (pclk_hz <= 20000000)
1351		sc->clock = CFG_CLK_8;
1352	else if (pclk_hz <= 40000000)
1353		sc->clock = CFG_CLK_16;
1354	else if (pclk_hz <= 80000000)
1355		sc->clock = CFG_CLK_32;
1356	else
1357		sc->clock = CFG_CLK_64;
1358
1359	sc->clock = sc->clock << 10;
1360
1361	write_4(sc, EMAC_NCFGR, sc->clock);
1362	write_4(sc, EMAC_USRIO, USRIO_CLOCK);       //enable clock
1363
1364	write_4(sc, EMAC_NCR, MPE_ENABLE); //enable MPE
1365
1366	sc->ifp = ifp = if_alloc(IFT_ETHER);
1367	err = mii_attach(dev, &sc->miibus, ifp, macb_ifmedia_upd,
1368	    macb_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1369	if (err != 0) {
1370		device_printf(dev, "attaching PHYs failed\n");
1371		goto out;
1372	}
1373
1374	if (macb_allocate_dma(sc) != 0)
1375		goto out;
1376
1377	/* Sysctls */
1378	sctx = device_get_sysctl_ctx(dev);
1379	soid = device_get_sysctl_tree(dev);
1380
1381	ifp->if_softc = sc;
1382	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1383	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1384	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1385	ifp->if_capenable |= IFCAP_VLAN_MTU;	/* The hw bits already set. */
1386	ifp->if_start = macbstart;
1387	ifp->if_ioctl = macbioctl;
1388	ifp->if_init = macbinit;
1389	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1390	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
1391	IFQ_SET_READY(&ifp->if_snd);
1392	sc->if_flags = ifp->if_flags;
1393
1394	TASK_INIT(&sc->sc_intr_task, 0, macb_intr_task, sc);
1395	TASK_INIT(&sc->sc_tx_task, 0, macb_tx_task, ifp);
1396
1397	sc->sc_tq = taskqueue_create_fast("macb_taskq", M_WAITOK,
1398	    taskqueue_thread_enqueue, &sc->sc_tq);
1399	if (sc->sc_tq == NULL) {
1400		device_printf(sc->dev, "could not create taskqueue\n");
1401		goto out;
1402	}
1403
1404	ether_ifattach(ifp, eaddr);
1405
1406	/*
1407	 * Activate the interrupt.
1408	 */
1409	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
1410	    NULL, macb_intr, sc, &sc->intrhand);
1411	if (err) {
1412		device_printf(dev, "could not establish interrupt handler.\n");
1413		ether_ifdetach(ifp);
1414		goto out;
1415	}
1416
1417	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
1418	    device_get_nameunit(sc->dev));
1419
1420	sc->macb_cdata.rxhead = 0;
1421	sc->macb_cdata.rxtail = 0;
1422
1423	phy_write(sc, 0, 0, 0x3300); //force autoneg
1424
1425	return (0);
1426out:
1427
1428	return (err);
1429}
1430
/*
 * Detach: quiesce the hardware, then tear everything down via
 * macb_deactivate().
 */
static int
macb_detach(device_t dev)
{
	struct macb_softc *sc;

	sc = device_get_softc(dev);
	/* NOTE(review): macbstop() is called without MACB_LOCK here —
	 * confirm that is safe at detach time.  Also verify that
	 * macb_deactivate() performs ether_ifdetach(), bus_teardown_intr()
	 * and releases the bus resources; none of that happens here. */
	macbstop(sc);
	macb_deactivate(dev);

	return (0);
}
1442
1443/*PHY related functions*/
1444static inline int
1445phy_read(struct macb_softc *sc, int phy, int reg)
1446{
1447	int val;
1448
1449	write_4(sc, EMAC_MAN, EMAC_MAN_REG_RD(phy, reg));
1450	while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1451		continue;
1452	val = read_4(sc, EMAC_MAN) & EMAC_MAN_VALUE_MASK;
1453
1454	return (val);
1455}
1456
1457static inline int
1458phy_write(struct macb_softc *sc, int phy, int reg, int data)
1459{
1460
1461	write_4(sc, EMAC_MAN, EMAC_MAN_REG_WR(phy, reg, data));
1462	while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1463		continue;
1464
1465	return (0);
1466}
1467
1468/*
1469 * MII bus support routines.
1470 */
1471static int
1472macb_miibus_readreg(device_t dev, int phy, int reg)
1473{
1474	struct macb_softc *sc;
1475	sc = device_get_softc(dev);
1476	return (phy_read(sc, phy, reg));
1477}
1478
1479static int
1480macb_miibus_writereg(device_t dev, int phy, int reg, int data)
1481{
1482	struct macb_softc *sc;
1483	sc = device_get_softc(dev);
1484	return (phy_write(sc, phy, reg, data));
1485}
1486
1487static void
1488macb_child_detached(device_t dev, device_t child)
1489{
1490	struct macb_softc *sc;
1491	sc = device_get_softc(dev);
1492
1493}
1494
1495static void
1496macb_miibus_statchg(device_t dev)
1497{
1498	struct macb_softc *sc;
1499	struct mii_data *mii;
1500	int config;
1501
1502	sc = device_get_softc(dev);
1503
1504	mii = device_get_softc(sc->miibus);
1505
1506	sc->flags &= ~MACB_FLAG_LINK;
1507
1508	config = read_4(sc, EMAC_NCFGR);
1509
1510	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1511		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1512		case IFM_10_T:
1513			config &= ~(CFG_SPD);
1514			sc->flags |= MACB_FLAG_LINK;
1515			break;
1516		case IFM_100_TX:
1517			config |= CFG_SPD;
1518			sc->flags |= MACB_FLAG_LINK;
1519			break;
1520		default:
1521			break;
1522		}
1523	}
1524
1525	config |= CFG_FD;
1526	write_4(sc, EMAC_NCFGR, config);
1527}
1528
/* Newbus method table for the macb driver. */
static device_method_t macb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	macb_probe),
	DEVMETHOD(device_attach,	macb_attach),
	DEVMETHOD(device_detach,	macb_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	macb_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	macb_miibus_readreg),
	DEVMETHOD(miibus_writereg,	macb_miibus_writereg),
	DEVMETHOD(miibus_statchg,	macb_miibus_statchg),
	{ 0, 0 }	/* terminator */
};
1544
/* Driver declaration: name, methods, and softc size for newbus. */
static driver_t macb_driver = {
	"macb",
	macb_methods,
	sizeof(struct macb_softc),
};
1550
1551
/* Register the driver on the atmelarm bus and hang miibus off it. */
DRIVER_MODULE(macb, atmelarm, macb_driver, macb_devclass, 0, 0);
DRIVER_MODULE(miibus, macb, miibus_driver, miibus_devclass, 0, 0);
/* Module dependencies: the MII layer and the ethernet framework. */
MODULE_DEPEND(macb, miibus, 1, 1, 1);
MODULE_DEPEND(macb, ether, 1, 1, 1);
1556