1/*-
2 * Copyright (c) 2010 Yohanes Nugroho <yohanes@gmail.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include "opt_platform.h"
28#include "opt_at91.h"
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: releng/11.0/sys/arm/at91/if_macb.c 290516 2015-11-07 22:52:06Z imp $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/bus.h>
36#include <sys/kernel.h>
37#include <sys/lock.h>
38#include <sys/mbuf.h>
39#include <sys/malloc.h>
40#include <sys/module.h>
41#include <sys/rman.h>
42#include <sys/socket.h>
43#include <sys/sockio.h>
44#include <sys/sysctl.h>
45#include <sys/taskqueue.h>
46
47#include <net/ethernet.h>
48#include <net/if.h>
49#include <net/if_arp.h>
50#include <net/if_dl.h>
51#include <net/if_media.h>
52#include <net/if_types.h>
53#include <net/if_vlan_var.h>
54
55#ifdef INET
56#include <netinet/in.h>
57#include <netinet/in_systm.h>
58#include <netinet/in_var.h>
59#include <netinet/ip.h>
60#endif
61
62#include <net/bpf.h>
63#include <net/bpfdesc.h>
64
65#include <dev/mii/mii.h>
66#include <dev/mii/miivar.h>
67
68#include <arm/at91/at91_pmcvar.h>
69#include <arm/at91/if_macbreg.h>
70#include <arm/at91/if_macbvar.h>
71#include <arm/at91/at91_piovar.h>
72
73#include <arm/at91/at91sam9g20reg.h>
74
75#include <machine/bus.h>
76#include <machine/intr.h>
77
78#ifdef FDT
79#include <dev/fdt/fdt_common.h>
80#include <dev/ofw/ofw_bus.h>
81#include <dev/ofw/ofw_bus_subr.h>
82#endif
83
84/* "device miibus" required.  See GENERIC if you get errors here. */
85#include "miibus_if.h"
86
87
88#define	MACB_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
89#define	MACB_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
90#define	MACB_LOCK_INIT(_sc)					\
91	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
92	    MTX_NETWORK_LOCK, MTX_DEF)
93#define	MACB_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
94#define	MACB_LOCK_ASSERT(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
95#define	MACB_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
96
97
/* Read the 32-bit MACB/EMAC register at byte offset 'off'. */
static inline uint32_t
read_4(struct macb_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}
104
/* Write 'val' to the 32-bit MACB/EMAC register at byte offset 'off'. */
static inline void
write_4(struct macb_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}
111
112
113static devclass_t macb_devclass;
114
115/* ifnet entry points */
116
117static void	macbinit_locked(void *);
118static void	macbstart_locked(struct ifnet *);
119
120static void	macbinit(void *);
121static void	macbstart(struct ifnet *);
122static void	macbstop(struct macb_softc *);
123static int	macbioctl(struct ifnet * ifp, u_long, caddr_t);
124
125/* bus entry points */
126
127static int	macb_probe(device_t dev);
128static int	macb_attach(device_t dev);
129static int	macb_detach(device_t dev);
130
131/* helper functions */
132static int
133macb_new_rxbuf(struct macb_softc *sc, int index);
134
135static void
136macb_free_desc_dma_tx(struct macb_softc *sc);
137
138static void
139macb_free_desc_dma_rx(struct macb_softc *sc);
140
141static void
142macb_init_desc_dma_tx(struct macb_softc *sc);
143
144static void
145macb_watchdog(struct macb_softc *sc);
146
147static int macb_intr_rx_locked(struct macb_softc *sc, int count);
148static void macb_intr_task(void *arg, int pending __unused);
149static void macb_intr(void *xsc);
150
151static void
152macb_tx_cleanup(struct macb_softc *sc);
153
154static inline int
155phy_write(struct macb_softc *sc, int phy, int reg, int data);
156
157static void	macb_reset(struct macb_softc *sc);
158
159static void
160macb_deactivate(device_t dev)
161{
162	struct macb_softc *sc;
163
164	sc = device_get_softc(dev);
165
166	macb_free_desc_dma_tx(sc);
167	macb_free_desc_dma_rx(sc);
168
169}
170
171static void
172macb_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
173{
174	bus_addr_t *paddr;
175
176	KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
177	paddr = arg;
178	*paddr = segs->ds_addr;
179}
180
/*
 * Allocate the TX descriptor ring and per-slot mbuf dmamaps.
 *
 * Creates a 16-byte aligned single-segment tag for the ring, allocates
 * and loads coherent zeroed memory for it (bus address saved in
 * sc->ring_paddr_tx), then creates a multi-fragment mbuf tag and one
 * dmamap per ring slot.  Returns 0 or a bus_dma errno; on error the
 * caller is expected to unwind via macb_free_desc_dma_tx().
 */
static int
macb_alloc_desc_dma_tx(struct macb_softc *sc)
{
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS, /* max size */
	    1,				/* nsegments */
	    sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_tx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Couldn't create TX descriptor dma tag\n");
		return (error);
	}
	/* Allocate memory for TX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_tx,
	    (void**)&(sc->desc_tx), BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT, &sc->dmamap_ring_tx);
	if (error != 0) {
		device_printf(sc->dev, "failed to allocate TX dma memory\n");
		return (error);
	}
	/* Load Ring DMA: fills in sc->ring_paddr_tx via macb_getaddr(). */
	error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    sc->desc_tx, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS,
	    macb_getaddr, &sc->ring_paddr_tx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "can't load TX descriptor dma map\n");
		return (error);
	}
	/* Allocate a busdma tag for mbufs. No alignment restriction applies. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES * MAX_FRAGMENT,	/* maxsize */
	    MAX_FRAGMENT,		/* nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_tx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev, "failed to create TX mbuf dma tag\n");
		return (error);
	}

	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
		/* Create dma map for each descriptor. */
		error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
		    &sc->tx_desc[i].dmamap);
		if (error != 0) {
			device_printf(sc->dev,
			    "failed to create TX mbuf dma map\n");
			return (error);
		}
	}
	return (0);
}
247
/*
 * Release all TX DMA resources: per-slot mbuf dmamaps and their tag,
 * then the descriptor ring (unload, free, destroy tag).  Safe to call
 * on a partially constructed state — every step is NULL-guarded — so
 * it serves as the unwind path for macb_alloc_desc_dma_tx().
 */
static void
macb_free_desc_dma_tx(struct macb_softc *sc)
{
	struct tx_desc_info *td;
	int i;

	/* TX buffers. */
	if (sc->dmatag_ring_tx != NULL) {
		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
			td = &sc->tx_desc[i];
			if (td->dmamap != NULL) {
				bus_dmamap_destroy(sc->dmatag_ring_tx,
				    td->dmamap);
				td->dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->dmatag_ring_tx);
		sc->dmatag_ring_tx = NULL;
	}

	/* TX descriptor ring. */
	if (sc->dmatag_data_tx != NULL) {
		if (sc->ring_paddr_tx != 0)
			bus_dmamap_unload(sc->dmatag_data_tx,
			    sc->dmamap_ring_tx);
		if (sc->desc_tx != NULL)
			bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
			    sc->dmamap_ring_tx);
		sc->ring_paddr_tx = 0;
		sc->desc_tx = NULL;
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = NULL;
	}
}
282
/*
 * Reset the TX ring to its empty state: producer/consumer indices and
 * in-flight count to zero, all descriptors cleared, every slot flagged
 * TD_OWN (with TD_WRAP_MASK on the last slot so the hardware wraps back
 * to entry 0).  NOTE(review): TD_OWN here appears to act as the EMAC
 * "used" bit, i.e. set means the slot is free for software — confirm
 * against if_macbreg.h.  Called with the driver lock held.
 */
static void
macb_init_desc_dma_tx(struct macb_softc *sc)
{
	struct eth_tx_desc *desc;
	int i;

	MACB_LOCK_ASSERT(sc);

	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_cnt = 0;

	desc = &sc->desc_tx[0];
	bzero(desc, sizeof(struct eth_tx_desc) * MACB_MAX_TX_BUFFERS);

	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
		desc = &sc->desc_tx[i];
		if (i == MACB_MAX_TX_BUFFERS - 1)
			desc->flags = TD_OWN | TD_WRAP_MASK;
		else
			desc->flags = TD_OWN;
	}

	/* Push the initialized ring out to memory the hardware will read. */
	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
309
/*
 * Allocate the RX descriptor ring and per-slot mbuf dmamaps.
 *
 * Mirrors macb_alloc_desc_dma_tx(): 16-byte aligned coherent ring
 * (bus address in sc->ring_paddr_rx) plus a single-segment, cluster-
 * sized mbuf tag and one dmamap per slot.  Returns 0 or a bus_dma
 * errno; on error the caller unwinds via macb_free_desc_dma_rx().
 */
static int
macb_alloc_desc_dma_rx(struct macb_softc *sc)
{
	int error, i;

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    /* maxsize, nsegments */
	    sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 1,
	    /* maxsegsz, flags */
	    sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS, 0,
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_rx);	/* dmat */
	if (error != 0) {
		device_printf(sc->dev,
		    "Couldn't create RX descriptor dma tag\n");
		return (error);
	}
	/* Allocate RX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_rx, (void**)&(sc->desc_rx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dmamap_ring_rx);
	if (error != 0) {
		device_printf(sc->dev,
		    "failed to allocate RX descriptor dma memory\n");
		return (error);
	}

	/* Load dmamap: fills in sc->ring_paddr_rx via macb_getaddr(). */
	error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
	    sc->desc_rx, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS,
	    macb_getaddr, &sc->ring_paddr_rx, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "can't load RX descriptor dma map\n");
		return (error);
	}

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(sc->sc_parent_tag,/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_rx);	/* dmat */

	if (error != 0) {
		device_printf(sc->dev, "failed to create RX mbuf dma tag\n");
		return (error);
	}

	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
		error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
		    &sc->rx_desc[i].dmamap);
		if (error != 0) {
			device_printf(sc->dev,
			    "failed to create RX mbuf dmamap\n");
			return (error);
		}
	}

	return (0);
}
379
/*
 * Release all RX DMA resources: per-slot mbuf dmamaps and their tag,
 * then the descriptor ring.  NULL-guarded throughout so it can unwind
 * a partially completed macb_alloc_desc_dma_rx().
 */
static void
macb_free_desc_dma_rx(struct macb_softc *sc)
{
	struct rx_desc_info *rd;
	int i;

	/* RX buffers. */
	if (sc->dmatag_ring_rx != NULL) {
		for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
			rd = &sc->rx_desc[i];
			if (rd->dmamap != NULL) {
				bus_dmamap_destroy(sc->dmatag_ring_rx,
				    rd->dmamap);
				rd->dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->dmatag_ring_rx);
		sc->dmatag_ring_rx = NULL;
	}
	/* RX descriptor ring. */
	if (sc->dmatag_data_rx != NULL) {
		if (sc->ring_paddr_rx != 0)
			bus_dmamap_unload(sc->dmatag_data_rx,
			    sc->dmamap_ring_rx);
		if (sc->desc_rx != NULL)
			bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
			    sc->dmamap_ring_rx);
		sc->ring_paddr_rx = 0;
		sc->desc_rx = NULL;
		bus_dma_tag_destroy(sc->dmatag_data_rx);
		sc->dmatag_data_rx = NULL;
	}
}
413
/*
 * Reset the RX ring: zero every descriptor and attach a fresh cluster
 * mbuf to each slot via macb_new_rxbuf().  Returns 0, or ENOBUFS if an
 * mbuf could not be allocated.  Called with the driver lock held.
 */
static int
macb_init_desc_dma_rx(struct macb_softc *sc)
{
	struct eth_rx_desc *desc;
	struct rx_desc_info *rd;
	int i;

	MACB_LOCK_ASSERT(sc);

	sc->rx_cons = 0;
	desc = &sc->desc_rx[0];
	bzero(desc, sizeof(struct eth_rx_desc) * MACB_MAX_RX_BUFFERS);
	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
		rd = &sc->rx_desc[i];
		rd->buff = NULL;
		if (macb_new_rxbuf(sc, i) != 0)
			return (ENOBUFS);
	}
	/* Publish the initialized ring before the hardware starts using it. */
	bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
436
437static int
438macb_new_rxbuf(struct macb_softc *sc, int index)
439{
440	struct rx_desc_info *rd;
441	struct eth_rx_desc *desc;
442	struct mbuf *m;
443	bus_dma_segment_t seg[1];
444	int error, nsegs;
445
446	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
447	if (m == NULL)
448		return (ENOBUFS);
449	m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
450	rd = &sc->rx_desc[index];
451	bus_dmamap_unload(sc->dmatag_ring_rx, rd->dmamap);
452	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_rx, rd->dmamap, m,
453	    seg, &nsegs, 0);
454	KASSERT(nsegs == 1, ("Too many segments returned!"));
455	if (error != 0) {
456		m_free(m);
457		return (error);
458	}
459
460	bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap, BUS_DMASYNC_PREREAD);
461	rd->buff = m;
462
463	desc = &sc->desc_rx[index];
464	desc->addr = seg[0].ds_addr;
465
466	desc->flags = DATA_SIZE;
467
468	if (index == MACB_MAX_RX_BUFFERS - 1)
469		desc->addr |= RD_WRAP_MASK;
470
471	return (0);
472}
473
/*
 * Create the parent busdma tag (restricted to 32-bit bus addresses) and
 * then the TX and RX descriptor/buffer DMA resources.  Returns 0 or a
 * bus_dma errno from the first step that fails.
 */
static int
macb_allocate_dma(struct macb_softc *sc)
{
	int error;

	/* Create parent tag for tx and rx */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,		/* lockfunc, lockarg */
	    &sc->sc_parent_tag);
	if (error != 0) {
		device_printf(sc->dev, "Couldn't create parent DMA tag\n");
		return (error);
	}

	if ((error = macb_alloc_desc_dma_tx(sc)) != 0)
		return (error);
	if ((error = macb_alloc_desc_dma_rx(sc)) != 0)
		return (error);
	return (0);
}
502
503
/*
 * Once-per-second callout: drive the PHY state machine (mii_tick) and
 * the TX watchdog, then rearm for another second.  macb_watchdog()
 * asserts the driver lock, so this callout must run with the mutex
 * held (presumably initialized with the softc mutex — see attach).
 */
static void
macb_tick(void *xsc)
{
	struct macb_softc *sc;
	struct mii_data *mii;

	sc = xsc;
	mii = device_get_softc(sc->miibus);
	mii_tick(mii);
	macb_watchdog(sc);
	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->tick_ch, hz, macb_tick, sc);
}
519
520
/*
 * TX watchdog, driven once a second from macb_tick().  The timer is
 * armed by macbstart_locked() and disarmed by macb_tx_cleanup() when
 * the TX ring drains; if it counts down to zero here the transmitter
 * is assumed wedged and the interface is reinitialized.  Called with
 * the driver lock held.
 */
static void
macb_watchdog(struct macb_softc *sc)
{
	struct ifnet *ifp;

	MACB_LOCK_ASSERT(sc);

	/* Timer idle, or still counting down: nothing to do yet. */
	if (sc->macb_watchdog_timer == 0 || --sc->macb_watchdog_timer)
		return;

	ifp = sc->ifp;
	/* A timeout with no link is only reported; no reset is attempted. */
	if ((sc->flags & MACB_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	/* Force a full reinit, then restart any queued transmissions. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	macbinit_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		macbstart_locked(ifp);
}
545
546
547
/*
 * Bring the interface up: (re)build the descriptor rings, program the
 * network configuration register, point the hardware at the rings,
 * enable RX/TX and interrupts, and kick off the MII and tick callout.
 * No-op if already running.  Called with the driver lock held.
 */
static void
macbinit_locked(void *xsc)
{
	struct macb_softc *sc;
	struct ifnet *ifp;
	int err;
	uint32_t config;
	struct mii_data *mii;

	sc = xsc;
	ifp = sc->ifp;

	MACB_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	if ((err = macb_init_desc_dma_rx(sc)) != 0) {
		device_printf(sc->dev, "no memory for RX buffers\n");
		//ecestop(sc);
		return;
	}
	macb_init_desc_dma_tx(sc);

	/*
	 * NOTE(review): sc->clock << 10 lands in the NCFGR clock-divider
	 * field — confirm the field position against the EMAC datasheet.
	 */
	config = read_4(sc, EMAC_NCFGR) | (sc->clock << 10); /*set clock*/
	config |= CFG_PAE;		/* PAuse Enable */
	config |= CFG_DRFCS;		/* Discard Rx FCS */
	config |= CFG_SPD;		/* 100 mbps*/
	//config |= CFG_CAF;
	config |= CFG_FD;

	config |= CFG_RBOF_2; /*offset +2*/

	write_4(sc, EMAC_NCFGR, config);

	/* Initialize TX and RX buffers */
	write_4(sc, EMAC_RBQP, sc->ring_paddr_rx);
	write_4(sc, EMAC_TBQP, sc->ring_paddr_tx);

	/* Enable TX and RX */
	write_4(sc, EMAC_NCR, RX_ENABLE | TX_ENABLE | MPE_ENABLE);


	/* Enable interrupts */
	write_4(sc, EMAC_IER, (RCOMP_INTERRUPT |
			       RXUBR_INTERRUPT |
			       TUND_INTERRUPT |
			       RLE_INTERRUPT |
			       TXERR_INTERRUPT |
			       ROVR_INTERRUPT |
			       HRESP_INTERRUPT|
			       TCOMP_INTERRUPT
			));

	/*
	 * Set 'running' flag, and clear output active flag
	 * and attempt to start the output
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);

	sc->flags |= MACB_FLAG_LINK;

	mii_mediachg(mii);

	callout_reset(&sc->tick_ch, hz, macb_tick, sc);
}
617
618
/*
 * Reap completed TX descriptors.  Reads and acknowledges the transmit
 * status register; on underrun the whole ring is reset and all pending
 * mbufs are dropped, otherwise completed chains (TD_OWN set again by
 * the hardware) are unloaded and freed up to the producer index.
 * Called with the driver lock held, from the interrupt handler.
 */
static void
macb_tx_cleanup(struct macb_softc *sc)
{
	struct ifnet *ifp;
	struct eth_tx_desc *desc;
	struct tx_desc_info *td;
	int flags;
	int status;
	int i;

	MACB_LOCK_ASSERT(sc);

	status = read_4(sc, EMAC_TSR);

	/* Write the bits back to acknowledge them. */
	write_4(sc, EMAC_TSR, status);

	/*buffer underrun*/
	if ((status & TSR_UND) != 0) {
		/*reset buffers*/
		printf("underrun\n");
		bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		sc->tx_cons = sc->tx_prod = 0;
		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
			desc = &sc->desc_tx[i];
			desc->flags = TD_OWN;
		}

		for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
			td = &sc->tx_desc[i];
			if (td->buff != NULL) {
				/* We are finished with this descriptor. */
				bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
						BUS_DMASYNC_POSTWRITE);
				/*
				 * ... and unload, so we can reuse.
				 * NOTE(review): the map was created on
				 * dmatag_ring_tx but is unloaded with
				 * dmatag_data_tx — verify the tags match.
				 */
				bus_dmamap_unload(sc->dmatag_data_tx,
						  td->dmamap);
				m_freem(td->buff);
				td->buff = NULL;
			}
		}
	}

	if ((status & TSR_COMP) == 0)
		return;


	if (sc->tx_cons == sc->tx_prod)
		return;

	ifp = sc->ifp;

	/* Prepare to read the ring (owner bit). */
	bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	while (sc->tx_cons != sc->tx_prod) {
		desc = &sc->desc_tx[sc->tx_cons];
		/* Stop at the first chain the hardware has not finished. */
		if ((desc->flags & TD_OWN) == 0)
			break;

		td = &sc->tx_desc[sc->tx_cons];
		if (td->buff != NULL) {
			/* We are finished with this descriptor. */
			bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
					BUS_DMASYNC_POSTWRITE);
			/* ... and unload, so we can reuse. */
			bus_dmamap_unload(sc->dmatag_data_tx,
					  td->dmamap);
			m_freem(td->buff);
			td->buff = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		/* Walk every fragment of the chain up to TD_LAST. */
		do {
			sc->tx_cnt--;
			MACB_DESC_INC(sc->tx_cons, MACB_MAX_TX_BUFFERS);
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			flags = desc->flags;
			desc->flags = TD_OWN;
			desc = &sc->desc_tx[sc->tx_cons];
			if (flags & TD_LAST) {
				break;
			}
		} while (sc->tx_cons != sc->tx_prod);
	}

	/* Unarm watchog timer when there is no pending descriptors in queue. */
	if (sc->tx_cnt == 0)
		sc->macb_watchdog_timer = 0;
}
709
/*
 * Drain the RX ring.  Frames may span several DATA_SIZE-sized buffers;
 * the fragments are chained into one mbuf chain (head/tail kept in
 * sc->macb_cdata) and handed to if_input when RD_EOF is seen.  Each
 * consumed slot gets a fresh mbuf via macb_new_rxbuf(); if that fails,
 * the frame currently being assembled is dropped and its slots are
 * recycled.  RX interrupts (masked by macb_intr()) are re-enabled at
 * the end.  Called with the driver lock held; the lock is dropped
 * around if_input.
 */
static void
macb_rx(struct macb_softc *sc)
{
	struct eth_rx_desc	*rxdesc;
	struct ifnet *ifp;
	struct mbuf *m;
	int rxbytes;
	int flags;
	int nsegs;
	int first;

	rxdesc = &(sc->desc_rx[sc->rx_cons]);

	ifp = sc->ifp;

	bus_dmamap_sync(sc->dmatag_ring_rx, sc->dmamap_ring_rx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	nsegs = 0;
	/* RD_OWN set means the hardware has filled this buffer. */
	while (rxdesc->addr & RD_OWN) {

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		flags = rxdesc->flags;

		rxbytes = flags & RD_LEN_MASK;

		m = sc->rx_desc[sc->rx_cons].buff;

		bus_dmamap_sync(sc->dmatag_ring_rx,
		    sc->rx_desc[sc->rx_cons].dmamap, BUS_DMASYNC_POSTREAD);
		if (macb_new_rxbuf(sc, sc->rx_cons) != 0) {
			/* No replacement mbuf: drop the whole frame. */
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			first = sc->rx_cons;

			/* Recycle the slots of the dropped frame. */
			do  {
				rxdesc->flags = DATA_SIZE;
				MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);
				if ((rxdesc->flags & RD_EOF) != 0)
					break;
				rxdesc = &(sc->desc_rx[sc->rx_cons]);
			} while (sc->rx_cons != first);

			/* Discard any partially assembled chain. */
			if (sc->macb_cdata.rxhead != NULL) {
				m_freem(sc->macb_cdata.rxhead);
				sc->macb_cdata.rxhead = NULL;
				sc->macb_cdata.rxtail = NULL;
			}

			break;
		}

		nsegs++;

		/* Chain received mbufs. */
		if (sc->macb_cdata.rxhead == NULL) {
			/* First fragment: skip the 2-byte RBOF offset. */
			m->m_data += 2;
			sc->macb_cdata.rxhead = m;
			sc->macb_cdata.rxtail = m;
			if (flags & RD_EOF)
				m->m_len = rxbytes;
			else
				m->m_len = DATA_SIZE - 2;
		} else {
			m->m_flags &= ~M_PKTHDR;
			m->m_len = DATA_SIZE;
			sc->macb_cdata.rxtail->m_next = m;
			sc->macb_cdata.rxtail = m;
		}

		if (flags & RD_EOF) {

			/* Fix up the final fragment's length. */
			if (nsegs > 1) {
				sc->macb_cdata.rxtail->m_len = (rxbytes -
				    ((nsegs - 1) * DATA_SIZE)) + 2;
			}

			m = sc->macb_cdata.rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = rxbytes;
			m->m_pkthdr.rcvif = ifp;
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			nsegs = 0;
			/* Hand the frame up without holding the driver lock. */
			MACB_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			MACB_LOCK(sc);
			sc->macb_cdata.rxhead = NULL;
			sc->macb_cdata.rxtail = NULL;

		}

		/* Return the slot to the hardware. */
		rxdesc->addr &= ~RD_OWN;

		MACB_DESC_INC(sc->rx_cons, MACB_MAX_RX_BUFFERS);

		rxdesc = &(sc->desc_rx[sc->rx_cons]);
	}

	/* Re-enable the RX interrupts masked off in macb_intr(). */
	write_4(sc, EMAC_IER, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));

}
814
/*
 * Drain the RX ring with the driver lock held.  The 'count' budget
 * argument is currently ignored; everything ready is processed.
 * Always returns 0.
 */
static int
macb_intr_rx_locked(struct macb_softc *sc, int count)
{
	(void)count;

	macb_rx(sc);
	return (0);
}
821
822static void
823macb_intr_task(void *arg, int pending __unused)
824{
825	struct macb_softc *sc;
826
827	sc = arg;
828	MACB_LOCK(sc);
829	macb_intr_rx_locked(sc, -1);
830	MACB_UNLOCK(sc);
831}
832
/*
 * Interrupt handler.  RX completion is deferred to the taskqueue (with
 * RX interrupts masked until macb_rx() re-enables them); TX completions
 * are reaped inline, and the send queue is restarted if non-empty.
 * NOTE(review): the loop re-reads EMAC_ISR until it reads zero, which
 * implies the register is clear-on-read — confirm in the datasheet.
 */
static void
macb_intr(void *xsc)
{
	struct macb_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = xsc;
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		printf("not running\n");
		return;
	}

	MACB_LOCK(sc);
	status = read_4(sc, EMAC_ISR);

	while (status) {
		if (status & RCOMP_INTERRUPT) {
			/* Mask RX interrupts and defer to the taskqueue. */
			write_4(sc, EMAC_IDR, (RCOMP_INTERRUPT|RXUBR_INTERRUPT));
			taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
		}

		if (status & TCOMP_INTERRUPT) {
			macb_tx_cleanup(sc);
		}

		status = read_4(sc, EMAC_ISR);
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		macbstart_locked(ifp);
	MACB_UNLOCK(sc);
}
867
/*
 * Map an mbuf chain into the TX ring starting at tx_prod.
 *
 * On EFBIG the chain is collapsed to at most MAX_FRAGMENT clusters and
 * retried.  Returns ENOBUFS (chain left loaded out of the ring) when
 * there are not enough free slots, ENOMEM/errno on mapping failure
 * (*m_head freed and NULLed where noted), 0 on success.  The first
 * descriptor's TD_OWN bit is cleared last so the hardware only sees a
 * fully built chain.  Called with the driver lock held.
 */
static inline int
macb_encap(struct macb_softc *sc, struct mbuf **m_head)
{
	struct eth_tx_desc *desc;
	struct tx_desc_info *txd, *txd_last;
	struct mbuf *m;
	bus_dma_segment_t segs[MAX_FRAGMENT];
	bus_dmamap_t map;
	uint32_t csum_flags;
	int error, i, nsegs, prod, si;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->tx_prod;

	m = *m_head;

	txd = txd_last = &sc->tx_desc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
	    *m_head, segs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments: collapse the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, MAX_FRAGMENT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, txd->dmamap,
		    *m_head, segs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0) {
		return (error);
	}
	/* Check for TX descriptor overruns. */
	if (sc->tx_cnt + nsegs > MACB_MAX_TX_BUFFERS - 1) {
		bus_dmamap_unload(sc->dmatag_ring_tx, txd->dmamap);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->dmatag_ring_tx, txd->dmamap, BUS_DMASYNC_PREWRITE);
	m = *m_head;

	/* TODO: VLAN hardware tag insertion. */

	csum_flags = 0;
	si = prod;
	desc = NULL;

	for (i = 0; i < nsegs; i++) {
		desc = &sc->desc_tx[prod];
		desc->addr = segs[i].ds_addr;

		/* Keep TD_OWN set on the first slot until the chain is done. */
		if (i == 0 ) {
			desc->flags = segs[i].ds_len | TD_OWN;
		} else {
			desc->flags = segs[i].ds_len;
		}

		if (prod == MACB_MAX_TX_BUFFERS - 1)
			desc->flags |= TD_WRAP_MASK;

		sc->tx_cnt++;
		MACB_DESC_INC(prod, MACB_MAX_TX_BUFFERS);
	}
	/*
	 * Set EOP on the last fragment.
	 */

	desc->flags |= TD_LAST;
	/* Finally hand the first slot to the hardware. */
	desc = &sc->desc_tx[si];
	desc->flags &= ~TD_OWN;

	sc->tx_prod = prod;

	/* Swap the first dma map and the last. */
	map = txd_last->dmamap;
	txd_last->dmamap = txd->dmamap;
	txd->dmamap = map;
	txd->buff = m;

	return (0);
}
954
955
956static void
957macbstart_locked(struct ifnet *ifp)
958{
959
960
961
962	struct macb_softc *sc;
963	struct mbuf *m0;
964#if 0
965	struct mbuf *m_new;
966#endif
967	int queued = 0;
968
969	sc = ifp->if_softc;
970
971	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
972	    IFF_DRV_RUNNING || (sc->flags & MACB_FLAG_LINK) == 0) {
973		return;
974	}
975
976	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
977		/* Get packet from the queue */
978		IF_DEQUEUE(&ifp->if_snd, m0);
979		if (m0 == NULL)
980			break;
981#if 0
982		if (m0->m_next != NULL) {
983			/* Fragmented mbuf chain, collapse it. */
984			m_new = m_defrag(m0, M_NOWAIT);
985			if (m_new != NULL) {
986				/* Original frame freed. */
987				m0 = m_new;
988			} else {
989				/* Defragmentation failed, just use the chain. */
990			}
991		}
992#endif
993		if (macb_encap(sc, &m0)) {
994			if (m0 == NULL)
995				break;
996			IF_PREPEND(&ifp->if_snd, m0);
997			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
998			break;
999		}
1000		queued++;
1001		BPF_MTAP(ifp, m0);
1002	}
1003	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1004		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1005	if (queued) {
1006		bus_dmamap_sync(sc->dmatag_data_tx, sc->dmamap_ring_tx,
1007		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1008		write_4(sc, EMAC_NCR, read_4(sc, EMAC_NCR) | TRANSMIT_START);
1009		sc->macb_watchdog_timer = MACB_TIMEOUT;
1010	}
1011}
1012
/*
 * ifnet if_init entry point: initialize under the driver lock.
 */
static void
macbinit(void *xsc)
{
	struct macb_softc *sc;

	sc = xsc;
	MACB_LOCK(sc);
	macbinit_locked(sc);
	MACB_UNLOCK(sc);
}
1022
1023static void
1024macbstart(struct ifnet *ifp)
1025{
1026	struct macb_softc *sc = ifp->if_softc;
1027	MACB_ASSERT_UNLOCKED(sc);
1028	MACB_LOCK(sc);
1029	macbstart_locked(ifp);
1030	MACB_UNLOCK(sc);
1031
1032}
1033
1034
1035static void
1036macbstop(struct macb_softc *sc)
1037{
1038	struct ifnet *ifp = sc->ifp;
1039	struct rx_desc_info *rd;
1040	struct tx_desc_info *td;
1041	int i;
1042
1043	ifp = sc->ifp;
1044
1045	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1046
1047	macb_reset(sc);
1048
1049	sc->flags &= ~MACB_FLAG_LINK;
1050	callout_stop(&sc->tick_ch);
1051	sc->macb_watchdog_timer = 0;
1052
1053	/* Free TX/RX mbufs still in the queues. */
1054	for (i = 0; i < MACB_MAX_TX_BUFFERS; i++) {
1055		td = &sc->tx_desc[i];
1056		if (td->buff != NULL) {
1057			bus_dmamap_sync(sc->dmatag_ring_tx, td->dmamap,
1058			    BUS_DMASYNC_POSTWRITE);
1059			bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
1060			m_freem(td->buff);
1061			td->buff = NULL;
1062		}
1063	}
1064	for (i = 0; i < MACB_MAX_RX_BUFFERS; i++) {
1065		rd = &sc->rx_desc[i];
1066		if (rd->buff != NULL) {
1067			bus_dmamap_sync(sc->dmatag_ring_rx, rd->dmamap,
1068			    BUS_DMASYNC_POSTREAD);
1069			bus_dmamap_unload(sc->dmatag_data_rx, rd->dmamap);
1070			m_freem(rd->buff);
1071			rd->buff = NULL;
1072		}
1073	}
1074}
1075
/*
 * Compute the 6-bit multicast hash index for a MAC address.  Per the
 * EMAC hash filter specification, index bit i is the XOR of address
 * bits i, i + 6, i + 12, ..., i + 42.
 *
 * Fix: the per-position XOR result must be placed at bit position i
 * ("result |= bit << i").  The previous "result |= bit" collapsed the
 * whole index into bit 0, so the function could only return 0 or 1 and
 * (almost) all multicast addresses hashed to the same two filter bits.
 */
static int
get_hash_index(uint8_t *mac)
{
	int i, j, k;
	int result;
	int bit;

	result = 0;
	for (i = 0; i < 6; i++) {
		bit = 0;
		for (j = 0; j < 8;  j++) {
			k = j * 6 + i;
			bit ^= (mac[k/8] & (1 << (k % 8)) ) != 0;
		}
		result |= bit << i;
	}
	return result;
}
1094
/*
 * Set the hash-filter bit for 'mac' in the 64-bit filter image
 * (filter[0] -> EMAC_HRB, filter[1] -> EMAC_HRT).
 *
 * Fix: use an unsigned constant for the shift — "1 << 31" (needed when
 * the hash index selects bit 31 of a word) is signed-overflow UB.
 */
static void
set_mac_filter(uint32_t *filter, uint8_t *mac)
{
	int bits;

	bits = get_hash_index(mac);
	filter[bits >> 5] |= 1U << (bits & 31);
}
1103
1104static void
1105set_filter(struct macb_softc *sc)
1106{
1107	struct ifnet *ifp;
1108	struct ifmultiaddr *ifma;
1109	int config;
1110	int count;
1111	uint32_t multicast_filter[2];
1112
1113	ifp = sc->ifp;
1114
1115	config = read_4(sc, EMAC_NCFGR);
1116
1117	config &= ~(CFG_CAF | CFG_MTI);
1118	write_4(sc, EMAC_HRB, 0);
1119	write_4(sc, EMAC_HRT, 0);
1120
1121	if ((ifp->if_flags & (IFF_ALLMULTI |IFF_PROMISC)) != 0){
1122		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
1123			write_4(sc, EMAC_HRB, ~0);
1124			write_4(sc, EMAC_HRT, ~0);
1125			config |= CFG_MTI;
1126		}
1127		if ((ifp->if_flags & IFF_PROMISC) != 0) {
1128			config |= CFG_CAF;
1129		}
1130		write_4(sc, EMAC_NCFGR, config);
1131		return;
1132	}
1133
1134	if_maddr_rlock(ifp);
1135	count = 0;
1136	multicast_filter[0] = 0;
1137	multicast_filter[1] = 0;
1138
1139	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1140		if (ifma->ifma_addr->sa_family != AF_LINK)
1141			continue;
1142		count++;
1143		set_mac_filter(multicast_filter,
1144			   LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
1145	}
1146	if (count) {
1147		write_4(sc, EMAC_HRB, multicast_filter[0]);
1148		write_4(sc, EMAC_HRT, multicast_filter[1]);
1149		write_4(sc, EMAC_NCFGR, config|CFG_MTI);
1150	}
1151	if_maddr_runlock(ifp);
1152}
1153
/*
 * ifnet ioctl entry point.  Handles up/down transitions and receive-
 * filter changes (SIOCSIFFLAGS), multicast list updates (SIOCADDMULTI/
 * SIOCDELMULTI) and media requests (forwarded to the MII layer); all
 * other requests go to ether_ioctl().  Returns 0 or an errno.
 */
static int
macbioctl(struct ifnet * ifp, u_long cmd, caddr_t data)
{

	struct macb_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;

	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		MACB_LOCK(sc);

		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/* Only reprogram the filter on a change. */
				if (((ifp->if_flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					set_filter(sc);
			} else {
				macbinit_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			macbstop(sc);
		}
		/* Remember the flags for the next delta check. */
		sc->if_flags = ifp->if_flags;
		MACB_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MACB_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			set_filter(sc);

		MACB_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);

}
1202
1203/* bus entry points */
1204
1205static int
1206macb_probe(device_t dev)
1207{
1208#ifdef FDT
1209        if (!ofw_bus_is_compatible(dev, "cdns,at32ap7000-macb"))
1210                return (ENXIO);
1211#endif
1212
1213	device_set_desc(dev, "macb");
1214	return (0);
1215}
1216
1217/*
1218 * Change media according to request.
1219 */
1220static int
1221macb_ifmedia_upd(struct ifnet *ifp)
1222{
1223	struct macb_softc *sc = ifp->if_softc;
1224	struct mii_data *mii;
1225
1226	mii = device_get_softc(sc->miibus);
1227	MACB_LOCK(sc);
1228	mii_mediachg(mii);
1229	MACB_UNLOCK(sc);
1230	return (0);
1231}
1232
1233/*
1234 * Notify the world which media we're using.
1235 */
1236static void
1237macb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1238{
1239	struct macb_softc *sc = ifp->if_softc;
1240	struct mii_data *mii;
1241
1242	mii = device_get_softc(sc->miibus);
1243
1244	MACB_LOCK(sc);
1245	/* Don't report link state if driver is not running. */
1246	if ((ifp->if_flags & IFF_UP) == 0) {
1247		MACB_UNLOCK(sc);
1248		return;
1249	}
1250	mii_pollstat(mii);
1251	ifmr->ifm_active = mii->mii_media_active;
1252	ifmr->ifm_status = mii->mii_media_status;
1253	MACB_UNLOCK(sc);
1254}
1255
/*
 * Put the EMAC into a known quiescent state: stop the MAC, reset the
 * statistics counters, acknowledge all status bits and mask every
 * interrupt source.  The register write order below is deliberate.
 */
static void
macb_reset(struct macb_softc *sc)
{
	/*
	 * Disable RX and TX
	 */
	write_4(sc, EMAC_NCR, 0);

	/* Reset the statistics registers. */
	write_4(sc, EMAC_NCR, CLEAR_STAT);

	/* Clear all status flags */
	write_4(sc, EMAC_TSR, ~0UL);
	write_4(sc, EMAC_RSR, ~0UL);

	/* Disable all interrupts */
	write_4(sc, EMAC_IDR, ~0UL);
	/* NOTE(review): ISR read appears to clear pending bits — confirm. */
	read_4(sc, EMAC_ISR);

}
1275
1276
1277static int
1278macb_get_mac(struct macb_softc *sc, u_char *eaddr)
1279{
1280	uint32_t bottom;
1281	uint16_t top;
1282
1283	bottom = read_4(sc, EMAC_SA1B);
1284	top = read_4(sc, EMAC_SA1T);
1285
1286	eaddr[0] = bottom & 0xff;
1287	eaddr[1] = (bottom >> 8) & 0xff;
1288	eaddr[2] = (bottom >> 16) & 0xff;
1289	eaddr[3] = (bottom >> 24) & 0xff;
1290	eaddr[4] = top & 0xff;
1291	eaddr[5] = (top >> 8) & 0xff;
1292
1293	return (0);
1294}
1295
1296
1297#ifdef FDT
1298/*
1299 * We have to know if we're using MII or RMII attachment
1300 * for the MACB to talk to the PHY correctly. With FDT,
1301 * we must use rmii if there's a proprety phy-mode
1302 * equal to "rmii". Otherwise we MII mode is used.
1303 */
1304static void
1305macb_set_rmii(struct macb_softc *sc)
1306{
1307	phandle_t node;
1308	char prop[10];
1309	ssize_t len;
1310
1311	node = ofw_bus_get_node(sc->dev);
1312	memset(prop, 0 ,sizeof(prop));
1313	len = OF_getproplen(node, "phy-mode");
1314	if (len != 4)
1315		return;
1316	if (OF_getprop(node, "phy-mode", prop, len) != len)
1317		return;
1318	if (strncmp(prop, "rmii", 4) == 0)
1319		sc->use_rmii = USRIO_RMII;
1320}
1321#else
1322/*
1323 * We have to know if we're using MII or RMII attachment
1324 * for the MACB to talk to the PHY correctly. Without FDT,
1325 * there's no good way to do this. So, if the config file
1326 * has 'option AT91_MACB_USE_RMII', then we'll force RMII.
1327 * Otherwise, we'll use what the bootloader setup. Either
1328 * it setup RMII or MII, in which case we'll get it right,
1329 * or it did nothing, and we'll fall back to MII and the
1330 * option would override if present.
1331 */
static void
macb_set_rmii(struct macb_softc *sc)
{
	/* Kernel option forces RMII; otherwise keep the bootloader's choice. */
#ifdef AT91_MACB_USE_RMII
	sc->use_rmii = USRIO_RMII;
#else
	/* Preserve whatever MII/RMII selection is already in EMAC_USRIO. */
	sc->use_rmii = read_4(sc, EMAC_USRIO) & USRIO_RMII;
#endif
}
1341#endif
1342
1343static int
1344macb_attach(device_t dev)
1345{
1346	struct macb_softc *sc;
1347	struct ifnet *ifp = NULL;
1348	struct sysctl_ctx_list *sctx;
1349	struct sysctl_oid *soid;
1350	int pclk_hz;
1351	u_char eaddr[ETHER_ADDR_LEN];
1352	int rid;
1353	int err;
1354	struct at91_pmc_clock *master;
1355
1356
1357	err = 0;
1358
1359	sc = device_get_softc(dev);
1360	sc->dev = dev;
1361
1362	MACB_LOCK_INIT(sc);
1363
1364	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
1365
1366	/*
1367	 * Allocate resources.
1368	 */
1369	rid = 0;
1370	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1371	    RF_ACTIVE);
1372	if (sc->mem_res == NULL) {
1373		device_printf(dev, "could not allocate memory resources.\n");
1374		err = ENOMEM;
1375		goto out;
1376	}
1377	rid = 0;
1378	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1379	    RF_ACTIVE);
1380	if (sc->irq_res == NULL) {
1381		device_printf(dev, "could not allocate interrupt resources.\n");
1382		err = ENOMEM;
1383		goto out;
1384	}
1385
1386	/*setup clock*/
1387	sc->clk = at91_pmc_clock_ref(device_get_nameunit(sc->dev));
1388	at91_pmc_clock_enable(sc->clk);
1389
1390	macb_reset(sc);
1391	macb_get_mac(sc, eaddr);
1392
1393	master = at91_pmc_clock_ref("mck");
1394
1395	pclk_hz = master->hz;
1396
1397	sc->clock = CFG_CLK_8;
1398	if (pclk_hz <= 20000000)
1399		sc->clock = CFG_CLK_8;
1400	else if (pclk_hz <= 40000000)
1401		sc->clock = CFG_CLK_16;
1402	else if (pclk_hz <= 80000000)
1403		sc->clock = CFG_CLK_32;
1404	else
1405		sc->clock = CFG_CLK_64;
1406
1407	sc->clock = sc->clock << 10;
1408
1409	macb_set_rmii(sc);
1410	write_4(sc, EMAC_NCFGR, sc->clock);
1411	write_4(sc, EMAC_USRIO, USRIO_CLOCK | sc->use_rmii);       //enable clock
1412
1413	write_4(sc, EMAC_NCR, MPE_ENABLE); //enable MPE
1414
1415	sc->ifp = ifp = if_alloc(IFT_ETHER);
1416	err = mii_attach(dev, &sc->miibus, ifp, macb_ifmedia_upd,
1417	    macb_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1418	if (err != 0) {
1419		device_printf(dev, "attaching PHYs failed\n");
1420		goto out;
1421	}
1422
1423	if (macb_allocate_dma(sc) != 0)
1424		goto out;
1425
1426	/* Sysctls */
1427	sctx = device_get_sysctl_ctx(dev);
1428	soid = device_get_sysctl_tree(dev);
1429
1430	ifp->if_softc = sc;
1431	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1432	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1433	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1434	ifp->if_capenable |= IFCAP_VLAN_MTU;	/* The hw bits already set. */
1435	ifp->if_start = macbstart;
1436	ifp->if_ioctl = macbioctl;
1437	ifp->if_init = macbinit;
1438	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1439	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
1440	IFQ_SET_READY(&ifp->if_snd);
1441	sc->if_flags = ifp->if_flags;
1442
1443	TASK_INIT(&sc->sc_intr_task, 0, macb_intr_task, sc);
1444
1445	sc->sc_tq = taskqueue_create_fast("macb_taskq", M_WAITOK,
1446	    taskqueue_thread_enqueue, &sc->sc_tq);
1447	if (sc->sc_tq == NULL) {
1448		device_printf(sc->dev, "could not create taskqueue\n");
1449		goto out;
1450	}
1451
1452	ether_ifattach(ifp, eaddr);
1453
1454	/*
1455	 * Activate the interrupt.
1456	 */
1457	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
1458	    NULL, macb_intr, sc, &sc->intrhand);
1459	if (err) {
1460		device_printf(dev, "could not establish interrupt handler.\n");
1461		ether_ifdetach(ifp);
1462		goto out;
1463	}
1464
1465	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
1466	    device_get_nameunit(sc->dev));
1467
1468	sc->macb_cdata.rxhead = 0;
1469	sc->macb_cdata.rxtail = 0;
1470
1471	phy_write(sc, 0, 0, 0x3300); //force autoneg
1472
1473	return (0);
1474out:
1475
1476	return (err);
1477}
1478
1479static int
1480macb_detach(device_t dev)
1481{
1482	struct macb_softc *sc;
1483
1484	sc = device_get_softc(dev);
1485	ether_ifdetach(sc->ifp);
1486	MACB_LOCK(sc);
1487	macbstop(sc);
1488	MACB_UNLOCK(sc);
1489	callout_drain(&sc->tick_ch);
1490	bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
1491	taskqueue_drain(sc->sc_tq, &sc->sc_intr_task);
1492	taskqueue_free(sc->sc_tq);
1493	macb_deactivate(dev);
1494	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
1495	bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
1496	MACB_LOCK_DESTROY(sc);
1497
1498	return (0);
1499}
1500
1501/*PHY related functions*/
1502static inline int
1503phy_read(struct macb_softc *sc, int phy, int reg)
1504{
1505	int val;
1506
1507	write_4(sc, EMAC_MAN, EMAC_MAN_REG_RD(phy, reg));
1508	while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1509		continue;
1510	val = read_4(sc, EMAC_MAN) & EMAC_MAN_VALUE_MASK;
1511
1512	return (val);
1513}
1514
1515static inline int
1516phy_write(struct macb_softc *sc, int phy, int reg, int data)
1517{
1518
1519	write_4(sc, EMAC_MAN, EMAC_MAN_REG_WR(phy, reg, data));
1520	while ((read_4(sc, EMAC_SR) & EMAC_SR_IDLE) == 0)
1521		continue;
1522
1523	return (0);
1524}
1525
1526/*
1527 * MII bus support routines.
1528 */
1529static int
1530macb_miibus_readreg(device_t dev, int phy, int reg)
1531{
1532	struct macb_softc *sc;
1533	sc = device_get_softc(dev);
1534	return (phy_read(sc, phy, reg));
1535}
1536
1537static int
1538macb_miibus_writereg(device_t dev, int phy, int reg, int data)
1539{
1540	struct macb_softc *sc;
1541	sc = device_get_softc(dev);
1542	return (phy_write(sc, phy, reg, data));
1543}
1544
1545static void
1546macb_child_detached(device_t dev, device_t child)
1547{
1548	struct macb_softc *sc;
1549	sc = device_get_softc(dev);
1550
1551}
1552
1553static void
1554macb_miibus_statchg(device_t dev)
1555{
1556	struct macb_softc *sc;
1557	struct mii_data *mii;
1558	int config;
1559
1560	sc = device_get_softc(dev);
1561
1562	mii = device_get_softc(sc->miibus);
1563
1564	sc->flags &= ~MACB_FLAG_LINK;
1565
1566	config = read_4(sc, EMAC_NCFGR);
1567
1568	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1569		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1570		case IFM_10_T:
1571			config &= ~(CFG_SPD);
1572			sc->flags |= MACB_FLAG_LINK;
1573			break;
1574		case IFM_100_TX:
1575			config |= CFG_SPD;
1576			sc->flags |= MACB_FLAG_LINK;
1577			break;
1578		default:
1579			break;
1580		}
1581	}
1582
1583	config |= CFG_FD;
1584	write_4(sc, EMAC_NCFGR, config);
1585}
1586
1587static device_method_t macb_methods[] = {
1588	/* Device interface */
1589	DEVMETHOD(device_probe,	macb_probe),
1590	DEVMETHOD(device_attach,	macb_attach),
1591	DEVMETHOD(device_detach,	macb_detach),
1592
1593	/* Bus interface */
1594	DEVMETHOD(bus_child_detached,	macb_child_detached),
1595
1596	/* MII interface */
1597	DEVMETHOD(miibus_readreg,	macb_miibus_readreg),
1598	DEVMETHOD(miibus_writereg,	macb_miibus_writereg),
1599	DEVMETHOD(miibus_statchg,	macb_miibus_statchg),
1600	{ 0, 0 }
1601};
1602
/* Driver description tying the method table to the softc size. */
static driver_t macb_driver = {
	"macb",			/* device name prefix */
	macb_methods,
	sizeof(struct macb_softc),
};
1608
1609
1610#ifdef FDT
1611DRIVER_MODULE(macb, simplebus, macb_driver, macb_devclass, NULL, NULL);
1612#else
1613DRIVER_MODULE(macb, atmelarm, macb_driver, macb_devclass, 0, 0);
1614#endif
1615DRIVER_MODULE(miibus, macb, miibus_driver, miibus_devclass, 0, 0);
1616MODULE_DEPEND(macb, miibus, 1, 1, 1);
1617MODULE_DEPEND(macb, ether, 1, 1, 1);
1618