/*-
 * Copyright (c) 2009 Yohanes Nugroho <yohanes@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <arm/econa/if_ecereg.h>
#include <arm/econa/if_ecevar.h>
#include <arm/econa/econa_var.h>

#include <machine/bus.h>
#include <machine/intr.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

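/*
 * Default station address for VLAN group 0.  This is only a
 * placeholder: ece_set_mac() overwrites it at attach time with the
 * address actually in use, and the switch's ARL table is programmed
 * from it (see write_arl_table_entry() below).
 */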
static uint8_t
vlan0_mac[ETHER_ADDR_LEN] = {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x19};

/*
 * The boot loader expects the hardware state to be the same when we
 * restart the device (warm boot), so we need to save the initial
 * config values.
 */
static int initial_switch_config;
static int initial_cpu_config;
static int initial_port0_config;
static int initial_port1_config;

static inline uint32_t
read_4(struct ece_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}

static inline void
write_4(struct ece_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}

#define	ECE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ECE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	ECE_LOCK_INIT(_sc) \
	mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->dev),	\
	    MTX_NETWORK_LOCK, MTX_DEF)

#define	ECE_TXLOCK(_sc)		mtx_lock(&(_sc)->sc_mtx_tx)
#define	ECE_TXUNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx_tx)
#define	ECE_TXLOCK_INIT(_sc) \
	mtx_init(&(_sc)->sc_mtx_tx, device_get_nameunit((_sc)->dev),	\
	    "ECE TX Lock", MTX_DEF)

#define	ECE_CLEANUPLOCK(_sc)	mtx_lock(&(_sc)->sc_mtx_cleanup)
#define	ECE_CLEANUPUNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx_cleanup)
#define	ECE_CLEANUPLOCK_INIT(_sc) \
	mtx_init(&(_sc)->sc_mtx_cleanup, device_get_nameunit((_sc)->dev), \
	    "ECE cleanup Lock", MTX_DEF)

#define	ECE_RXLOCK(_sc)		mtx_lock(&(_sc)->sc_mtx_rx)
#define	ECE_RXUNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx_rx)
#define	ECE_RXLOCK_INIT(_sc) \
	mtx_init(&(_sc)->sc_mtx_rx, device_get_nameunit((_sc)->dev),	\
	    "ECE RX Lock", MTX_DEF)

#define	ECE_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx)
#define	ECE_TXLOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx_tx)
#define	ECE_RXLOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx_rx)
#define	ECE_CLEANUPLOCK_DESTROY(_sc) \
	mtx_destroy(&(_sc)->sc_mtx_cleanup)

#define	ECE_ASSERT_LOCKED(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define	ECE_ASSERT_UNLOCKED(_sc) mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)

static devclass_t ece_devclass;

/* ifnet entry points */

static void	eceinit_locked(void *);
static void	ecestart_locked(struct ifnet *);

static void	eceinit(void *);
static void	ecestart(struct ifnet *);
static void	ecestop(struct ece_softc *);
static int	eceioctl(struct ifnet *, u_long, caddr_t);

/* bus entry points */

static int	ece_probe(device_t dev);
static int	ece_attach(device_t dev);
static int	ece_detach(device_t dev);
static void	ece_intr(void *);
static void	ece_intr_qf(void *);
static void	ece_intr_status(void *xsc);

/* helper routines */
static int	ece_activate(device_t dev);
static void	ece_deactivate(device_t dev);
static int	ece_ifmedia_upd(struct ifnet *ifp);
static void	ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	ece_get_mac(struct ece_softc *sc, u_char *eaddr);
static void	ece_set_mac(struct ece_softc *sc, u_char *eaddr);
static int	configure_cpu_port(struct ece_softc *sc);
static int	configure_lan_port(struct ece_softc *sc, int phy_type);
static void	set_pvid(struct ece_softc *sc, int port0, int port1, int cpu);
static void	set_vlan_vid(struct ece_softc *sc, int vlan);
static void	set_vlan_member(struct ece_softc *sc, int vlan);
static void	set_vlan_tag(struct ece_softc *sc, int vlan);
static int	hardware_init(struct ece_softc *sc);
static void	ece_intr_rx_locked(struct ece_softc *sc, int count);

static void	ece_free_desc_dma_tx(struct ece_softc *sc);
static void	ece_free_desc_dma_rx(struct ece_softc *sc);

static void	ece_intr_task(void *arg, int pending __unused);
static void	ece_tx_task(void *arg, int pending __unused);
static void	ece_cleanup_task(void *arg, int pending __unused);

static int	ece_allocate_dma(struct ece_softc *sc);

static void	ece_intr_tx(void *xsc);

static void	clear_mac_entries(struct ece_softc *ec, int include_this_mac);

static uint32_t	read_mac_entry(struct ece_softc *ec, uint8_t *mac_result,
		    int first);

/* PHY related functions. */
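/*
 * MDIO access goes through the PHY_CONTROL register: PHY_RW_OK is
 * written first to clear any completed transaction, a read or write
 * command is issued, and then PHY_RW_OK is polled (bounded at 0x1000
 * iterations) until the hardware signals completion.  Note that on a
 * timeout phy_read() simply returns 0.
 */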
static inline int
phy_read(struct ece_softc *sc, int phy, int reg)
{
	int val;
	int ii;
	int status;

	write_4(sc, PHY_CONTROL, PHY_RW_OK);
	write_4(sc, PHY_CONTROL,
	    (PHY_ADDRESS(phy) | PHY_READ_COMMAND |
	    PHY_REGISTER(reg)));

	for (ii = 0; ii < 0x1000; ii++) {
		status = read_4(sc, PHY_CONTROL);
		if (status & PHY_RW_OK) {
			/*
			 * Clear the RW_OK status and the other bits'
			 * values.
			 */
			write_4(sc, PHY_CONTROL, PHY_RW_OK);
			val = PHY_GET_DATA(status);
			return (val);
		}
	}
	return (0);
}

static inline void
phy_write(struct ece_softc *sc, int phy, int reg, int data)
{
	int ii;

	write_4(sc, PHY_CONTROL, PHY_RW_OK);
	write_4(sc, PHY_CONTROL,
	    PHY_ADDRESS(phy) | PHY_REGISTER(reg) |
	    PHY_WRITE_COMMAND | PHY_DATA(data));
	for (ii = 0; ii < 0x1000; ii++) {
		if (read_4(sc, PHY_CONTROL) & PHY_RW_OK) {
			/*
			 * Clear the RW_OK status and the other bits'
			 * values.
			 */
			write_4(sc, PHY_CONTROL, PHY_RW_OK);
			return;
		}
	}
}

static int
get_phy_type(struct ece_softc *sc)
{
	uint16_t phy0_id = 0, phy1_id = 0;

	/*
	 * Use SMI (MDC/MDIO) to read the link partner's PHY Identifier
	 * Register 1.
	 */
	phy0_id = phy_read(sc, 0, 0x2);
	phy1_id = phy_read(sc, 1, 0x2);

	if ((phy0_id == 0xFFFF) && (phy1_id == 0x000F))
		return (ASIX_GIGA_PHY);
	else if ((phy0_id == 0x0243) && (phy1_id == 0x0243))
		return (TWO_SINGLE_PHY);
	else if ((phy0_id == 0xFFFF) && (phy1_id == 0x0007))
		return (VSC8601_GIGA_PHY);
	else if ((phy0_id == 0x0243) && (phy1_id == 0xFFFF))
		return (IC_PLUS_PHY);

	return (NOT_FOUND_PHY);
}

static int
ece_probe(device_t dev)
{

	device_set_desc(dev, "Econa Ethernet Controller");
	return (0);
}

static int
ece_attach(device_t dev)
{
	struct ece_softc *sc;
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	u_char eaddr[ETHER_ADDR_LEN];
	int err;
	int rid;
	uint32_t rnd;

	err = 0;

	sc = device_get_softc(dev);

	sc->dev = dev;

	/*
	 * Initialize the locks up front, so that the failure path
	 * (ece_deactivate()) never destroys an uninitialized mutex.
	 */
	ECE_LOCK_INIT(sc);
	ECE_TXLOCK_INIT(sc);
	ECE_RXLOCK_INIT(sc);
	ECE_CLEANUPLOCK_INIT(sc);

	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		err = ENXIO;
		goto out;
	}

	power_on_network_interface();

	rid = 0;
	sc->irq_res_status = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_status == NULL) {
		err = ENXIO;
		goto out;
	}

	rid = 1;
	/* TSTC: Fm-Switch-Tx-Complete */
	sc->irq_res_tx = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_tx == NULL) {
		err = ENXIO;
		goto out;
	}

	rid = 2;
	/* FSRC: Fm-Switch-Rx-Complete */
	sc->irq_res_rec = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_rec == NULL) {
		err = ENXIO;
		goto out;
	}

	rid = 4;
	/* FSQF: Fm-Switch-Queue-Full */
	sc->irq_res_qf = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_qf == NULL) {
		err = ENXIO;
		goto out;
	}

	err = ece_activate(dev);
	if (err)
		goto out;

	/* Sysctls */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);

	if ((err = ece_get_mac(sc, eaddr)) != 0) {
		/* No MAC address configured.  Generate a random one. */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		rnd = arc4random();

		/* From if_ae.c/if_ate.c: */
		/*
		 * Set OUI to convenient locally assigned address.  'b'
		 * is 0x62, which has the locally assigned bit set, and
		 * the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;
	}
	ece_set_mac(sc, eaddr);
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	/* Only one PHY at address 0 in this device. */
	err = mii_attach(dev, &sc->miibus, ifp, ece_ifmedia_upd,
	    ece_ifmedia_sts, BMSR_DEFCAPMASK, 0, MII_OFFSET_ANY, 0);
	if (err != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto out;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_capabilities = IFCAP_HWCSUM;

	ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_start = ecestart;
	ifp->if_ioctl = eceioctl;
	ifp->if_init = eceinit;
	ifp->if_snd.ifq_drv_maxlen = ECE_MAX_TX_BUFFERS - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ECE_MAX_TX_BUFFERS - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Create the local taskqueue. */
	TASK_INIT(&sc->sc_intr_task, 0, ece_intr_task, sc);
	TASK_INIT(&sc->sc_tx_task, 1, ece_tx_task, ifp);
	TASK_INIT(&sc->sc_cleanup_task, 2, ece_cleanup_task, sc);
	sc->sc_tq = taskqueue_create_fast("ece_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(sc->dev, "could not create taskqueue\n");
		err = ENXIO;
		goto out;
	}

	ether_ifattach(ifp, eaddr);

	/*
	 * Activate interrupts.
	 */
	err = bus_setup_intr(dev, sc->irq_res_rec, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr, sc, &sc->intrhand);
	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	err = bus_setup_intr(dev, sc->irq_res_status,
	    INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr_status, sc, &sc->intrhand_status);
	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	err = bus_setup_intr(dev, sc->irq_res_qf, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr_qf, sc, &sc->intrhand_qf);
	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	err = bus_setup_intr(dev, sc->irq_res_tx, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr_tx, sc, &sc->intrhand_tx);
	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	/* Enable all interrupt sources. */
	write_4(sc, INTERRUPT_MASK, 0x00000000);

	/* Enable port 0. */
	write_4(sc, PORT_0_CONFIG, read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE));

	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

out:
	if (err)
		ece_deactivate(dev);
	if (err && ifp)
		if_free(ifp);
	return (err);
}

static int
ece_detach(device_t dev)
{
	struct ece_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->ifp;

	ecestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	ece_deactivate(dev);
	return (0);
}

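/*
 * bus_dmamap_load() callback: busdma hands us the resolved segment
 * list.  The ring loads below always fit in a single segment, so we
 * only record that segment's bus address for the caller.
 */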
static void
ece_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	u_int32_t *paddr;

	KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

static int
ece_alloc_desc_dma_tx(struct ece_softc *sc)
{
	int i;
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    sizeof(eth_tx_desc_t) * ECE_MAX_TX_BUFFERS, /* maxsize */
	    1,				/* nsegments */
	    sizeof(eth_tx_desc_t) * ECE_MAX_TX_BUFFERS, /* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_tx);	/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create TX descriptor dma tag\n");
		return (ENXIO);
	}

	/* Allocate memory for the TX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_tx,
	    (void**)&(sc->desc_tx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &(sc->dmamap_ring_tx));
	if (error) {
		device_printf(sc->dev, "failed to allocate DMA memory\n");
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = 0;
		return (ENXIO);
	}

	/* Load the TX ring map. */
	error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    sc->desc_tx, sizeof(eth_tx_desc_t) * ECE_MAX_TX_BUFFERS,
	    ece_getaddr, &(sc->ring_paddr_tx), BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dev, "can't load descriptor\n");
		bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
		    sc->dmamap_ring_tx);
		sc->desc_tx = NULL;
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = 0;
		return (ENXIO);
	}

	/* Allocate a busdma tag for TX mbufs.  No alignment restriction. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES * MAX_FRAGMENT,	/* maxsize */
	    MAX_FRAGMENT,		/* nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_tx);	/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
		/* Create a dma map for each descriptor. */
		error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
		    &(sc->tx_desc[i].dmamap));
		if (error) {
			device_printf(sc->dev,
			    "failed to create map for mbuf\n");
			return (ENXIO);
		}
	}
	return (0);
}

static void
ece_free_desc_dma_tx(struct ece_softc *sc)
{
	int i;

	for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
		if (sc->tx_desc[i].buff) {
			m_freem(sc->tx_desc[i].buff);
			sc->tx_desc[i].buff = NULL;
		}
	}

	if (sc->dmamap_ring_tx) {
		bus_dmamap_unload(sc->dmatag_data_tx, sc->dmamap_ring_tx);
		if (sc->desc_tx) {
			bus_dmamem_free(sc->dmatag_data_tx,
			    sc->desc_tx, sc->dmamap_ring_tx);
		}
		sc->dmamap_ring_tx = 0;
	}

	if (sc->dmatag_data_tx) {
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = 0;
	}

	if (sc->dmatag_ring_tx) {
		for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
			bus_dmamap_destroy(sc->dmatag_ring_tx,
			    sc->tx_desc[i].dmamap);
			sc->tx_desc[i].dmamap = 0;
		}
		bus_dma_tag_destroy(sc->dmatag_ring_tx);
		sc->dmatag_ring_tx = 0;
	}
}

static int
ece_alloc_desc_dma_rx(struct ece_softc *sc)
{
	int error;
	int i;

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    /* maxsize, nsegments */
	    sizeof(eth_rx_desc_t) * ECE_MAX_RX_BUFFERS, 1,
	    /* maxsegsz, flags */
	    sizeof(eth_rx_desc_t) * ECE_MAX_RX_BUFFERS, 0,
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_rx);	/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create RX descriptor dma tag\n");
		return (ENXIO);
	}

	/* Allocate the RX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_rx,
	    (void**)&(sc->desc_rx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &(sc->dmamap_ring_rx));
	if (error) {
		device_printf(sc->dev, "failed to allocate DMA memory\n");
		return (ENXIO);
	}

	/* Load the RX ring map. */
	error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
	    sc->desc_rx, sizeof(eth_rx_desc_t) * ECE_MAX_RX_BUFFERS,
	    ece_getaddr, &(sc->ring_paddr_rx), BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dev, "can't load descriptor\n");
		bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
		    sc->dmamap_ring_rx);
		bus_dma_tag_destroy(sc->dmatag_data_rx);
		sc->desc_rx = NULL;
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_rx);	/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
		error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
		    &sc->rx_desc[i].dmamap);
		if (error) {
			device_printf(sc->dev,
			    "failed to create map for mbuf\n");
			return (ENXIO);
		}
	}

	error = bus_dmamap_create(sc->dmatag_ring_rx, 0, &sc->rx_sparemap);
	if (error) {
		device_printf(sc->dev, "failed to create spare map\n");
		return (ENXIO);
	}

	return (0);
}

static void
ece_free_desc_dma_rx(struct ece_softc *sc)
{
	int i;

	for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
		if (sc->rx_desc[i].buff) {
			m_freem(sc->rx_desc[i].buff);
			sc->rx_desc[i].buff = NULL;
		}
	}

	if (sc->dmatag_data_rx) {
		bus_dmamap_unload(sc->dmatag_data_rx, sc->dmamap_ring_rx);
		bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
		    sc->dmamap_ring_rx);
		bus_dma_tag_destroy(sc->dmatag_data_rx);
		sc->dmatag_data_rx = 0;
		sc->dmamap_ring_rx = 0;
		sc->desc_rx = 0;
	}

	if (sc->dmatag_ring_rx) {
		for (i = 0; i < ECE_MAX_RX_BUFFERS; i++)
			bus_dmamap_destroy(sc->dmatag_ring_rx,
			    sc->rx_desc[i].dmamap);
		bus_dmamap_destroy(sc->dmatag_ring_rx, sc->rx_sparemap);
		bus_dma_tag_destroy(sc->dmatag_ring_rx);
		sc->dmatag_ring_rx = 0;
	}
}

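/*
 * Attach a fresh mbuf cluster to an RX descriptor.  The cluster is
 * first loaded into rx_sparemap; only when that succeeds are the
 * spare map and the descriptor's map swapped, so a failed allocation
 * never leaves a ring entry without a mapped buffer.
 */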
static int
ece_new_rxbuf(struct ece_softc *sc, struct rx_desc_info *descinfo)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	bus_dmamap_t map;
	int error;
	int nsegs;
	bus_dma_tag_t tag;

	tag = sc->dmatag_ring_rx;

	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);

	new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf_sg(tag, sc->rx_sparemap, new_mbuf,
	    seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1) {
		m_freem(new_mbuf);
		return (ENOBUFS);
	}

	if (descinfo->buff != NULL) {
		bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, descinfo->dmamap);
	}

	map = descinfo->dmamap;
	descinfo->dmamap = sc->rx_sparemap;
	sc->rx_sparemap = map;

	bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_PREREAD);

	descinfo->buff = new_mbuf;
	descinfo->desc->data_ptr = seg->ds_addr;
	descinfo->desc->length = seg->ds_len - 2;

	return (0);
}

static int
ece_allocate_dma(struct ece_softc *sc)
{
	eth_tx_desc_t *desctx;
	eth_rx_desc_t *descrx;
	int i;
	int error;

	/* Create the parent tag for TX and RX. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_parent_tag);
	if (error)
		return (error);

	error = ece_alloc_desc_dma_tx(sc);
	if (error)
		return (error);

	for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
		desctx = (eth_tx_desc_t *)(&sc->desc_tx[i]);
		memset(desctx, 0, sizeof(eth_tx_desc_t));
		desctx->length = MAX_PACKET_LEN;
		desctx->cown = 1;
		if (i == ECE_MAX_TX_BUFFERS - 1)
			desctx->eor = 1;
	}

	error = ece_alloc_desc_dma_rx(sc);
	if (error)
		return (error);

	for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
		descrx = &(sc->desc_rx[i]);
		memset(descrx, 0, sizeof(eth_rx_desc_t));
		sc->rx_desc[i].desc = descrx;
		sc->rx_desc[i].buff = 0;
		error = ece_new_rxbuf(sc, &(sc->rx_desc[i]));
		if (error)
			return (error);

		if (i == ECE_MAX_RX_BUFFERS - 1)
			descrx->eor = 1;
	}
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->last_rx = 0;
	sc->desc_curr_tx = 0;

	return (0);
}

static int
ece_activate(device_t dev)
{
	struct ece_softc *sc;
	int err;
	uint32_t mac_port_config;

	sc = device_get_softc(dev);

	initial_switch_config = read_4(sc, SWITCH_CONFIG);
	initial_cpu_config = read_4(sc, CPU_PORT_CONFIG);
	initial_port0_config = read_4(sc, MAC_PORT_0_CONFIG);
	initial_port1_config = read_4(sc, MAC_PORT_1_CONFIG);

	/* Disable port 0. */
	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
	mac_port_config |= (PORT_DISABLE);
	write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);

	/* Disable port 1. */
	mac_port_config = read_4(sc, MAC_PORT_1_CONFIG);
	mac_port_config |= (PORT_DISABLE);
	write_4(sc, MAC_PORT_1_CONFIG, mac_port_config);

	err = ece_allocate_dma(sc);
	if (err) {
		device_printf(sc->dev, "failed allocating dma\n");
		return (ENXIO);
	}

	write_4(sc, TS_DESCRIPTOR_POINTER, sc->ring_paddr_tx);
	write_4(sc, TS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_tx);

	write_4(sc, FS_DESCRIPTOR_POINTER, sc->ring_paddr_rx);
	write_4(sc, FS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_rx);

	write_4(sc, FS_DMA_CONTROL, 1);

	return (0);
}

static void
ece_deactivate(device_t dev)
{
	struct ece_softc *sc;

	sc = device_get_softc(dev);

	if (sc->intrhand)
		bus_teardown_intr(dev, sc->irq_res_rec, sc->intrhand);
	sc->intrhand = 0;

	if (sc->intrhand_qf)
		bus_teardown_intr(dev, sc->irq_res_qf, sc->intrhand_qf);
	sc->intrhand_qf = 0;

	if (sc->intrhand_status)
		bus_teardown_intr(dev, sc->irq_res_status,
		    sc->intrhand_status);
	sc->intrhand_status = 0;

	if (sc->intrhand_tx)
		bus_teardown_intr(dev, sc->irq_res_tx, sc->intrhand_tx);
	sc->intrhand_tx = 0;

	bus_generic_detach(sc->dev);
	if (sc->miibus)
		device_delete_child(sc->dev, sc->miibus);
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
	sc->mem_res = 0;

	if (sc->irq_res_rec)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res_rec), sc->irq_res_rec);

	if (sc->irq_res_qf)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res_qf), sc->irq_res_qf);

	if (sc->irq_res_status)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res_status), sc->irq_res_status);

	if (sc->irq_res_tx)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res_tx), sc->irq_res_tx);

	sc->irq_res_rec = 0;
	sc->irq_res_qf = 0;
	sc->irq_res_status = 0;
	sc->irq_res_tx = 0;
	ECE_TXLOCK_DESTROY(sc);
	ECE_RXLOCK_DESTROY(sc);

	ece_free_desc_dma_tx(sc);
	ece_free_desc_dma_rx(sc);
}

/*
 * Change media according to request.
 */
static int
ece_ifmedia_upd(struct ifnet *ifp)
{
	struct ece_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	int error;

	mii = device_get_softc(sc->miibus);
	ECE_LOCK(sc);
	error = mii_mediachg(mii);
	ECE_UNLOCK(sc);
	return (error);
}

/*
 * Notify the world which media we're using.
 */
static void
ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ece_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ECE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ECE_UNLOCK(sc);
}

static void
ece_tick(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->tick_ch, hz, ece_tick, sc);
}

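/*
 * ARL (address resolution) table access.  A lookup is started by
 * writing a "search start" (0x1) or "search next" (0x2) command to
 * ARL_TABLE_ACCESS_CONTROL_0, then CONTROL_1 is polled (bounded at
 * 0x1000 iterations, bit 0 presumably being a command-complete flag)
 * before the entry is read back through CONTROL_1/CONTROL_2.
 */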
static uint32_t
read_mac_entry(struct ece_softc *ec, uint8_t *mac_result, int first)
{
	uint32_t ii;
	struct arl_table_entry_t entry;
	uint32_t *entry_val;

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);
	if (first)
		write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x1);
	else
		write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x2);

	for (ii = 0; ii < 0x1000; ii++)
		if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) & (0x1))
			break;

	entry_val = (uint32_t *)(&entry);
	entry_val[0] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_1);
	entry_val[1] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_2);

	if (mac_result)
		memcpy(mac_result, entry.mac_addr, ETHER_ADDR_LEN);

	return (entry.table_end);
}

static uint32_t
write_arl_table_entry(struct ece_softc *ec,
    uint32_t filter,
    uint32_t vlan_mac,
    uint32_t vlan_gid,
    uint32_t age_field,
    uint32_t port_map,
    const uint8_t *mac_addr)
{
	uint32_t ii;
	uint32_t *entry_val;
	struct arl_table_entry_t entry;

	memset(&entry, 0, sizeof(entry));

	entry.filter = filter;
	entry.vlan_mac = vlan_mac;
	entry.vlan_gid = vlan_gid;
	entry.age_field = age_field;
	entry.port_map = port_map;
	memcpy(entry.mac_addr, mac_addr, ETHER_ADDR_LEN);

	entry_val = (uint32_t *)(&entry);

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, entry_val[0]);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, entry_val[1]);

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, ARL_WRITE_COMMAND);

	for (ii = 0; ii < 0x1000; ii++)
		if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) &
		    ARL_COMMAND_COMPLETE)
			return (1); /* Write OK. */

	/* Write failed. */
	return (0);
}

static void
remove_mac_entry(struct ece_softc *sc, uint8_t *mac)
{

	/* An invalid age_field erases this entry. */
	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    INVALID_ENTRY, VLAN0_GROUP,
	    mac);
}

static void
add_mac_entry(struct ece_softc *sc, uint8_t *mac)
{

	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    NEW_ENTRY, VLAN0_GROUP,
	    mac);
}

/*
 * The behavior of ARL table reading and deletion is not well defined
 * in the documentation.  To be safe, all MAC addresses are first
 * collected on a list and then deleted.
 */
static void
clear_mac_entries(struct ece_softc *ec, int include_this_mac)
{
	int table_end;
	struct mac_list *temp;
	struct mac_list *mac_list_header;
	struct mac_list *current;
	uint8_t mac[ETHER_ADDR_LEN];

	current = 0;
	mac_list_header = 0;

	table_end = read_mac_entry(ec, mac, 1);
	while (!table_end) {
		if (!include_this_mac &&
		    memcmp(mac, vlan0_mac, ETHER_ADDR_LEN) == 0) {
			/* Read the next entry. */
			table_end = read_mac_entry(ec, mac, 0);
			continue;
		}

		temp = malloc(sizeof(struct mac_list), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (temp == NULL)
			break;
		memcpy(temp->mac_addr, mac, ETHER_ADDR_LEN);
		temp->next = 0;
		if (mac_list_header) {
			current->next = temp;
			current = temp;
		} else {
			mac_list_header = temp;
			current = temp;
		}
		/* Read the next entry. */
		table_end = read_mac_entry(ec, mac, 0);
	}

	current = mac_list_header;

	while (current) {
		remove_mac_entry(ec, current->mac_addr);
		temp = current;
		current = current->next;
		free(temp, M_DEVBUF);
	}
}

static int
configure_lan_port(struct ece_softc *sc, int phy_type)
{
	uint32_t sw_config;
	uint32_t mac_port_config;

	/*
	 * Configure the switch.
	 */
	sw_config = read_4(sc, SWITCH_CONFIG);
	/* Enable fast aging. */
	sw_config |= FAST_AGING;
	/* Enable IVL learning. */
	sw_config |= IVL_LEARNING;
	/* Disable hardware NAT. */
	sw_config &= ~(HARDWARE_NAT);

	sw_config |= SKIP_L2_LOOKUP_PORT_0 | SKIP_L2_LOOKUP_PORT_1 | NIC_MODE;

	write_4(sc, SWITCH_CONFIG, sw_config);

	sw_config = read_4(sc, SWITCH_CONFIG);

	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);

	if (!(mac_port_config & 0x1) || (mac_port_config & 0x2))
		if_printf(sc->ifp, "Link Down\n");
	else
		write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
	return (0);
}

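/*
 * Judging from the shifts and masks used below, the PVID register
 * packs one 3-bit default-VLAN index per port: bits 2:0 for MAC
 * port 0, bits 6:4 for MAC port 1 and bits 10:8 for the CPU port.
 * Each field is cleared and then rewritten.
 */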
static void
set_pvid(struct ece_softc *sc, int port0, int port1, int cpu)
{
	uint32_t val;

	val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 0));
	write_4(sc, VLAN_PORT_PVID, val);
	val = read_4(sc, VLAN_PORT_PVID) | ((port0) & 0x07);
	write_4(sc, VLAN_PORT_PVID, val);

	val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 4));
	write_4(sc, VLAN_PORT_PVID, val);
	val = read_4(sc, VLAN_PORT_PVID) | (((port1) & 0x07) << 4);
	write_4(sc, VLAN_PORT_PVID, val);

	val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 8));
	write_4(sc, VLAN_PORT_PVID, val);
	val = read_4(sc, VLAN_PORT_PVID) | (((cpu) & 0x07) << 8);
	write_4(sc, VLAN_PORT_PVID, val);
}

/* VLAN related functions. */
static void
set_vlan_vid(struct ece_softc *sc, int vlan)
{
	const uint32_t regs[] = {
	    VLAN_VID_0_1,
	    VLAN_VID_0_1,
	    VLAN_VID_2_3,
	    VLAN_VID_2_3,
	    VLAN_VID_4_5,
	    VLAN_VID_4_5,
	    VLAN_VID_6_7,
	    VLAN_VID_6_7
	};

	const int vids[] = {
	    VLAN0_VID,
	    VLAN1_VID,
	    VLAN2_VID,
	    VLAN3_VID,
	    VLAN4_VID,
	    VLAN5_VID,
	    VLAN6_VID,
	    VLAN7_VID
	};

	uint32_t val;
	uint32_t reg;
	int vid;

	reg = regs[vlan];
	vid = vids[vlan];

	if (vlan & 1) {
		/* Odd VLANs occupy the low 12 bits of the register. */
		val = read_4(sc, reg);
		write_4(sc, reg, val & (~(0xFFF << 0)));
		val = read_4(sc, reg);
		write_4(sc, reg, val | ((vid & 0xFFF) << 0));
	} else {
		/* Even VLANs occupy bits 23:12. */
		val = read_4(sc, reg);
		write_4(sc, reg, val & (~(0xFFF << 12)));
		val = read_4(sc, reg);
		write_4(sc, reg, val | ((vid & 0xFFF) << 12));
	}
}

static void
set_vlan_member(struct ece_softc *sc, int vlan)
{
	unsigned char shift;
	uint32_t val;
	int group;
	const int groups[] = {
	    VLAN0_GROUP,
	    VLAN1_GROUP,
	    VLAN2_GROUP,
	    VLAN3_GROUP,
	    VLAN4_GROUP,
	    VLAN5_GROUP,
	    VLAN6_GROUP,
	    VLAN7_GROUP
	};

	group = groups[vlan];

	shift = vlan * 3;
	val = read_4(sc, VLAN_MEMBER_PORT_MAP) & (~(0x7 << shift));
	write_4(sc, VLAN_MEMBER_PORT_MAP, val);
	val = read_4(sc, VLAN_MEMBER_PORT_MAP);
	write_4(sc, VLAN_MEMBER_PORT_MAP, val | ((group & 0x7) << shift));
}

static void
set_vlan_tag(struct ece_softc *sc, int vlan)
{
	unsigned char shift;
	uint32_t val;
	int tag = 0;

	shift = vlan * 3;
	val = read_4(sc, VLAN_TAG_PORT_MAP) & (~(0x7 << shift));
	write_4(sc, VLAN_TAG_PORT_MAP, val);
	val = read_4(sc, VLAN_TAG_PORT_MAP);
	write_4(sc, VLAN_TAG_PORT_MAP, val | ((tag & 0x7) << shift));
}

static int
configure_cpu_port(struct ece_softc *sc)
{
	uint32_t cpu_port_config;
	int i;

	cpu_port_config = read_4(sc, CPU_PORT_CONFIG);
	/* Disable SA learning. */
	cpu_port_config |= (SA_LEARNING_DISABLE);
	/* Set data offset + 2. */
	cpu_port_config &= ~(1U << 31);

	write_4(sc, CPU_PORT_CONFIG, cpu_port_config);

	if (!write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    STATIC_ENTRY, VLAN0_GROUP,
	    vlan0_mac))
		return (1);

	set_pvid(sc, PORT0_PVID, PORT1_PVID, CPU_PORT_PVID);

	for (i = 0; i < 8; i++) {
		set_vlan_vid(sc, i);
		set_vlan_member(sc, i);
		set_vlan_tag(sc, i);
	}

	/* Disable all interrupt status sources. */
	write_4(sc, INTERRUPT_MASK, 0xffff1fff);

	/* Clear previous interrupt sources. */
	write_4(sc, INTERRUPT_STATUS, 0x00001FFF);

	write_4(sc, TS_DMA_CONTROL, 0);
	write_4(sc, FS_DMA_CONTROL, 0);
	return (0);
}

static int
hardware_init(struct ece_softc *sc)
{
	int phy_type;

	phy_type = get_phy_type(sc);
	/* Currently only the IC+ PHY is supported. */
	if (phy_type != IC_PLUS_PHY) {
		device_printf(sc->dev, "PHY type is not supported (%d)\n",
		    phy_type);
		return (-1);
	}
	configure_lan_port(sc, phy_type);
	configure_cpu_port(sc);
	return (0);
}

static void
set_mac_address(struct ece_softc *sc, const uint8_t *mac, int mac_len)
{

	/* An invalid age_field erases the old entry. */
	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    INVALID_ENTRY, VLAN0_GROUP,
	    mac);
	memcpy(vlan0_mac, mac, ETHER_ADDR_LEN);

	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    STATIC_ENTRY, VLAN0_GROUP,
	    mac);
}

static void
ece_set_mac(struct ece_softc *sc, u_char *eaddr)
{

	memcpy(vlan0_mac, eaddr, ETHER_ADDR_LEN);
	set_mac_address(sc, eaddr, ETHER_ADDR_LEN);
}

/*
 * TODO: the device does not store its MAC address.  We should read
 * the configuration stored in flash, but the format depends on the
 * boot loader used.
 */
static int
ece_get_mac(struct ece_softc *sc, u_char *eaddr)
{

	return (ENXIO);
}

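/*
 * RX completion.  The hardware advances FS_DESCRIPTOR_POINTER as it
 * fills descriptors; each eth_rx_desc_t occupies 16 bytes (hence the
 * shift by 4), so the current hardware index is
 * (pointer - ring base) >> 4.  Everything between last_rx and that
 * index is ready, with the cown (CPU ownership) bit confirming each
 * individual descriptor.
 */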
static void
ece_intr_rx_locked(struct ece_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *mb;
	struct rx_desc_info *rxdesc;
	eth_rx_desc_t *desc;
	int fssd_curr;
	int fssd;
	int i;
	int idx;
	int rxcount;
	uint32_t status;

	fssd_curr = read_4(sc, FS_DESCRIPTOR_POINTER);

	fssd = (fssd_curr - (uint32_t)sc->ring_paddr_rx) >> 4;

	desc = sc->rx_desc[sc->last_rx].desc;

	/* Prepare to read the data in the ring. */
	bus_dmamap_sync(sc->dmatag_ring_rx,
	    sc->dmamap_ring_rx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (fssd > sc->last_rx)
		rxcount = fssd - sc->last_rx;
	else if (fssd < sc->last_rx)
		rxcount = (ECE_MAX_RX_BUFFERS - sc->last_rx) + fssd;
	else {
		if (desc->cown == 0)
			return;
		else
			rxcount = ECE_MAX_RX_BUFFERS;
	}

	for (i = 0; i < rxcount; i++) {
		status = desc->cown;
		if (!status)
			break;

		idx = sc->last_rx;
		rxdesc = &sc->rx_desc[idx];
		mb = rxdesc->buff;

		if (desc->length < ETHER_MIN_LEN - ETHER_CRC_LEN ||
		    desc->length > ETHER_MAX_LEN - ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN) {
			ifp->if_ierrors++;
			desc->cown = 0;
			desc->length = MCLBYTES - 2;
			/*
			 * Invalid packet; skip it and process the
			 * next packet.
			 */
			continue;
		}

		if (ece_new_rxbuf(sc, rxdesc) != 0) {
			ifp->if_iqdrops++;
			desc->cown = 0;
			desc->length = MCLBYTES - 2;
			break;
		}

		/*
		 * The device writes to address + 2, so we need to
		 * adjust the mbuf data pointer once the packet has
		 * been received.
		 */
		mb->m_data += 2;
		mb->m_len = mb->m_pkthdr.len = desc->length;

		mb->m_flags |= M_PKTHDR;
		mb->m_pkthdr.rcvif = ifp;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* Check for a valid checksum. */
			if (!desc->l4f && desc->prot != 3) {
				mb->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				mb->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				mb->m_pkthdr.csum_data = 0xffff;
			}
		}
		ECE_RXUNLOCK(sc);
		(*ifp->if_input)(ifp, mb);
		ECE_RXLOCK(sc);

		desc->cown = 0;
		desc->length = MCLBYTES - 2;

		bus_dmamap_sync(sc->dmatag_ring_rx,
		    sc->dmamap_ring_rx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (sc->last_rx == ECE_MAX_RX_BUFFERS - 1)
			sc->last_rx = 0;
		else
			sc->last_rx++;

		desc = sc->rx_desc[sc->last_rx].desc;
	}

	/* Sync the updated flags. */
	bus_dmamap_sync(sc->dmatag_ring_rx,
	    sc->dmamap_ring_rx,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
ece_intr_task(void *arg, int pending __unused)
{
	struct ece_softc *sc = arg;

	ECE_RXLOCK(sc);
	ece_intr_rx_locked(sc, -1);
	ECE_RXUNLOCK(sc);
}

static void
ece_intr(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		write_4(sc, FS_DMA_CONTROL, 0);
		return;
	}

	taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}

static void
ece_intr_status(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	int stat;

	stat = read_4(sc, INTERRUPT_STATUS);

	write_4(sc, INTERRUPT_STATUS, stat);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((stat & ERROR_MASK) != 0)
			ifp->if_iqdrops++;
	}
}

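/*
 * Reclaim completed TX descriptors.  The hardware returns a
 * descriptor by setting its cown bit, so walk from tx_cons towards
 * tx_prod, unloading the DMA map and freeing the mbuf of every
 * returned descriptor, and stop at the first one still owned by the
 * switch.
 */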
static void
ece_cleanup_locked(struct ece_softc *sc)
{
	eth_tx_desc_t *desc;

	if (sc->tx_cons == sc->tx_prod)
		return;

	/* Prepare to read the ring (owner bit). */
	bus_dmamap_sync(sc->dmatag_ring_tx,
	    sc->dmamap_ring_tx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->tx_cons != sc->tx_prod) {
		desc = sc->tx_desc[sc->tx_cons].desc;
		if (desc->cown != 0) {
			struct tx_desc_info *td = &(sc->tx_desc[sc->tx_cons]);
			/* We are finished with this descriptor ... */
			bus_dmamap_sync(sc->dmatag_data_tx, td->dmamap,
			    BUS_DMASYNC_POSTWRITE);
			/* ... and unload, so we can reuse. */
			bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
			m_freem(td->buff);
			td->buff = NULL;
			sc->tx_cons = (sc->tx_cons + 1) % ECE_MAX_TX_BUFFERS;
		} else {
			break;
		}
	}
}

static void
ece_cleanup_task(void *arg, int pending __unused)
{
	struct ece_softc *sc = arg;

	ECE_CLEANUPLOCK(sc);
	ece_cleanup_locked(sc);
	ECE_CLEANUPUNLOCK(sc);
}

static void
ece_intr_tx(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		/* This should not happen; stop DMA. */
		write_4(sc, FS_DMA_CONTROL, 0);
		return;
	}
	taskqueue_enqueue(sc->sc_tq, &sc->sc_cleanup_task);
}

static void
ece_intr_qf(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		/* This should not happen; stop DMA. */
		write_4(sc, FS_DMA_CONTROL, 0);
		return;
	}
	taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
	write_4(sc, FS_DMA_CONTROL, 1);
}

/*
 * Reset and initialize the chip.
 */
static void
eceinit_locked(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	uint32_t cfg_reg;
	uint32_t cpu_port_config;
	uint32_t mac_port_config;
	int i;

	/*
	 * Wait (bounded, instead of spinning forever) for bit 17 of
	 * BIST_RESULT_TEST_0, presumably the BIST-done indication.
	 */
	for (i = 0; i < 0x2000; i++) {
		cfg_reg = read_4(sc, BIST_RESULT_TEST_0);
		if ((cfg_reg & (1 << 17)))
			break;
		DELAY(100);
	}

	/* Set to default values. */
	write_4(sc, SWITCH_CONFIG, 0x007AA7A1);
	write_4(sc, MAC_PORT_0_CONFIG, 0x00423D00);
	write_4(sc, MAC_PORT_1_CONFIG, 0x00423D80);
	write_4(sc, CPU_PORT_CONFIG, 0x004C0000);

	hardware_init(sc);

	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);

	/* Enable port 0. */
	mac_port_config &= (~(PORT_DISABLE));
	write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);

	cpu_port_config = read_4(sc, CPU_PORT_CONFIG);
	/* Enable the CPU port. */
	cpu_port_config &= ~(PORT_DISABLE);
	write_4(sc, CPU_PORT_CONFIG, cpu_port_config);

	/*
	 * Set the 'running' flag, clear the output active flag
	 * and attempt to start output.
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	/* Enable DMA. */
	write_4(sc, FS_DMA_CONTROL, 1);

	callout_reset(&sc->tick_ch, hz, ece_tick, sc);
}

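/*
 * Queue a single packet.  Each fragment gets its own descriptor,
 * with fs/ls marking the first and last segment.  The descriptors
 * are fully initialized first and their cown bits are cleared only
 * afterwards, in a second pass, so the hardware never sees a
 * half-built descriptor.
 */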
static inline int
ece_encap(struct ece_softc *sc, struct mbuf *m0)
{
	struct ifnet *ifp;
	bus_dma_segment_t segs[MAX_FRAGMENT];
	bus_dmamap_t mapp;
	eth_tx_desc_t *desc = NULL;
	int csum_flags;
	int desc_no;
	int error;
	int nsegs;
	int seg;

	ifp = sc->ifp;

	/* Fetch an unused map. */
	mapp = sc->tx_desc[sc->tx_prod].dmamap;

	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, mapp,
	    m0, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	desc = &(sc->desc_tx[sc->desc_curr_tx]);
	sc->tx_desc[sc->tx_prod].desc = desc;
	sc->tx_desc[sc->tx_prod].buff = m0;
	desc_no = sc->desc_curr_tx;

	for (seg = 0; seg < nsegs; seg++) {
		if (desc->cown == 0) {
			if_printf(ifp, "ERROR: descriptor is still used\n");
			return (-1);
		}

		desc->length = segs[seg].ds_len;
		desc->data_ptr = segs[seg].ds_addr;

		if (seg == 0)
			desc->fs = 1;
		else
			desc->fs = 0;
		if (seg == nsegs - 1)
			desc->ls = 1;
		else
			desc->ls = 0;

		csum_flags = m0->m_pkthdr.csum_flags;

		desc->fr = 1;
		desc->pmap = 1;
		desc->insv = 0;
		desc->ico = 0;
		desc->tco = 0;
		desc->uco = 0;
		desc->interrupt = 1;

		if (csum_flags & CSUM_IP) {
			desc->ico = 1;
			if (csum_flags & CSUM_TCP)
				desc->tco = 1;
			if (csum_flags & CSUM_UDP)
				desc->uco = 1;
		}

		desc++;
		sc->desc_curr_tx = (sc->desc_curr_tx + 1) % ECE_MAX_TX_BUFFERS;
		if (sc->desc_curr_tx == 0)
			desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
	}

	desc = sc->tx_desc[sc->tx_prod].desc;

	sc->tx_prod = (sc->tx_prod + 1) % ECE_MAX_TX_BUFFERS;

	/*
	 * After all descriptors are set, we set the flags to start the
	 * sending process.
	 */
	for (seg = 0; seg < nsegs; seg++) {
		desc->cown = 0;
		desc++;
		desc_no = (desc_no + 1) % ECE_MAX_TX_BUFFERS;
		if (desc_no == 0)
			desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
	}

	bus_dmamap_sync(sc->dmatag_data_tx, mapp, BUS_DMASYNC_PREWRITE);
	return (0);
}

/*
 * Dequeue packets and transmit.
 */
static void
ecestart_locked(struct ifnet *ifp)
{
	struct ece_softc *sc;
	struct mbuf *m0;
	uint32_t queued = 0;

	sc = ifp->if_softc;
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	bus_dmamap_sync(sc->dmatag_ring_tx,
	    sc->dmamap_ring_tx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Get a packet from the queue. */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (ece_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}
	if (queued) {
		bus_dmamap_sync(sc->dmatag_ring_tx, sc->dmamap_ring_tx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		write_4(sc, TS_DMA_CONTROL, 1);
	}
}

static void
eceinit(void *xsc)
{
	struct ece_softc *sc = xsc;

	ECE_LOCK(sc);
	eceinit_locked(sc);
	ECE_UNLOCK(sc);
}

static void
ece_tx_task(void *arg, int pending __unused)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	ecestart(ifp);
}

static void
ecestart(struct ifnet *ifp)
{
	struct ece_softc *sc = ifp->if_softc;

	ECE_TXLOCK(sc);
	ecestart_locked(ifp);
	ECE_TXUNLOCK(sc);
}

/*
 * Turn off interrupts and stop the NIC.  Can be called with sc->ifp
 * NULL, so be careful.
 */
static void
ecestop(struct ece_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t mac_port_config;

	write_4(sc, TS_DMA_CONTROL, 0);
	write_4(sc, FS_DMA_CONTROL, 0);

	if (ifp)
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	/* Disable port 0. */
	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
	mac_port_config |= (PORT_DISABLE);
	write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);

	/* Disable port 1. */
	mac_port_config = read_4(sc, MAC_PORT_1_CONFIG);
	mac_port_config |= (PORT_DISABLE);
	write_4(sc, MAC_PORT_1_CONFIG, mac_port_config);

	/* Disable all interrupt status sources. */
	write_4(sc, INTERRUPT_MASK, 0x00001FFF);

	/* Clear previous interrupt sources. */
	write_4(sc, INTERRUPT_STATUS, 0x00001FFF);

	write_4(sc, SWITCH_CONFIG, initial_switch_config);
	write_4(sc, CPU_PORT_CONFIG, initial_cpu_config);
	write_4(sc, MAC_PORT_0_CONFIG, initial_port0_config);
	write_4(sc, MAC_PORT_1_CONFIG, initial_port1_config);

	clear_mac_entries(sc, 1);
}

static void
ece_restart(struct ece_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	/* Enable port 0. */
	write_4(sc, PORT_0_CONFIG,
	    read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE));
	write_4(sc, INTERRUPT_MASK, 0x00000000);
	write_4(sc, FS_DMA_CONTROL, 1);
	callout_reset(&sc->tick_ch, hz, ece_tick, sc);
}

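/*
 * (Re)program the receive filter.  There is no conventional hash
 * filter on this switch: in promiscuous/allmulti mode broadcast and
 * multicast reception is simply enabled on MAC port 0; otherwise
 * every multicast address is entered individually into the ARL
 * table.
 */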
static void
set_filter(struct ece_softc *sc)
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	uint32_t mac_port_config;

	ifp = sc->ifp;

	clear_mac_entries(sc, 0);
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
		mac_port_config &= ~(DISABLE_BROADCAST_PACKET);
		mac_port_config &= ~(DISABLE_MULTICAST_PACKET);
		write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
		return;
	}
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		add_mac_entry(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
}

static int
eceioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ece_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ECE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ecestop(sc);
		} else {
			/* Reinitialize the card on any parameter change. */
			if ((ifp->if_flags & IFF_UP) &&
			    !(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ece_restart(sc);
		}
		ECE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ECE_LOCK(sc);
		set_filter(sc);
		ECE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			ECE_LOCK(sc);
			ECE_UNLOCK(sc);
		}
		/* FALLTHROUGH */
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ece_child_detached(device_t dev, device_t child)
{
	struct ece_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

/*
 * MII bus support routines.
 */
static int
ece_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ece_softc *sc;

	sc = device_get_softc(dev);
	return (phy_read(sc, phy, reg));
}

static int
ece_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ece_softc *sc;

	sc = device_get_softc(dev);
	phy_write(sc, phy, reg, data);
	return (0);
}

static device_method_t ece_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ece_probe),
	DEVMETHOD(device_attach,	ece_attach),
	DEVMETHOD(device_detach,	ece_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	ece_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ece_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ece_miibus_writereg),

	{ 0, 0 }
};

static driver_t ece_driver = {
	"ece",
	ece_methods,
	sizeof(struct ece_softc),
};

DRIVER_MODULE(ece, econaarm, ece_driver, ece_devclass, 0, 0);
DRIVER_MODULE(miibus, ece, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ece, miibus, 1, 1, 1);
MODULE_DEPEND(ece, ether, 1, 1, 1);