/*-
 * Copyright (c) 2009 Yohanes Nugroho <yohanes@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <arm/cavium/cns11xx/if_ecereg.h>
#include <arm/cavium/cns11xx/if_ecevar.h>
#include <arm/cavium/cns11xx/econa_var.h>

#include <machine/bus.h>
#include <machine/intr.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

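/*
 * Default station address for VLAN group 0.  It doubles as the key for
 * the static ARL table entry installed by configure_cpu_port() and is
 * replaced with the real address by ece_set_mac().
 */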
static uint8_t
vlan0_mac[ETHER_ADDR_LEN] = {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x19};

/*
 * The boot loader expects the hardware state to be the same when we
 * restart the device (warm boot), so we need to save the initial
 * config values.
 */
int initial_switch_config;
int initial_cpu_config;
int initial_port0_config;
int initial_port1_config;

static inline uint32_t
read_4(struct ece_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}

static inline void
write_4(struct ece_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}

#define	ECE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ECE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	ECE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
		 MTX_NETWORK_LOCK, MTX_DEF)

#define	ECE_TXLOCK(_sc)		mtx_lock(&(_sc)->sc_mtx_tx)
#define	ECE_TXUNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx_tx)
#define	ECE_TXLOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx_tx, device_get_nameunit(_sc->dev),	\
		 "ECE TX Lock", MTX_DEF)

#define	ECE_CLEANUPLOCK(_sc)	mtx_lock(&(_sc)->sc_mtx_cleanup)
#define	ECE_CLEANUPUNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx_cleanup)
#define	ECE_CLEANUPLOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx_cleanup, device_get_nameunit(_sc->dev),	\
		 "ECE cleanup Lock", MTX_DEF)

#define	ECE_RXLOCK(_sc)		mtx_lock(&(_sc)->sc_mtx_rx)
#define	ECE_RXUNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx_rx)
#define	ECE_RXLOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx_rx, device_get_nameunit(_sc->dev),	\
		 "ECE RX Lock", MTX_DEF)

#define	ECE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define	ECE_TXLOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx_tx);
#define	ECE_RXLOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx_rx);
#define	ECE_CLEANUPLOCK_DESTROY(_sc)	\
	mtx_destroy(&_sc->sc_mtx_cleanup);

#define	ECE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	ECE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
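/*
 * Locking model: the softc carries four mutexes.  sc_mtx covers the
 * init/ioctl/media paths, while the TX, RX and cleanup paths each take
 * their own lock so the taskqueue handlers do not contend with one
 * another or with the control path.
 */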

static devclass_t ece_devclass;

/* ifnet entry points */

static void	eceinit_locked(void *);
static void	ecestart_locked(struct ifnet *);

static void	eceinit(void *);
static void	ecestart(struct ifnet *);
static void	ecestop(struct ece_softc *);
static int	eceioctl(struct ifnet *ifp, u_long, caddr_t);

/* bus entry points */

static int	ece_probe(device_t dev);
static int	ece_attach(device_t dev);
static int	ece_detach(device_t dev);
static void	ece_intr(void *);
static void	ece_intr_qf(void *);
static void	ece_intr_status(void *xsc);

/* helper routines */
static int	ece_activate(device_t dev);
static void	ece_deactivate(device_t dev);
static int	ece_ifmedia_upd(struct ifnet *ifp);
static void	ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	ece_get_mac(struct ece_softc *sc, u_char *eaddr);
static void	ece_set_mac(struct ece_softc *sc, u_char *eaddr);
static int	configure_cpu_port(struct ece_softc *sc);
static int	configure_lan_port(struct ece_softc *sc, int phy_type);
static void	set_pvid(struct ece_softc *sc, int port0, int port1, int cpu);
static void	set_vlan_vid(struct ece_softc *sc, int vlan);
static void	set_vlan_member(struct ece_softc *sc, int vlan);
static void	set_vlan_tag(struct ece_softc *sc, int vlan);
static int	hardware_init(struct ece_softc *sc);
static void	ece_intr_rx_locked(struct ece_softc *sc, int count);

static void	ece_free_desc_dma_tx(struct ece_softc *sc);
static void	ece_free_desc_dma_rx(struct ece_softc *sc);

static void	ece_intr_task(void *arg, int pending __unused);
static void	ece_tx_task(void *arg, int pending __unused);
static void	ece_cleanup_task(void *arg, int pending __unused);

static int	ece_allocate_dma(struct ece_softc *sc);

static void	ece_intr_tx(void *xsc);

static void	clear_mac_entries(struct ece_softc *ec, int include_this_mac);

static uint32_t read_mac_entry(struct ece_softc *ec,
	    uint8_t *mac_result,
	    int first);

/* PHY-related functions. */
static inline int
phy_read(struct ece_softc *sc, int phy, int reg)
{
	int val;
	int ii;
	int status;

	write_4(sc, PHY_CONTROL, PHY_RW_OK);
	write_4(sc, PHY_CONTROL,
	    (PHY_ADDRESS(phy) | PHY_READ_COMMAND |
	    PHY_REGISTER(reg)));

	for (ii = 0; ii < 0x1000; ii++) {
		status = read_4(sc, PHY_CONTROL);
		if (status & PHY_RW_OK) {
			/*
			 * Clear the rw_ok status and clear the other
			 * bits.
			 */
			write_4(sc, PHY_CONTROL, PHY_RW_OK);
			val = PHY_GET_DATA(status);
			return (val);
		}
	}
	/* The read timed out; return 0 rather than stale data. */
	return (0);
}

static inline void
phy_write(struct ece_softc *sc, int phy, int reg, int data)
{
	int ii;

	write_4(sc, PHY_CONTROL, PHY_RW_OK);
	write_4(sc, PHY_CONTROL,
	    PHY_ADDRESS(phy) | PHY_REGISTER(reg) |
	    PHY_WRITE_COMMAND | PHY_DATA(data));
	for (ii = 0; ii < 0x1000; ii++) {
		if (read_4(sc, PHY_CONTROL) & PHY_RW_OK) {
			/*
			 * Clear the rw_ok status and clear the other
			 * bits.
			 */
			write_4(sc, PHY_CONTROL, PHY_RW_OK);
			return;
		}
	}
}
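/*
 * A minimal usage sketch (register numbers from <dev/mii/mii.h>):
 * reading a PHY identifier register over MDIO, the same way
 * get_phy_type() below does:
 *
 *	uint16_t id1;
 *
 *	id1 = phy_read(sc, 0, MII_PHYIDR1);	/+ PHY at address 0 +/
 */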

static int
get_phy_type(struct ece_softc *sc)
{
	uint16_t phy0_id = 0, phy1_id = 0;

	/*
	 * Use SMI (MDC/MDIO) to read PHY Identifier Register 1 of the
	 * PHYs at addresses 0 and 1.
	 */
	phy0_id = phy_read(sc, 0, 0x2);
	phy1_id = phy_read(sc, 1, 0x2);

	if ((phy0_id == 0xFFFF) && (phy1_id == 0x000F))
		return (ASIX_GIGA_PHY);
	else if ((phy0_id == 0x0243) && (phy1_id == 0x0243))
		return (TWO_SINGLE_PHY);
	else if ((phy0_id == 0xFFFF) && (phy1_id == 0x0007))
		return (VSC8601_GIGA_PHY);
	else if ((phy0_id == 0x0243) && (phy1_id == 0xFFFF))
		return (IC_PLUS_PHY);

	return (NOT_FOUND_PHY);
}

static int
ece_probe(device_t dev)
{

	device_set_desc(dev, "Econa Ethernet Controller");
	return (0);
}

static int
ece_attach(device_t dev)
{
	struct ece_softc *sc;
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	u_char eaddr[ETHER_ADDR_LEN];
	int err;
	int rid;
	uint32_t rnd;

	err = 0;

	sc = device_get_softc(dev);
	sc->dev = dev;

	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		goto out;

	power_on_network_interface();

	rid = 0;
	sc->irq_res_status = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_status == NULL)
		goto out;

	rid = 1;
	/* TSTC: Fm-Switch-Tx-Complete */
	sc->irq_res_tx = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_tx == NULL)
		goto out;

	rid = 2;
	/* FSRC: Fm-Switch-Rx-Complete */
	sc->irq_res_rec = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_rec == NULL)
		goto out;

	rid = 4;
	/* FSQF: Fm-Switch-Queue-Full */
	sc->irq_res_qf = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_qf == NULL)
		goto out;

	err = ece_activate(dev);
	if (err)
		goto out;

	/* Sysctls */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);

	ECE_LOCK_INIT(sc);

	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	if ((err = ece_get_mac(sc, eaddr)) != 0) {
		/* No MAC address configured.  Generate a random one. */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		rnd = arc4random();

		/* From if_ae.c/if_ate.c: */
		/*
		 * Set the OUI to a convenient locally assigned address.
		 * 'b' is 0x62, which has the locally assigned bit set
		 * and the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;
	}
	ece_set_mac(sc, eaddr);
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	/* Only one PHY at address 0 in this device. */
	err = mii_attach(dev, &sc->miibus, ifp, ece_ifmedia_upd,
	    ece_ifmedia_sts, BMSR_DEFCAPMASK, 0, MII_OFFSET_ANY, 0);
	if (err != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto out;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_capabilities = IFCAP_HWCSUM;

	ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_start = ecestart;
	ifp->if_ioctl = eceioctl;
	ifp->if_init = eceinit;
	ifp->if_snd.ifq_drv_maxlen = ECE_MAX_TX_BUFFERS - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ECE_MAX_TX_BUFFERS - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Create local taskq. */
	TASK_INIT(&sc->sc_intr_task, 0, ece_intr_task, sc);
	TASK_INIT(&sc->sc_tx_task, 1, ece_tx_task, ifp);
	TASK_INIT(&sc->sc_cleanup_task, 2, ece_cleanup_task, sc);
	sc->sc_tq = taskqueue_create_fast("ece_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(sc->dev, "could not create taskqueue\n");
		goto out;
	}

	ether_ifattach(ifp, eaddr);

	/*
	 * Activate interrupts.
	 */
	err = bus_setup_intr(dev, sc->irq_res_rec, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr, sc, &sc->intrhand);
	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	err = bus_setup_intr(dev, sc->irq_res_status,
	    INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr_status, sc, &sc->intrhand_status);
	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	err = bus_setup_intr(dev, sc->irq_res_qf, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr_qf, sc, &sc->intrhand_qf);
	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	err = bus_setup_intr(dev, sc->irq_res_tx, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr_tx, sc, &sc->intrhand_tx);
	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	ECE_TXLOCK_INIT(sc);
	ECE_RXLOCK_INIT(sc);
	ECE_CLEANUPLOCK_INIT(sc);

	/* Enable all interrupt sources. */
	write_4(sc, INTERRUPT_MASK, 0x00000000);

	/* Enable port 0. */
	write_4(sc, PORT_0_CONFIG,
	    read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE));

	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

out:
	if (err)
		ece_deactivate(dev);
	if (err && ifp)
		if_free(ifp);
	return (err);
}

static int
ece_detach(device_t dev)
{
	struct ece_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->ifp;

	ecestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	ece_deactivate(dev);
	return (0);
}

static void
ece_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	u_int32_t *paddr;

	KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}
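/*
 * ece_getaddr() is the usual one-segment busdma callback.  A sketch of
 * how the ring loads below use it to capture the ring's physical
 * address in a caller-supplied variable:
 *
 *	uint32_t paddr;
 *
 *	bus_dmamap_load(tag, map, vaddr, size, ece_getaddr, &paddr,
 *	    BUS_DMA_NOWAIT);
 */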

static int
ece_alloc_desc_dma_tx(struct ece_softc *sc)
{
	int i;
	int error;

	/* Allocate a busdma tag and DMA-safe memory for TX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    sizeof(eth_tx_desc_t) * ECE_MAX_TX_BUFFERS,	/* maxsize */
	    1,				/* nsegments */
	    sizeof(eth_tx_desc_t) * ECE_MAX_TX_BUFFERS,	/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_tx);	/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create TX descriptor tag\n");
		return (ENXIO);
	}

	/* Allocate memory for the TX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_tx,
	    (void **)&(sc->desc_tx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &(sc->dmamap_ring_tx));
	if (error) {
		device_printf(sc->dev, "failed to allocate DMA memory\n");
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = 0;
		return (ENXIO);
	}

	/* Load the ring DMA map. */
	error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    sc->desc_tx,
	    sizeof(eth_tx_desc_t) * ECE_MAX_TX_BUFFERS,
	    ece_getaddr,
	    &(sc->ring_paddr_tx), BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dev, "can't load descriptor\n");
		bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
		    sc->dmamap_ring_tx);
		sc->desc_tx = NULL;
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = 0;
		return (ENXIO);
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES * MAX_FRAGMENT,	/* maxsize */
	    MAX_FRAGMENT,		/* nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_tx);	/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
		/* Create a DMA map for each descriptor. */
		error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
		    &(sc->tx_desc[i].dmamap));
		if (error) {
			device_printf(sc->dev,
			    "failed to create map for mbuf\n");
			return (ENXIO);
		}
	}
	return (0);
}

static void
ece_free_desc_dma_tx(struct ece_softc *sc)
{
	int i;

	for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
		if (sc->tx_desc[i].buff) {
			m_freem(sc->tx_desc[i].buff);
			sc->tx_desc[i].buff = 0;
		}
	}

	if (sc->dmamap_ring_tx) {
		bus_dmamap_unload(sc->dmatag_data_tx, sc->dmamap_ring_tx);
		if (sc->desc_tx) {
			bus_dmamem_free(sc->dmatag_data_tx,
			    sc->desc_tx, sc->dmamap_ring_tx);
		}
		sc->dmamap_ring_tx = 0;
	}

	if (sc->dmatag_data_tx) {
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = 0;
	}

	if (sc->dmatag_ring_tx) {
		for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
			bus_dmamap_destroy(sc->dmatag_ring_tx,
			    sc->tx_desc[i].dmamap);
			sc->tx_desc[i].dmamap = 0;
		}
		bus_dma_tag_destroy(sc->dmatag_ring_tx);
		sc->dmatag_ring_tx = 0;
	}
}

static int
ece_alloc_desc_dma_rx(struct ece_softc *sc)
{
	int error;
	int i;

	/* Allocate a busdma tag and DMA-safe memory for RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    /* maxsize, nsegments */
	    sizeof(eth_rx_desc_t) * ECE_MAX_RX_BUFFERS, 1,
	    /* maxsegsz, flags */
	    sizeof(eth_rx_desc_t) * ECE_MAX_RX_BUFFERS, 0,
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_rx);	/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create RX descriptor tag\n");
		return (ENXIO);
	}

	/* Allocate the RX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_rx,
	    (void **)&(sc->desc_rx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &(sc->dmamap_ring_rx));
	if (error) {
		device_printf(sc->dev, "failed to allocate DMA memory\n");
		return (ENXIO);
	}

	/* Load the ring DMA map. */
	error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
	    sc->desc_rx,
	    sizeof(eth_rx_desc_t) * ECE_MAX_RX_BUFFERS,
	    ece_getaddr,
	    &(sc->ring_paddr_rx), BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->dev, "can't load descriptor\n");
		bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
		    sc->dmamap_ring_rx);
		bus_dma_tag_destroy(sc->dmatag_data_rx);
		sc->desc_rx = NULL;
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_rx);	/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
		error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
		    &sc->rx_desc[i].dmamap);
		if (error) {
			device_printf(sc->dev,
			    "failed to create map for mbuf\n");
			return (ENXIO);
		}
	}

	error = bus_dmamap_create(sc->dmatag_ring_rx, 0, &sc->rx_sparemap);
	if (error) {
		device_printf(sc->dev, "failed to create spare map\n");
		return (ENXIO);
	}

	return (0);
}

static void
ece_free_desc_dma_rx(struct ece_softc *sc)
{
	int i;

	for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
		if (sc->rx_desc[i].buff) {
			m_freem(sc->rx_desc[i].buff);
			sc->rx_desc[i].buff = 0;
		}
	}

	if (sc->dmatag_data_rx) {
		bus_dmamap_unload(sc->dmatag_data_rx, sc->dmamap_ring_rx);
		bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
		    sc->dmamap_ring_rx);
		bus_dma_tag_destroy(sc->dmatag_data_rx);
		sc->dmatag_data_rx = 0;
		sc->dmamap_ring_rx = 0;
		sc->desc_rx = 0;
	}

	if (sc->dmatag_ring_rx) {
		for (i = 0; i < ECE_MAX_RX_BUFFERS; i++)
			bus_dmamap_destroy(sc->dmatag_ring_rx,
			    sc->rx_desc[i].dmamap);
		bus_dmamap_destroy(sc->dmatag_ring_rx, sc->rx_sparemap);
		bus_dma_tag_destroy(sc->dmatag_ring_rx);
		sc->dmatag_ring_rx = 0;
	}
}

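/*
 * Attach a fresh mbuf cluster to an RX descriptor.  The spare map is
 * loaded first and only swapped in once the load succeeds, so the
 * descriptor never loses its old buffer on failure.  The length is
 * ds_len - 2 because the controller stores frames at an offset of two
 * bytes (see the m_data adjustment in ece_intr_rx_locked()).
 */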
static int
ece_new_rxbuf(struct ece_softc *sc, struct rx_desc_info *descinfo)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	bus_dmamap_t map;
	int error;
	int nsegs;
	bus_dma_tag_t tag;

	tag = sc->dmatag_ring_rx;

	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);

	new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf_sg(tag, sc->rx_sparemap, new_mbuf,
	    seg, &nsegs, BUS_DMA_NOWAIT);

	/*
	 * Check the load result before looking at nsegs; nsegs is not
	 * meaningful on failure.  The tag was created with nsegments = 1,
	 * so a successful load cannot be split.
	 */
	if (error != 0 || nsegs != 1) {
		m_free(new_mbuf);
		return (ENOBUFS);
	}

	if (descinfo->buff != NULL) {
		bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, descinfo->dmamap);
	}

	map = descinfo->dmamap;
	descinfo->dmamap = sc->rx_sparemap;
	sc->rx_sparemap = map;

	bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_PREREAD);

	descinfo->buff = new_mbuf;
	descinfo->desc->data_ptr = seg->ds_addr;
	descinfo->desc->length = seg->ds_len - 2;

	return (0);
}

static int
ece_allocate_dma(struct ece_softc *sc)
{
	eth_tx_desc_t *desctx;
	eth_rx_desc_t *descrx;
	int i;
	int error;

	/* Create the parent tag for TX and RX. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_parent_tag);
	if (error)
		return (error);

	error = ece_alloc_desc_dma_tx(sc);
	if (error)
		return (error);

	for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
		desctx = (eth_tx_desc_t *)(&sc->desc_tx[i]);
		memset(desctx, 0, sizeof(eth_tx_desc_t));
		desctx->length = MAX_PACKET_LEN;
		desctx->cown = 1;	/* Initially owned by the CPU. */
		if (i == ECE_MAX_TX_BUFFERS - 1)
			desctx->eor = 1;	/* End of ring. */
	}

	error = ece_alloc_desc_dma_rx(sc);
	if (error)
		return (error);

	for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
		descrx = &(sc->desc_rx[i]);
		memset(descrx, 0, sizeof(eth_rx_desc_t));
		sc->rx_desc[i].desc = descrx;
		sc->rx_desc[i].buff = 0;
		ece_new_rxbuf(sc, &(sc->rx_desc[i]));

		if (i == ECE_MAX_RX_BUFFERS - 1)
			descrx->eor = 1;	/* End of ring. */
	}
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->last_rx = 0;
	sc->desc_curr_tx = 0;

	return (0);
}

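/*
 * Bring the controller to a known state: save the boot-time switch and
 * port configuration (restored in ecestop() for warm boot), disable
 * both MAC ports, then allocate the rings and point the TS/FS
 * descriptor registers at them.
 */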
static int
ece_activate(device_t dev)
{
	struct ece_softc *sc;
	int err;
	uint32_t mac_port_config;

	sc = device_get_softc(dev);

	initial_switch_config = read_4(sc, SWITCH_CONFIG);
	initial_cpu_config = read_4(sc, CPU_PORT_CONFIG);
	initial_port0_config = read_4(sc, MAC_PORT_0_CONFIG);
	initial_port1_config = read_4(sc, MAC_PORT_1_CONFIG);

	/* Disable Port 0. */
	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
	mac_port_config |= (PORT_DISABLE);
	write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);

	/* Disable Port 1. */
	mac_port_config = read_4(sc, MAC_PORT_1_CONFIG);
	mac_port_config |= (PORT_DISABLE);
	write_4(sc, MAC_PORT_1_CONFIG, mac_port_config);

	err = ece_allocate_dma(sc);
	if (err) {
		/* sc->ifp is not set up yet; report through the device. */
		device_printf(dev, "failed allocating dma\n");
		return (ENXIO);
	}

	write_4(sc, TS_DESCRIPTOR_POINTER, sc->ring_paddr_tx);
	write_4(sc, TS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_tx);

	write_4(sc, FS_DESCRIPTOR_POINTER, sc->ring_paddr_rx);
	write_4(sc, FS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_rx);

	write_4(sc, FS_DMA_CONTROL, 1);

	return (0);
}

static void
ece_deactivate(device_t dev)
{
	struct ece_softc *sc;

	sc = device_get_softc(dev);

	if (sc->intrhand)
		bus_teardown_intr(dev, sc->irq_res_rec, sc->intrhand);
	sc->intrhand = 0;

	if (sc->intrhand_qf)
		bus_teardown_intr(dev, sc->irq_res_qf, sc->intrhand_qf);
	sc->intrhand_qf = 0;

	if (sc->intrhand_status)
		bus_teardown_intr(dev, sc->irq_res_status,
		    sc->intrhand_status);
	sc->intrhand_status = 0;

	if (sc->intrhand_tx)
		bus_teardown_intr(dev, sc->irq_res_tx, sc->intrhand_tx);
	sc->intrhand_tx = 0;

	bus_generic_detach(sc->dev);
	if (sc->miibus)
		device_delete_child(sc->dev, sc->miibus);
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
	sc->mem_res = 0;

	if (sc->irq_res_rec)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res_rec), sc->irq_res_rec);

	if (sc->irq_res_qf)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res_qf), sc->irq_res_qf);

	if (sc->irq_res_status)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res_status), sc->irq_res_status);

	if (sc->irq_res_tx)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res_tx), sc->irq_res_tx);

	sc->irq_res_rec = 0;
	sc->irq_res_qf = 0;
	sc->irq_res_status = 0;
	sc->irq_res_tx = 0;
	ECE_TXLOCK_DESTROY(sc);
	ECE_RXLOCK_DESTROY(sc);

	ece_free_desc_dma_tx(sc);
	ece_free_desc_dma_rx(sc);
}

/*
 * Change media according to request.
 */
static int
ece_ifmedia_upd(struct ifnet *ifp)
{
	struct ece_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	int error;

	mii = device_get_softc(sc->miibus);
	ECE_LOCK(sc);
	error = mii_mediachg(mii);
	ECE_UNLOCK(sc);
	return (error);
}

/*
 * Notify the world which media we're using.
 */
static void
ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ece_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ECE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ECE_UNLOCK(sc);
}

static void
ece_tick(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->tick_ch, hz, ece_tick, sc);
}

static uint32_t
read_mac_entry(struct ece_softc *ec,
    uint8_t *mac_result,
    int first)
{
	uint32_t ii;
	struct arl_table_entry_t entry;
	uint32_t *entry_val;

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);
	if (first)
		write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x1);
	else
		write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x2);

	for (ii = 0; ii < 0x1000; ii++)
		if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) & (0x1))
			break;

	entry_val = (uint32_t *)(&entry);
	entry_val[0] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_1);
	entry_val[1] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_2);

	if (mac_result)
		memcpy(mac_result, entry.mac_addr, ETHER_ADDR_LEN);

	return (entry.table_end);
}

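/*
 * Write one ARL (address resolution logic) table entry.  The sequence
 * the hardware expects, as implemented below: clear the three access
 * registers, load the two 32-bit words of the packed entry into
 * CONTROL_1/CONTROL_2, issue ARL_WRITE_COMMAND through CONTROL_0, then
 * poll CONTROL_1 for ARL_COMMAND_COMPLETE.
 */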
static uint32_t
write_arl_table_entry(struct ece_softc *ec,
    uint32_t filter,
    uint32_t vlan_mac,
    uint32_t vlan_gid,
    uint32_t age_field,
    uint32_t port_map,
    const uint8_t *mac_addr)
{
	uint32_t ii;
	uint32_t *entry_val;
	struct arl_table_entry_t entry;

	memset(&entry, 0, sizeof(entry));

	entry.filter = filter;
	entry.vlan_mac = vlan_mac;
	entry.vlan_gid = vlan_gid;
	entry.age_field = age_field;
	entry.port_map = port_map;
	memcpy(entry.mac_addr, mac_addr, ETHER_ADDR_LEN);

	entry_val = (uint32_t *)(&entry);

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, entry_val[0]);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, entry_val[1]);

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, ARL_WRITE_COMMAND);

	for (ii = 0; ii < 0x1000; ii++)
		if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) &
		    ARL_COMMAND_COMPLETE)
			return (1); /* Write OK. */

	/* Write failed. */
	return (0);
}

static void
remove_mac_entry(struct ece_softc *sc,
    uint8_t *mac)
{

	/* An invalid age_field means: erase this entry. */
	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    INVALID_ENTRY, VLAN0_GROUP,
	    mac);
}

static void
add_mac_entry(struct ece_softc *sc,
    uint8_t *mac)
{

	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    NEW_ENTRY, VLAN0_GROUP,
	    mac);
}

/*
 * The behavior of ARL table reading and deletion is not well defined
 * in the documentation.  To be safe, all MAC addresses are first
 * collected in a list and then deleted.
 */
static void
clear_mac_entries(struct ece_softc *ec, int include_this_mac)
{
	int table_end;
	struct mac_list *temp;
	struct mac_list *mac_list_header;
	struct mac_list *current;
	uint8_t mac[ETHER_ADDR_LEN];

	current = 0;
	mac_list_header = 0;

	table_end = read_mac_entry(ec, mac, 1);
	while (!table_end) {
		if (!include_this_mac &&
		    memcmp(mac, vlan0_mac, ETHER_ADDR_LEN) == 0) {
			/* Read the next entry. */
			table_end = read_mac_entry(ec, mac, 0);
			continue;
		}

		temp = malloc(sizeof(struct mac_list), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (temp == NULL)
			break;
		memcpy(temp->mac_addr, mac, ETHER_ADDR_LEN);
		temp->next = 0;
		if (mac_list_header) {
			current->next = temp;
			current = temp;
		} else {
			mac_list_header = temp;
			current = temp;
		}
		/* Read the next entry. */
		table_end = read_mac_entry(ec, mac, 0);
	}

	current = mac_list_header;

	while (current) {
		remove_mac_entry(ec, current->mac_addr);
		temp = current;
		current = current->next;
		free(temp, M_DEVBUF);
	}
}

static int
configure_lan_port(struct ece_softc *sc, int phy_type)
{
	uint32_t sw_config;
	uint32_t mac_port_config;

	/*
	 * Configure the switch.
	 */
	sw_config = read_4(sc, SWITCH_CONFIG);
	/* Enable fast aging. */
	sw_config |= FAST_AGING;
	/* Enable IVL learning. */
	sw_config |= IVL_LEARNING;
	/* Disable hardware NAT. */
	sw_config &= ~(HARDWARE_NAT);

	sw_config |= SKIP_L2_LOOKUP_PORT_0 | SKIP_L2_LOOKUP_PORT_1 | NIC_MODE;

	write_4(sc, SWITCH_CONFIG, sw_config);

	sw_config = read_4(sc, SWITCH_CONFIG);

	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);

	if (!(mac_port_config & 0x1) || (mac_port_config & 0x2))
		if_printf(sc->ifp, "Link Down\n");
	else
		write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
	return (0);
}

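/*
 * Program the per-port PVIDs.  VLAN_PORT_PVID packs three 3-bit fields:
 * port 0 at bits 0-2, port 1 at bits 4-6 and the CPU port at bits 8-10,
 * which is why each field below is cleared and set with its own
 * read-modify-write pair.
 */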
static void
set_pvid(struct ece_softc *sc, int port0, int port1, int cpu)
{
	uint32_t val;

	val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 0));
	write_4(sc, VLAN_PORT_PVID, val);
	val = read_4(sc, VLAN_PORT_PVID) | ((port0) & 0x07);
	write_4(sc, VLAN_PORT_PVID, val);
	val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 4));
	write_4(sc, VLAN_PORT_PVID, val);
	val = read_4(sc, VLAN_PORT_PVID) | (((port1) & 0x07) << 4);
	write_4(sc, VLAN_PORT_PVID, val);

	val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 8));
	write_4(sc, VLAN_PORT_PVID, val);
	val = read_4(sc, VLAN_PORT_PVID) | (((cpu) & 0x07) << 8);
	write_4(sc, VLAN_PORT_PVID, val);
}

/* VLAN-related functions. */
static void
set_vlan_vid(struct ece_softc *sc, int vlan)
{
	const uint32_t regs[] = {
	    VLAN_VID_0_1,
	    VLAN_VID_0_1,
	    VLAN_VID_2_3,
	    VLAN_VID_2_3,
	    VLAN_VID_4_5,
	    VLAN_VID_4_5,
	    VLAN_VID_6_7,
	    VLAN_VID_6_7
	};

	const int vids[] = {
	    VLAN0_VID,
	    VLAN1_VID,
	    VLAN2_VID,
	    VLAN3_VID,
	    VLAN4_VID,
	    VLAN5_VID,
	    VLAN6_VID,
	    VLAN7_VID
	};

	uint32_t val;
	uint32_t reg;
	int vid;

	reg = regs[vlan];
	vid = vids[vlan];

	/*
	 * Each register holds two VIDs: odd VLANs use bits 0-11, even
	 * VLANs bits 12-23.
	 */
	if (vlan & 1) {
		val = read_4(sc, reg);
		write_4(sc, reg, val & (~(0xFFF << 0)));
		val = read_4(sc, reg);
		write_4(sc, reg, val | ((vid & 0xFFF) << 0));
	} else {
		val = read_4(sc, reg);
		write_4(sc, reg, val & (~(0xFFF << 12)));
		val = read_4(sc, reg);
		write_4(sc, reg, val | ((vid & 0xFFF) << 12));
	}
}

static void
set_vlan_member(struct ece_softc *sc, int vlan)
{
	unsigned char shift;
	uint32_t val;
	int group;
	const int groups[] = {
	    VLAN0_GROUP,
	    VLAN1_GROUP,
	    VLAN2_GROUP,
	    VLAN3_GROUP,
	    VLAN4_GROUP,
	    VLAN5_GROUP,
	    VLAN6_GROUP,
	    VLAN7_GROUP
	};

	group = groups[vlan];

	shift = vlan * 3;
	val = read_4(sc, VLAN_MEMBER_PORT_MAP) & (~(0x7 << shift));
	write_4(sc, VLAN_MEMBER_PORT_MAP, val);
	val = read_4(sc, VLAN_MEMBER_PORT_MAP);
	write_4(sc, VLAN_MEMBER_PORT_MAP, val | ((group & 0x7) << shift));
}

static void
set_vlan_tag(struct ece_softc *sc, int vlan)
{
	unsigned char shift;
	uint32_t val;

	int tag = 0;

	shift = vlan * 3;
	val = read_4(sc, VLAN_TAG_PORT_MAP) & (~(0x7 << shift));
	write_4(sc, VLAN_TAG_PORT_MAP, val);
	val = read_4(sc, VLAN_TAG_PORT_MAP);
	write_4(sc, VLAN_TAG_PORT_MAP, val | ((tag & 0x7) << shift));
}

static int
configure_cpu_port(struct ece_softc *sc)
{
	uint32_t cpu_port_config;
	int i;

	cpu_port_config = read_4(sc, CPU_PORT_CONFIG);
	/* Disable SA learning. */
	cpu_port_config |= (SA_LEARNING_DISABLE);
	/* Set data offset + 2. */
	cpu_port_config &= ~(1U << 31);

	write_4(sc, CPU_PORT_CONFIG, cpu_port_config);

	if (!write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    STATIC_ENTRY, VLAN0_GROUP,
	    vlan0_mac))
		return (1);

	set_pvid(sc, PORT0_PVID, PORT1_PVID, CPU_PORT_PVID);

	for (i = 0; i < 8; i++) {
		set_vlan_vid(sc, i);
		set_vlan_member(sc, i);
		set_vlan_tag(sc, i);
	}

	/* Disable all interrupt status sources. */
	write_4(sc, INTERRUPT_MASK, 0xffff1fff);

	/* Clear previous interrupt sources. */
	write_4(sc, INTERRUPT_STATUS, 0x00001FFF);

	write_4(sc, TS_DMA_CONTROL, 0);
	write_4(sc, FS_DMA_CONTROL, 0);
	return (0);
}

static int
hardware_init(struct ece_softc *sc)
{
	int gw_phy_type;

	gw_phy_type = get_phy_type(sc);
	/* Currently only the IC+ PHY is supported. */
	if (gw_phy_type != IC_PLUS_PHY) {
		device_printf(sc->dev, "PHY type is not supported (%d)\n",
		    gw_phy_type);
		return (-1);
	}
	configure_lan_port(sc, gw_phy_type);
	configure_cpu_port(sc);
	return (0);
}

static void
set_mac_address(struct ece_softc *sc, const uint8_t *mac, int mac_len)
{

	/* An invalid age_field means: erase this entry. */
	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    INVALID_ENTRY, VLAN0_GROUP,
	    mac);
	memcpy(vlan0_mac, mac, ETHER_ADDR_LEN);

	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    STATIC_ENTRY, VLAN0_GROUP,
	    mac);
}

static void
ece_set_mac(struct ece_softc *sc, u_char *eaddr)
{
	memcpy(vlan0_mac, eaddr, ETHER_ADDR_LEN);
	set_mac_address(sc, eaddr, ETHER_ADDR_LEN);
}

/*
 * TODO: the device does not store a MAC address; we should read the
 * configuration from flash, but the format depends on the bootloader
 * used.
 */
static int
ece_get_mac(struct ece_softc *sc, u_char *eaddr)
{
	return (ENXIO);
}

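/*
 * RX path.  FS_DESCRIPTOR_POINTER tracks the descriptor the switch will
 * fill next; the distance from last_rx to that index bounds how many
 * descriptors can be ready.  Each descriptor is still gated on its cown
 * (CPU-ownership) bit before being handed to if_input(), and the RX
 * lock is dropped around if_input() to avoid holding it while calling
 * up into the stack.
 */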
static void
ece_intr_rx_locked(struct ece_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *mb;
	struct rx_desc_info *rxdesc;
	eth_rx_desc_t *desc;
	int fssd_curr;
	int fssd;
	int i;
	int idx;
	int rxcount;
	uint32_t status;

	fssd_curr = read_4(sc, FS_DESCRIPTOR_POINTER);

	/* Descriptors are 16 bytes each, hence the shift. */
	fssd = (fssd_curr - (uint32_t)sc->ring_paddr_rx) >> 4;

	desc = sc->rx_desc[sc->last_rx].desc;

	/* Prepare to read the data in the ring. */
	bus_dmamap_sync(sc->dmatag_ring_rx,
	    sc->dmamap_ring_rx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (fssd > sc->last_rx)
		rxcount = fssd - sc->last_rx;
	else if (fssd < sc->last_rx)
		rxcount = (ECE_MAX_RX_BUFFERS - sc->last_rx) + fssd;
	else {
		if (desc->cown == 0)
			return;
		else
			rxcount = ECE_MAX_RX_BUFFERS;
	}

	for (i = 0; i < rxcount; i++) {
		status = desc->cown;
		if (!status)
			break;

		idx = sc->last_rx;
		rxdesc = &sc->rx_desc[idx];
		mb = rxdesc->buff;

		if (desc->length < ETHER_MIN_LEN - ETHER_CRC_LEN ||
		    desc->length > ETHER_MAX_LEN - ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN) {
			ifp->if_ierrors++;
			desc->cown = 0;
			desc->length = MCLBYTES - 2;
			/*
			 * Invalid packet; skip it and process the next
			 * one.
			 */
			continue;
		}

		if (ece_new_rxbuf(sc, rxdesc) != 0) {
			ifp->if_iqdrops++;
			desc->cown = 0;
			desc->length = MCLBYTES - 2;
			break;
		}

		/*
		 * The device writes frames at address + 2, so we need
		 * to adjust the data pointer after the packet is
		 * received.
		 */
		mb->m_data += 2;
		mb->m_len = mb->m_pkthdr.len = desc->length;

		mb->m_flags |= M_PKTHDR;
		mb->m_pkthdr.rcvif = ifp;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* Check for a valid checksum. */
			if (!desc->l4f && desc->prot != 3) {
				mb->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				mb->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				mb->m_pkthdr.csum_data = 0xffff;
			}
		}
		ECE_RXUNLOCK(sc);
		(*ifp->if_input)(ifp, mb);
		ECE_RXLOCK(sc);

		desc->cown = 0;
		desc->length = MCLBYTES - 2;

		bus_dmamap_sync(sc->dmatag_ring_rx,
		    sc->dmamap_ring_rx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (sc->last_rx == ECE_MAX_RX_BUFFERS - 1)
			sc->last_rx = 0;
		else
			sc->last_rx++;

		desc = sc->rx_desc[sc->last_rx].desc;
	}

	/* Sync the updated flags. */
	bus_dmamap_sync(sc->dmatag_ring_rx,
	    sc->dmamap_ring_rx,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

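/*
 * Interrupt handlers.  The handlers below do no packet work themselves:
 * RX and queue-full events enqueue sc_intr_task, TX completion enqueues
 * sc_cleanup_task, and the actual processing runs from the driver
 * taskqueue.
 */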
static void
ece_intr_task(void *arg, int pending __unused)
{
	struct ece_softc *sc = arg;

	ECE_RXLOCK(sc);
	ece_intr_rx_locked(sc, -1);
	ECE_RXUNLOCK(sc);
}

static void
ece_intr(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		write_4(sc, FS_DMA_CONTROL, 0);
		return;
	}

	taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}

static void
ece_intr_status(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	int stat;

	stat = read_4(sc, INTERRUPT_STATUS);
	/* Acknowledge the interrupt sources. */
	write_4(sc, INTERRUPT_STATUS, stat);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((stat & ERROR_MASK) != 0)
			ifp->if_iqdrops++;
	}
}

static void
ece_cleanup_locked(struct ece_softc *sc)
{
	eth_tx_desc_t *desc;

	if (sc->tx_cons == sc->tx_prod)
		return;

	/* Prepare to read the ring (owner bit). */
	bus_dmamap_sync(sc->dmatag_ring_tx,
	    sc->dmamap_ring_tx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->tx_cons != sc->tx_prod) {
		desc = sc->tx_desc[sc->tx_cons].desc;
		if (desc->cown != 0) {
			struct tx_desc_info *td = &(sc->tx_desc[sc->tx_cons]);
			/* We are finished with this descriptor ... */
			bus_dmamap_sync(sc->dmatag_data_tx, td->dmamap,
			    BUS_DMASYNC_POSTWRITE);
			/* ... and unload, so we can reuse. */
			bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
			m_freem(td->buff);
			td->buff = 0;
			sc->tx_cons = (sc->tx_cons + 1) % ECE_MAX_TX_BUFFERS;
		} else {
			break;
		}
	}
}

static void
ece_cleanup_task(void *arg, int pending __unused)
{
	struct ece_softc *sc = arg;

	ECE_CLEANUPLOCK(sc);
	ece_cleanup_locked(sc);
	ECE_CLEANUPUNLOCK(sc);
}

static void
ece_intr_tx(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		/* This should not happen; stop DMA. */
		write_4(sc, FS_DMA_CONTROL, 0);
		return;
	}
	taskqueue_enqueue(sc->sc_tq, &sc->sc_cleanup_task);
}

static void
ece_intr_qf(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		/* This should not happen; stop DMA. */
		write_4(sc, FS_DMA_CONTROL, 0);
		return;
	}
	taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
	write_4(sc, FS_DMA_CONTROL, 1);
}

/*
 * Reset and initialize the chip.
 */
static void
eceinit_locked(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	uint32_t cfg_reg;
	uint32_t cpu_port_config;
	uint32_t mac_port_config;

	/* Wait for the switch core (bit 17 of BIST_RESULT_TEST_0). */
	while (1) {
		cfg_reg = read_4(sc, BIST_RESULT_TEST_0);
		if ((cfg_reg & (1 << 17)))
			break;
		DELAY(100);
	}
	/* Set to default values. */
	write_4(sc, SWITCH_CONFIG, 0x007AA7A1);
	write_4(sc, MAC_PORT_0_CONFIG, 0x00423D00);
	write_4(sc, MAC_PORT_1_CONFIG, 0x00423D80);
	write_4(sc, CPU_PORT_CONFIG, 0x004C0000);

	hardware_init(sc);

	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);

	/* Enable Port 0. */
	mac_port_config &= (~(PORT_DISABLE));
	write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);

	cpu_port_config = read_4(sc, CPU_PORT_CONFIG);
	/* Enable the CPU port. */
	cpu_port_config &= ~(PORT_DISABLE);
	write_4(sc, CPU_PORT_CONFIG, cpu_port_config);

	/*
	 * Set the 'running' flag, clear the output active flag
	 * and attempt to start output.
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	/* Enable DMA. */
	write_4(sc, FS_DMA_CONTROL, 1);

	callout_reset(&sc->tick_ch, hz, ece_tick, sc);
}

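/*
 * Queue a packet onto the TX ring.  The descriptors for all fragments
 * are filled in first with cown left at 1 (CPU-owned); only after the
 * whole chain is built are the cown bits cleared, in order, so the
 * hardware never sees a partially constructed chain.
 */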
static inline int
ece_encap(struct ece_softc *sc, struct mbuf *m0)
{
	struct ifnet *ifp;
	bus_dma_segment_t segs[MAX_FRAGMENT];
	bus_dmamap_t mapp;
	eth_tx_desc_t *desc = NULL;
	int csum_flags;
	int desc_no;
	int error;
	int nsegs;
	int seg;

	ifp = sc->ifp;

	/* Fetch an unused map. */
	mapp = sc->tx_desc[sc->tx_prod].dmamap;

	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, mapp,
	    m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	desc = &(sc->desc_tx[sc->desc_curr_tx]);
	sc->tx_desc[sc->tx_prod].desc = desc;
	sc->tx_desc[sc->tx_prod].buff = m0;
	desc_no = sc->desc_curr_tx;

	for (seg = 0; seg < nsegs; seg++) {
		if (desc->cown == 0) {
			if_printf(ifp, "ERROR: descriptor is still used\n");
			bus_dmamap_unload(sc->dmatag_ring_tx, mapp);
			return (-1);
		}

		desc->length = segs[seg].ds_len;
		desc->data_ptr = segs[seg].ds_addr;

		/* Mark the first and last segments. */
		desc->fs = (seg == 0);
		desc->ls = (seg == nsegs - 1);

		csum_flags = m0->m_pkthdr.csum_flags;

		desc->fr = 1;
		desc->pmap = 1;
		desc->insv = 0;
		desc->ico = 0;
		desc->tco = 0;
		desc->uco = 0;
		desc->interrupt = 1;

		if (csum_flags & CSUM_IP) {
			desc->ico = 1;
			if (csum_flags & CSUM_TCP)
				desc->tco = 1;
			if (csum_flags & CSUM_UDP)
				desc->uco = 1;
		}

		desc++;
		sc->desc_curr_tx = (sc->desc_curr_tx + 1) % ECE_MAX_TX_BUFFERS;
		if (sc->desc_curr_tx == 0)
			desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
	}

	desc = sc->tx_desc[sc->tx_prod].desc;

	sc->tx_prod = (sc->tx_prod + 1) % ECE_MAX_TX_BUFFERS;

	/*
	 * After all descriptors are set, we set the flags to start the
	 * sending process.
	 */
	for (seg = 0; seg < nsegs; seg++) {
		desc->cown = 0;
		desc++;
		desc_no = (desc_no + 1) % ECE_MAX_TX_BUFFERS;
		if (desc_no == 0)
			desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
	}

	bus_dmamap_sync(sc->dmatag_data_tx, mapp, BUS_DMASYNC_PREWRITE);
	return (0);
}

/*
 * Dequeue packets and transmit.
 */
static void
ecestart_locked(struct ifnet *ifp)
{
	struct ece_softc *sc;
	struct mbuf *m0;
	uint32_t queued = 0;

	sc = ifp->if_softc;
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	bus_dmamap_sync(sc->dmatag_ring_tx,
	    sc->dmamap_ring_tx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Get a packet from the queue. */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (ece_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}
	if (queued) {
		bus_dmamap_sync(sc->dmatag_ring_tx, sc->dmamap_ring_tx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		write_4(sc, TS_DMA_CONTROL, 1);
	}
}

static void
eceinit(void *xsc)
{
	struct ece_softc *sc = xsc;

	ECE_LOCK(sc);
	eceinit_locked(sc);
	ECE_UNLOCK(sc);
}

static void
ece_tx_task(void *arg, int pending __unused)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	ecestart(ifp);
}

static void
ecestart(struct ifnet *ifp)
{
	struct ece_softc *sc = ifp->if_softc;

	ECE_TXLOCK(sc);
	ecestart_locked(ifp);
	ECE_TXUNLOCK(sc);
}

/*
 * Turn off interrupts and stop the NIC.  Can be called with sc->ifp
 * NULL, so be careful.
 */
static void
ecestop(struct ece_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t mac_port_config;

	write_4(sc, TS_DMA_CONTROL, 0);
	write_4(sc, FS_DMA_CONTROL, 0);

	if (ifp)
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	/* Disable Port 0. */
	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
	mac_port_config |= (PORT_DISABLE);
	write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);

	/* Disable Port 1. */
	mac_port_config = read_4(sc, MAC_PORT_1_CONFIG);
	mac_port_config |= (PORT_DISABLE);
	write_4(sc, MAC_PORT_1_CONFIG, mac_port_config);

	/* Disable all interrupt status sources. */
	write_4(sc, INTERRUPT_MASK, 0x00001FFF);

	/* Clear previous interrupt sources. */
	write_4(sc, INTERRUPT_STATUS, 0x00001FFF);

	/* Restore the boot-time configuration for a warm boot. */
	write_4(sc, SWITCH_CONFIG, initial_switch_config);
	write_4(sc, CPU_PORT_CONFIG, initial_cpu_config);
	write_4(sc, MAC_PORT_0_CONFIG, initial_port0_config);
	write_4(sc, MAC_PORT_1_CONFIG, initial_port1_config);

	clear_mac_entries(sc, 1);
}

static void
ece_restart(struct ece_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	/* Enable port 0. */
	write_4(sc, PORT_0_CONFIG,
	    read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE));
	write_4(sc, INTERRUPT_MASK, 0x00000000);
	write_4(sc, FS_DMA_CONTROL, 1);
	callout_reset(&sc->tick_ch, hz, ece_tick, sc);
}

static void
set_filter(struct ece_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mac_port_config;

	ifp = sc->ifp;

	clear_mac_entries(sc, 0);
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
		mac_port_config &= ~(DISABLE_BROADCAST_PACKET);
		mac_port_config &= ~(DISABLE_MULTICAST_PACKET);
		write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
		return;
	}
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		add_mac_entry(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
}

static int
eceioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ece_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ECE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ecestop(sc);
		} else {
			/* Reinitialize the card on any parameter change. */
			if ((ifp->if_flags & IFF_UP) &&
			    !(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ece_restart(sc);
		}
		ECE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ECE_LOCK(sc);
		set_filter(sc);
		ECE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			ECE_LOCK(sc);
			ECE_UNLOCK(sc);
		}
		/* FALLTHROUGH */
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ece_child_detached(device_t dev, device_t child)
{
	struct ece_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

/*
 * MII bus support routines.
 */
static int
ece_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ece_softc *sc;

	sc = device_get_softc(dev);
	return (phy_read(sc, phy, reg));
}

static int
ece_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ece_softc *sc;

	sc = device_get_softc(dev);
	phy_write(sc, phy, reg, data);
	return (0);
}

static device_method_t ece_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ece_probe),
	DEVMETHOD(device_attach,	ece_attach),
	DEVMETHOD(device_detach,	ece_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	ece_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ece_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ece_miibus_writereg),

	{ 0, 0 }
};

static driver_t ece_driver = {
	"ece",
	ece_methods,
	sizeof(struct ece_softc),
};

DRIVER_MODULE(ece, econaarm, ece_driver, ece_devclass, 0, 0);
DRIVER_MODULE(miibus, ece, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ece, miibus, 1, 1, 1);
MODULE_DEPEND(ece, ether, 1, 1, 1);