1/* $NetBSD: if_mec.c,v 1.13 2007/10/17 19:57:05 garbled Exp $ */
2
3/*
4 * Copyright (c) 2004 Izumi Tsutsui.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 *    derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/*
31 * Copyright (c) 2003 Christopher SEKIYA
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 *    notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 *    notice, this list of conditions and the following disclaimer in the
41 *    documentation and/or other materials provided with the distribution.
42 * 3. All advertising materials mentioning features or use of this software
43 *    must display the following acknowledgement:
44 *          This product includes software developed for the
45 *          NetBSD Project.  See http://www.NetBSD.org/ for
46 *          information about NetBSD.
47 * 4. The name of the author may not be used to endorse or promote products
48 *    derived from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
51 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
52 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
53 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
54 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
55 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
59 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 */
61
62/*
63 * MACE MAC-110 ethernet driver
64 */
65
66#include <sys/cdefs.h>
67__KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.13 2007/10/17 19:57:05 garbled Exp $");
68
69#include "opt_ddb.h"
70#include "bpfilter.h"
71#include "rnd.h"
72
73#include <sys/param.h>
74#include <sys/systm.h>
75#include <sys/device.h>
76#include <sys/callout.h>
77#include <sys/mbuf.h>
78#include <sys/malloc.h>
79#include <sys/kernel.h>
80#include <sys/socket.h>
81#include <sys/ioctl.h>
82#include <sys/errno.h>
83
84#if NRND > 0
85#include <sys/rnd.h>
86#endif
87
88#include <net/if.h>
89#include <net/if_dl.h>
90#include <net/if_media.h>
91#include <net/if_ether.h>
92
93#if NBPFILTER > 0
94#include <net/bpf.h>
95#endif
96
97#include <machine/bus.h>
98#include <machine/intr.h>
99#include <machine/machtype.h>
100
101#include <dev/mii/mii.h>
102#include <dev/mii/miivar.h>
103
104#include <sgimips/mace/macevar.h>
105#include <sgimips/mace/if_mecreg.h>
106
107#include <dev/arcbios/arcbios.h>
108#include <dev/arcbios/arcbiosvar.h>
109
110/* #define MEC_DEBUG */
111
112#ifdef MEC_DEBUG
113#define MEC_DEBUG_RESET		0x01
114#define MEC_DEBUG_START		0x02
115#define MEC_DEBUG_STOP		0x04
116#define MEC_DEBUG_INTR		0x08
117#define MEC_DEBUG_RXINTR	0x10
118#define MEC_DEBUG_TXINTR	0x20
119uint32_t mec_debug = 0;
120#define DPRINTF(x, y)	if (mec_debug & (x)) printf y
121#else
122#define DPRINTF(x, y)	/* nothing */
123#endif
124
125/*
126 * Transmit descriptor list size
127 */
128#define MEC_NTXDESC		64
129#define MEC_NTXDESC_MASK	(MEC_NTXDESC - 1)
130#define MEC_NEXTTX(x)		(((x) + 1) & MEC_NTXDESC_MASK)
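/*
 * Note MEC_NTXDESC (and MEC_NRXDESC below) must be powers of two
 * for the mask based ring index arithmetic above to work.
 */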
131
132/*
133 * software state for TX
134 */
135struct mec_txsoft {
136	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
137	bus_dmamap_t txs_dmamap;	/* our DMA map */
138	uint32_t txs_flags;
139#define MEC_TXS_BUFLEN_MASK	0x0000007f	/* data len in txd_buf */
140#define MEC_TXS_TXDBUF		0x00000080	/* txd_buf is used */
141#define MEC_TXS_TXDPTR1		0x00000100	/* txd_ptr[0] is used */
142};
143
144/*
145 * Transmit buffer descriptor
146 */
147#define MEC_TXDESCSIZE		128
148#define MEC_NTXPTR		3
149#define MEC_TXD_BUFOFFSET	\
150	(sizeof(uint64_t) + MEC_NTXPTR * sizeof(uint64_t))
151#define MEC_TXD_BUFSIZE		(MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
152#define MEC_TXD_BUFSTART(len)	(MEC_TXD_BUFSIZE - (len))
153#define MEC_TXD_ALIGN		8
154#define MEC_TXD_ROUNDUP(addr)	\
155	(((addr) + (MEC_TXD_ALIGN - 1)) & ~((uint64_t)MEC_TXD_ALIGN - 1))
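/*
 * For example, MEC_TXD_ROUNDUP(0x1001) is 0x1008 while
 * MEC_TXD_ROUNDUP(0x1008) stays 0x1008, i.e. DMA addresses are
 * rounded up to a multiple of MEC_TXD_ALIGN (8 bytes).
 */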
156
157struct mec_txdesc {
158	volatile uint64_t txd_cmd;
159#define MEC_TXCMD_DATALEN	0x000000000000ffff	/* data length */
160#define MEC_TXCMD_BUFSTART	0x00000000007f0000	/* start byte offset */
161#define  TXCMD_BUFSTART(x)	((x) << 16)
162#define MEC_TXCMD_TERMDMA	0x0000000000800000	/* stop DMA on abort */
163#define MEC_TXCMD_TXINT		0x0000000001000000	/* INT after TX done */
164#define MEC_TXCMD_PTR1		0x0000000002000000	/* valid 1st txd_ptr */
165#define MEC_TXCMD_PTR2		0x0000000004000000	/* valid 2nd txd_ptr */
166#define MEC_TXCMD_PTR3		0x0000000008000000	/* valid 3rd txd_ptr */
167#define MEC_TXCMD_UNUSED	0xfffffffff0000000ULL	/* should be zero */
168
169#define txd_stat	txd_cmd
170#define MEC_TXSTAT_LEN		0x000000000000ffff	/* TX length */
171#define MEC_TXSTAT_COLCNT	0x00000000000f0000	/* collision count */
172#define MEC_TXSTAT_COLCNT_SHIFT	16
173#define MEC_TXSTAT_LATE_COL	0x0000000000100000	/* late collision */
174#define MEC_TXSTAT_CRCERROR	0x0000000000200000	/* CRC error (?) */
175#define MEC_TXSTAT_DEFERRED	0x0000000000400000	/* deferred (?) */
176#define MEC_TXSTAT_SUCCESS	0x0000000000800000	/* TX complete */
177#define MEC_TXSTAT_TOOBIG	0x0000000001000000	/* too big (?) */
178#define MEC_TXSTAT_UNDERRUN	0x0000000002000000	/* underrun (?) */
179#define MEC_TXSTAT_COLLISIONS	0x0000000004000000	/* collisions (?) */
180#define MEC_TXSTAT_EXDEFERRAL	0x0000000008000000	/* excess deferral (?) */
181#define MEC_TXSTAT_COLLIDED	0x0000000010000000	/* collided (?) */
182#define MEC_TXSTAT_UNUSED	0x7fffffffe0000000ULL	/* should be zero */
183#define MEC_TXSTAT_SENT		0x8000000000000000ULL	/* packet sent */
184
185	uint64_t txd_ptr[MEC_NTXPTR];
186#define MEC_TXPTR_UNUSED2	0x0000000000000007	/* should be zero */
187#define MEC_TXPTR_DMAADDR	0x00000000fffffff8	/* TX DMA address */
188#define MEC_TXPTR_LEN		0x0000ffff00000000ULL	/* buffer length */
189#define  TXPTR_LEN(x)		((uint64_t)(x) << 32)
190#define MEC_TXPTR_UNUSED1	0xffff000000000000ULL	/* should be zero */
191
192	uint8_t txd_buf[MEC_TXD_BUFSIZE];
193};
194
195/*
196 * Receive buffer size
197 */
198#define MEC_NRXDESC		16
199#define MEC_NRXDESC_MASK	(MEC_NRXDESC - 1)
200#define MEC_NEXTRX(x)		(((x) + 1) & MEC_NRXDESC_MASK)
201
202/*
203 * Receive buffer description
204 */
205#define MEC_RXDESCSIZE		4096	/* umm, should be 4kbyte aligned */
206#define MEC_RXD_NRXPAD		3
207#define MEC_RXD_DMAOFFSET	(1 + MEC_RXD_NRXPAD)
208#define MEC_RXD_BUFOFFSET	(MEC_RXD_DMAOFFSET * sizeof(uint64_t))
209#define MEC_RXD_BUFSIZE		(MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
210
211struct mec_rxdesc {
212	volatile uint64_t rxd_stat;
213#define MEC_RXSTAT_LEN		0x000000000000ffff	/* data length */
214#define MEC_RXSTAT_VIOLATION	0x0000000000010000	/* code violation (?) */
215#define MEC_RXSTAT_UNUSED2	0x0000000000020000	/* unknown (?) */
216#define MEC_RXSTAT_CRCERROR	0x0000000000040000	/* CRC error */
217#define MEC_RXSTAT_MULTICAST	0x0000000000080000	/* multicast packet */
218#define MEC_RXSTAT_BROADCAST	0x0000000000100000	/* broadcast packet */
219#define MEC_RXSTAT_INVALID	0x0000000000200000	/* invalid preamble */
220#define MEC_RXSTAT_LONGEVENT	0x0000000000400000	/* long packet */
221#define MEC_RXSTAT_BADPACKET	0x0000000000800000	/* bad packet */
222#define MEC_RXSTAT_CAREVENT	0x0000000001000000	/* carrier event */
223#define MEC_RXSTAT_MATCHMCAST	0x0000000002000000	/* match multicast */
224#define MEC_RXSTAT_MATCHMAC	0x0000000004000000	/* match MAC */
225#define MEC_RXSTAT_SEQNUM	0x00000000f8000000	/* sequence number */
226#define MEC_RXSTAT_CKSUM	0x0000ffff00000000ULL	/* IP checksum */
227#define MEC_RXSTAT_UNUSED1	0x7fff000000000000ULL	/* should be zero */
228#define MEC_RXSTAT_RECEIVED	0x8000000000000000ULL	/* set to 1 on RX */
229	uint64_t rxd_pad1[MEC_RXD_NRXPAD];
230	uint8_t  rxd_buf[MEC_RXD_BUFSIZE];
231};
232
233/*
234 * control structures for DMA ops
235 */
236struct mec_control_data {
237	/*
238	 * TX descriptors and buffers
239	 */
240	struct mec_txdesc mcd_txdesc[MEC_NTXDESC];
241
242	/*
243	 * RX descriptors and buffers
244	 */
245	struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
246};
247
248/*
249 * It _seems_ there are some restrictions on descriptor addresses:
250 *
251 * - Base address of txdescs should be 8kbyte aligned
252 * - Each txdesc should be 128byte aligned
253 * - Each rxdesc should be 4kbyte aligned
254 *
255 * So we should specify 8kbyte alignment to allocate txdescs.
256 * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
257 * so the rxdescs also end up 4kbyte aligned.
258 */
259#define MEC_CONTROL_DATA_ALIGN	(8 * 1024)
260
261#define MEC_CDOFF(x)	offsetof(struct mec_control_data, x)
262#define MEC_CDTXOFF(x)	MEC_CDOFF(mcd_txdesc[(x)])
263#define MEC_CDRXOFF(x)	MEC_CDOFF(mcd_rxdesc[(x)])
264
265/*
266 * software state per device
267 */
268struct mec_softc {
269	struct device sc_dev;		/* generic device structures */
270
271	bus_space_tag_t sc_st;		/* bus_space tag */
272	bus_space_handle_t sc_sh;	/* bus_space handle */
273	bus_dma_tag_t sc_dmat;		/* bus_dma tag */
274	void *sc_sdhook;		/* shutdown hook */
275
276	struct ethercom sc_ethercom;	/* Ethernet common part */
277
278	struct mii_data sc_mii;		/* MII/media information */
279	int sc_phyaddr;			/* MII address */
280	struct callout sc_tick_ch;	/* tick callout */
281
282	uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */
283
284	bus_dmamap_t sc_cddmamap;	/* bus_dma map for control data */
285#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
286
287	/* pointer to allocated control data */
288	struct mec_control_data *sc_control_data;
289#define sc_txdesc	sc_control_data->mcd_txdesc
290#define sc_rxdesc	sc_control_data->mcd_rxdesc
291
292	/* software state for TX descs */
293	struct mec_txsoft sc_txsoft[MEC_NTXDESC];
294
295	int sc_txpending;		/* number of TX requests pending */
296	int sc_txdirty;			/* first dirty TX descriptor */
297	int sc_txlast;			/* last used TX descriptor */
298
299	int sc_rxptr;			/* next ready RX buffer */
300
301#if NRND > 0
302	rndsource_element_t sc_rnd_source; /* random source */
303#endif
304};
305
306#define MEC_CDTXADDR(sc, x)	((sc)->sc_cddma + MEC_CDTXOFF(x))
307#define MEC_CDRXADDR(sc, x)	((sc)->sc_cddma + MEC_CDRXOFF(x))
308
309#define MEC_TXDESCSYNC(sc, x, ops)					\
310	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
311	    MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
312#define MEC_TXCMDSYNC(sc, x, ops)					\
313	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
314	    MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
315
316#define MEC_RXSTATSYNC(sc, x, ops)					\
317	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
318	    MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
319#define MEC_RXBUFSYNC(sc, x, len, ops)					\
320	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
321	    MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET,				\
322	    MEC_ETHER_ALIGN + (len), (ops))
323
324/* XXX these values should be moved to <net/if_ether.h> ? */
325#define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
326#define MEC_ETHER_ALIGN	2
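/*
 * ETHER_PAD_LEN (60) is the minimum frame length without the CRC;
 * frames shorter than this are padded out in mec_start().
 * MEC_ETHER_ALIGN (2) offsets the 14 byte Ethernet header so that
 * the payload following it ends up 4 byte aligned in the mbuf
 * (see the comments in mec_start()).
 */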
327
328#ifdef DDB
329#define STATIC
330#else
331#define STATIC static
332#endif
333
334STATIC int	mec_match(struct device *, struct cfdata *, void *);
335STATIC void	mec_attach(struct device *, struct device *, void *);
336
337STATIC int	mec_mii_readreg(struct device *, int, int);
338STATIC void	mec_mii_writereg(struct device *, int, int, int);
339STATIC int	mec_mii_wait(struct mec_softc *);
340STATIC void	mec_statchg(struct device *);
341STATIC void	mec_mediastatus(struct ifnet *, struct ifmediareq *);
342STATIC int	mec_mediachange(struct ifnet *);
343
344static void	enaddr_aton(const char *, uint8_t *);
345
346STATIC int	mec_init(struct ifnet * ifp);
347STATIC void	mec_start(struct ifnet *);
348STATIC void	mec_watchdog(struct ifnet *);
349STATIC void	mec_tick(void *);
350STATIC int	mec_ioctl(struct ifnet *, u_long, void *);
351STATIC void	mec_reset(struct mec_softc *);
352STATIC void	mec_setfilter(struct mec_softc *);
353STATIC int	mec_intr(void *arg);
354STATIC void	mec_stop(struct ifnet *, int);
355STATIC void	mec_rxintr(struct mec_softc *);
356STATIC void	mec_txintr(struct mec_softc *);
357STATIC void	mec_shutdown(void *);
358
359CFATTACH_DECL(mec, sizeof(struct mec_softc),
360    mec_match, mec_attach, NULL, NULL);
361
362static int mec_matched = 0;
363
364STATIC int
365mec_match(struct device *parent, struct cfdata *match, void *aux)
366{
367
368	/* allow only one device */
369	if (mec_matched)
370		return 0;
371
372	mec_matched = 1;
373	return 1;
374}
375
376STATIC void
377mec_attach(struct device *parent, struct device *self, void *aux)
378{
379	struct mec_softc *sc = (void *)self;
380	struct mace_attach_args *maa = aux;
381	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
382	uint32_t command;
383	const char *macaddr;
384	struct mii_softc *child;
385	bus_dma_segment_t seg;
386	int i, err, rseg;
387
388	sc->sc_st = maa->maa_st;
389	if (bus_space_subregion(sc->sc_st, maa->maa_sh,
390	    maa->maa_offset, 0,	&sc->sc_sh) != 0) {
391		printf(": can't map i/o space\n");
392		return;
393	}
394
395	/* set up DMA structures */
396	sc->sc_dmat = maa->maa_dmat;
397
398	/*
399	 * Allocate the control data structures, and create and load the
400	 * DMA map for it.
401	 */
402	if ((err = bus_dmamem_alloc(sc->sc_dmat,
403	    sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
404	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
405		printf(": unable to allocate control data, error = %d\n", err);
406		goto fail_0;
407	}
408	/*
409	 * XXX needs re-think...
410	 * The control data structures contain the RX data buffers themselves,
411	 * so BUS_DMA_COHERENT (which disables the cache) may cause a performance
412	 * hit when copying data from the RX buffer to an mbuf in normal memory,
413	 * though we have to make sure all bus_dmamap_sync(9) ops are called
414	 * properly in that case.
415	 */
416	if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
417	    sizeof(struct mec_control_data),
418	    (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
419		printf(": unable to map control data, error = %d\n", err);
420		goto fail_1;
421	}
422	memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));
423
424	if ((err = bus_dmamap_create(sc->sc_dmat,
425	    sizeof(struct mec_control_data), 1,
426	    sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
427		printf(": unable to create control data DMA map, error = %d\n",
428		    err);
429		goto fail_2;
430	}
431	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
432	    sc->sc_control_data, sizeof(struct mec_control_data), NULL,
433	    BUS_DMA_NOWAIT)) != 0) {
434		printf(": unable to load control data DMA map, error = %d\n",
435		    err);
436		goto fail_3;
437	}
438
439	/* create TX buffer DMA maps */
440	for (i = 0; i < MEC_NTXDESC; i++) {
441		if ((err = bus_dmamap_create(sc->sc_dmat,
442		    MCLBYTES, 1, MCLBYTES, 0, 0,
443		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
444			printf(": unable to create tx DMA map %d, error = %d\n",
445			    i, err);
446			goto fail_4;
447		}
448	}
449
450	callout_init(&sc->sc_tick_ch, 0);
451
452	/* get ethernet address from ARCBIOS */
453	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
454		printf(": unable to get MAC address!\n");
455		goto fail_4;
456	}
457	enaddr_aton(macaddr, sc->sc_enaddr);
458
459	/* reset device */
460	mec_reset(sc);
461
462	command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);
463
464	printf(": MAC-110 Ethernet, rev %d\n",
465	    (command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT);
466
467	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
468	    ether_sprintf(sc->sc_enaddr));
469
470	/* Done, now attach everything */
471
472	sc->sc_mii.mii_ifp = ifp;
473	sc->sc_mii.mii_readreg = mec_mii_readreg;
474	sc->sc_mii.mii_writereg = mec_mii_writereg;
475	sc->sc_mii.mii_statchg = mec_statchg;
476
477	/* Set up PHY properties */
478	ifmedia_init(&sc->sc_mii.mii_media, 0, mec_mediachange,
479	    mec_mediastatus);
480	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
481	    MII_OFFSET_ANY, 0);
482
483	child = LIST_FIRST(&sc->sc_mii.mii_phys);
484	if (child == NULL) {
485		/* No PHY attached */
486		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
487		    0, NULL);
488		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
489	} else {
490		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
491		sc->sc_phyaddr = child->mii_phy;
492	}
493
494	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
495	ifp->if_softc = sc;
496	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
497	ifp->if_ioctl = mec_ioctl;
498	ifp->if_start = mec_start;
499	ifp->if_watchdog = mec_watchdog;
500	ifp->if_init = mec_init;
501	ifp->if_stop = mec_stop;
502	ifp->if_mtu = ETHERMTU;
503	IFQ_SET_READY(&ifp->if_snd);
504
505	if_attach(ifp);
506	ether_ifattach(ifp, sc->sc_enaddr);
507
508	/* establish interrupt */
509	cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);
510
511#if NRND > 0
512	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dev.dv_xname,
513	    RND_TYPE_NET, 0);
514#endif
515
516	/* set shutdown hook to reset interface on powerdown */
517	sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc);
518
519	return;
520
521	/*
522	 * Free any resources we've allocated during the failed attach
523	 * attempt.  Do this in reverse order and fall through.
524	 */
525 fail_4:
526	for (i = 0; i < MEC_NTXDESC; i++) {
527		if (sc->sc_txsoft[i].txs_dmamap != NULL)
528			bus_dmamap_destroy(sc->sc_dmat,
529			    sc->sc_txsoft[i].txs_dmamap);
530	}
531	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
532 fail_3:
533	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
534 fail_2:
535	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
536	    sizeof(struct mec_control_data));
537 fail_1:
538	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
539 fail_0:
540	return;
541}
542
543STATIC int
544mec_mii_readreg(struct device *self, int phy, int reg)
545{
546	struct mec_softc *sc = (void *)self;
547	bus_space_tag_t st = sc->sc_st;
548	bus_space_handle_t sh = sc->sc_sh;
549	uint64_t val;
550	int i;
551
552	if (mec_mii_wait(sc) != 0)
553		return 0;
554
555	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
556	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
557	delay(25);
558	bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
559	delay(25);
560	mec_mii_wait(sc);
561
562	for (i = 0; i < 20; i++) {
563		delay(30);
564
565		val = bus_space_read_8(st, sh, MEC_PHY_DATA);
566
567		if ((val & MEC_PHY_DATA_BUSY) == 0)
568			return val & MEC_PHY_DATA_VALUE;
569	}
570	return 0;
571}
572
573STATIC void
574mec_mii_writereg(struct device *self, int phy, int reg, int val)
575{
576	struct mec_softc *sc = (void *)self;
577	bus_space_tag_t st = sc->sc_st;
578	bus_space_handle_t sh = sc->sc_sh;
579
580	if (mec_mii_wait(sc) != 0) {
581		printf("timed out writing %x: %x\n", reg, val);
582		return;
583	}
584
585	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
586	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
587
588	delay(60);
589
590	bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);
591
592	delay(60);
593
594	mec_mii_wait(sc);
595}
596
597STATIC int
598mec_mii_wait(struct mec_softc *sc)
599{
600	uint32_t busy;
601	int i, s;
602
603	for (i = 0; i < 100; i++) {
604		delay(30);
605
606		s = splhigh();
607		busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
608		splx(s);
609
610		if ((busy & MEC_PHY_DATA_BUSY) == 0)
611			return 0;
612#if 0
613		if (busy == 0xffff) /* XXX ? */
614			return 0;
615#endif
616	}
617
618	printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
619	return 1;
620}
621
622STATIC void
623mec_statchg(struct device *self)
624{
625	struct mec_softc *sc = (void *)self;
626	bus_space_tag_t st = sc->sc_st;
627	bus_space_handle_t sh = sc->sc_sh;
628	uint32_t control;
629
630	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
631	control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
632	    MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);
633
634	/* must also set IPG here for duplex stuff ... */
635	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
636		control |= MEC_MAC_FULL_DUPLEX;
637	} else {
638		/* set IPG */
639		control |= MEC_MAC_IPG_DEFAULT;
640	}
641
642	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
643}
644
645STATIC void
646mec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
647{
648	struct mec_softc *sc = ifp->if_softc;
649
650	if ((ifp->if_flags & IFF_UP) == 0)
651		return;
652
653	mii_pollstat(&sc->sc_mii);
654	ifmr->ifm_status = sc->sc_mii.mii_media_status;
655	ifmr->ifm_active = sc->sc_mii.mii_media_active;
656}
657
658STATIC int
659mec_mediachange(struct ifnet *ifp)
660{
661	struct mec_softc *sc = ifp->if_softc;
662
663	if ((ifp->if_flags & IFF_UP) == 0)
664		return 0;
665
666	return mii_mediachg(&sc->sc_mii);
667}
668
669/*
670 * XXX
671 * maybe this function should be moved to common part
672 * (sgimips/machdep.c or elsewhere) for all on-board network devices.
673 */
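/*
 * Parse a colon separated ASCII MAC address as returned by ARCBIOS;
 * for example (a made-up address), "08:00:69:0a:bc:de" yields the
 * bytes { 0x08, 0x00, 0x69, 0x0a, 0xbc, 0xde } in eaddr.
 */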
674static void
675enaddr_aton(const char *str, uint8_t *eaddr)
676{
677	int i;
678	char c;
679
680	for (i = 0; i < ETHER_ADDR_LEN; i++) {
681		if (*str == ':')
682			str++;
683
684		c = *str++;
685		if (isdigit(c)) {
686			eaddr[i] = (c - '0');
687		} else if (isxdigit(c)) {
688			eaddr[i] = (toupper(c) + 10 - 'A');
689		}
690		c = *str++;
691		if (isdigit(c)) {
692			eaddr[i] = (eaddr[i] << 4) | (c - '0');
693		} else if (isxdigit(c)) {
694			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
695		}
696	}
697}
698
699STATIC int
700mec_init(struct ifnet *ifp)
701{
702	struct mec_softc *sc = ifp->if_softc;
703	bus_space_tag_t st = sc->sc_st;
704	bus_space_handle_t sh = sc->sc_sh;
705	struct mec_rxdesc *rxd;
706	int i;
707
708	/* cancel any pending I/O */
709	mec_stop(ifp, 0);
710
711	/* reset device */
712	mec_reset(sc);
713
714	/* setup filter for multicast or promisc mode */
715	mec_setfilter(sc);
716
717	/* set the TX ring pointer to the base address */
718	bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));
719
720	sc->sc_txpending = 0;
721	sc->sc_txdirty = 0;
722	sc->sc_txlast = MEC_NTXDESC - 1;
723
724	/* put RX buffers into FIFO */
725	for (i = 0; i < MEC_NRXDESC; i++) {
726		rxd = &sc->sc_rxdesc[i];
727		rxd->rxd_stat = 0;
728		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
729		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
730		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
731	}
732	sc->sc_rxptr = 0;
733
734#if 0	/* XXX no info */
735	bus_space_write_8(st, sh, MEC_TIMER, 0);
736#endif
737
738	/*
739	 * MEC_DMA_TX_INT_ENABLE will be set later, otherwise it causes
740	 * spurious interrupts when the TX buffers are empty.
741	 */
742	bus_space_write_8(st, sh, MEC_DMA_CONTROL,
743	    (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
744	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
745	    MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
746	    MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);
747
748	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
749
750	ifp->if_flags |= IFF_RUNNING;
751	ifp->if_flags &= ~IFF_OACTIVE;
752	mec_start(ifp);
753
754	mii_mediachg(&sc->sc_mii);
755
756	return 0;
757}
758
759STATIC void
760mec_reset(struct mec_softc *sc)
761{
762	bus_space_tag_t st = sc->sc_st;
763	bus_space_handle_t sh = sc->sc_sh;
764	uint64_t address, control;
765	int i;
766
767	/* reset chip */
768	bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
769	delay(1000);
770	bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
771	delay(1000);
772
773	/* set ethernet address */
774	address = 0;
775	for (i = 0; i < ETHER_ADDR_LEN; i++) {
776		address = address << 8;
777		address += sc->sc_enaddr[i];
778	}
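	/*
	 * After the loop above, sc_enaddr[0] sits in bits 47:40 of
	 * 'address' and sc_enaddr[5] in bits 7:0.
	 */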
779	bus_space_write_8(st, sh, MEC_STATION, address);
780
781	/* Default to 100/half and let autonegotiation work its magic */
782	control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
783	    MEC_MAC_IPG_DEFAULT;
784
785	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
786	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
787
788	DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
789	    bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
790}
791
792STATIC void
793mec_start(struct ifnet *ifp)
794{
795	struct mec_softc *sc = ifp->if_softc;
796	struct mbuf *m0, *m;
797	struct mec_txdesc *txd;
798	struct mec_txsoft *txs;
799	bus_dmamap_t dmamap;
800	bus_space_tag_t st = sc->sc_st;
801	bus_space_handle_t sh = sc->sc_sh;
802	uint64_t txdaddr;
803	int error, firsttx, nexttx, opending;
804	int len, bufoff, buflen, unaligned, txdlen;
805
806	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
807		return;
808
809	/*
810	 * Remember the previous txpending and the first transmit descriptor.
811	 */
812	opending = sc->sc_txpending;
813	firsttx = MEC_NEXTTX(sc->sc_txlast);
814
815	DPRINTF(MEC_DEBUG_START,
816	    ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx));
817
818	for (;;) {
819		/* Grab a packet off the queue. */
820		IFQ_POLL(&ifp->if_snd, m0);
821		if (m0 == NULL)
822			break;
823		m = NULL;
824
825		if (sc->sc_txpending == MEC_NTXDESC) {
826			break;
827		}
828
829		/*
830		 * Get the next available transmit descriptor.
831		 */
832		nexttx = MEC_NEXTTX(sc->sc_txlast);
833		txd = &sc->sc_txdesc[nexttx];
834		txs = &sc->sc_txsoft[nexttx];
835
836		buflen = 0;
837		bufoff = 0;
838		txdaddr = 0; /* XXX gcc */
839		txdlen = 0; /* XXX gcc */
840
841		len = m0->m_pkthdr.len;
842
843		DPRINTF(MEC_DEBUG_START,
844		    ("mec_start: len = %d, nexttx = %d\n", len, nexttx));
845
846		if (len < ETHER_PAD_LEN) {
847			/*
848			 * I don't know if the MEC chip does auto padding,
849			 * so if the packet is small enough,
850			 * just copy it to the buffer in the txdesc.
851			 * Maybe this is the simplest way.
852			 */
853			DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n"));
854
855			IFQ_DEQUEUE(&ifp->if_snd, m0);
856			bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
857			m_copydata(m0, 0, m0->m_pkthdr.len,
858			    txd->txd_buf + bufoff);
859			memset(txd->txd_buf + bufoff + len, 0,
860			    ETHER_PAD_LEN - len);
861			len = buflen = ETHER_PAD_LEN;
862
863			txs->txs_flags = MEC_TXS_TXDBUF | buflen;
864		} else {
865			/*
866			 * If the packet won't fit in the buffer in the txdesc,
867			 * we have to use a concatenate pointer to handle it.
868			 * While the MEC can concatenate up to three segments,
869			 * it requires that the second and third segments be
870			 * 8 byte aligned.
871			 * Since that is unlikely for mbuf clusters, we use
872			 * only the first concatenate pointer. If the packet
873			 * doesn't fit in one DMA segment, allocate a new mbuf
874			 * and copy the packet to it.
875			 *
876			 * Besides, if the start address of the first segment
877			 * is not 8 byte aligned, that part has to be copied
878			 * to the txdesc buffer. (XXX see below comments)
879			 */
880			DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n"));
881
882			dmamap = txs->txs_dmamap;
883			if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
884			    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
885				DPRINTF(MEC_DEBUG_START,
886				    ("mec_start: re-allocating mbuf\n"));
887				MGETHDR(m, M_DONTWAIT, MT_DATA);
888				if (m == NULL) {
889					printf("%s: unable to allocate "
890					    "TX mbuf\n", sc->sc_dev.dv_xname);
891					break;
892				}
893				if (len > (MHLEN - MEC_ETHER_ALIGN)) {
894					MCLGET(m, M_DONTWAIT);
895					if ((m->m_flags & M_EXT) == 0) {
896						printf("%s: unable to allocate "
897						    "TX cluster\n",
898						    sc->sc_dev.dv_xname);
899						m_freem(m);
900						break;
901					}
902				}
903				/*
904				 * Each packet has an Ethernet header, so
905				 * in many cases the header isn't 4-byte aligned
906				 * while the data after the header is 4-byte
907				 * aligned. Thus adding a 2-byte offset before
908				 * copying to the new mbuf avoids an unaligned
909				 * copy and may improve performance a bit.
910				 * As noted above, the unaligned part has to be
911				 * copied to the txdesc buffer, so this may cause
912				 * extra copy ops, but for now the MEC always
913				 * requires some data in the txdesc buffer,
914				 * so we always have to copy some data anyway.
915				 */
916				m->m_data += MEC_ETHER_ALIGN;
917				m_copydata(m0, 0, len, mtod(m, void *));
918				m->m_pkthdr.len = m->m_len = len;
919				error = bus_dmamap_load_mbuf(sc->sc_dmat,
920				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
921				if (error) {
922					printf("%s: unable to load TX buffer, "
923					    "error = %d\n",
924					    sc->sc_dev.dv_xname, error);
925					break;
926				}
927			}
928			IFQ_DEQUEUE(&ifp->if_snd, m0);
929			if (m != NULL) {
930				m_freem(m0);
931				m0 = m;
932			}
933
934			/* handle unaligned part */
935			txdaddr = MEC_TXD_ROUNDUP(dmamap->dm_segs[0].ds_addr);
936			txs->txs_flags = MEC_TXS_TXDPTR1;
937			unaligned =
938			    dmamap->dm_segs[0].ds_addr & (MEC_TXD_ALIGN - 1);
939			DPRINTF(MEC_DEBUG_START,
940			    ("mec_start: ds_addr = 0x%08x, unaligned = %d\n",
941			    (u_int)dmamap->dm_segs[0].ds_addr, unaligned));
942			if (unaligned != 0) {
943				buflen = MEC_TXD_ALIGN - unaligned;
944				bufoff = MEC_TXD_BUFSTART(buflen);
945				DPRINTF(MEC_DEBUG_START,
946				    ("mec_start: unaligned, "
947				    "buflen = %d, bufoff = %d\n",
948				    buflen, bufoff));
949				memcpy(txd->txd_buf + bufoff,
950				    mtod(m0, void *), buflen);
951				txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
952			}
953#if 1
954			else {
955				/*
956				 * XXX needs hardware info XXX
957				 * It seems the MEC always requires some data
958				 * in txd_buf[] even if the buffer is
959				 * 8-byte aligned, otherwise a DMA abort error
960				 * occurs later...
961				 */
962				buflen = MEC_TXD_ALIGN;
963				bufoff = MEC_TXD_BUFSTART(buflen);
964				memcpy(txd->txd_buf + bufoff,
965				    mtod(m0, void *), buflen);
966				DPRINTF(MEC_DEBUG_START,
967				    ("mec_start: aligned, "
968				    "buflen = %d, bufoff = %d\n",
969				    buflen, bufoff));
970				txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
971				txdaddr += MEC_TXD_ALIGN;
972			}
973#endif
974			txdlen  = len - buflen;
975			DPRINTF(MEC_DEBUG_START,
976			    ("mec_start: txdaddr = 0x%08llx, txdlen = %d\n",
977			    txdaddr, txdlen));
978
979			/*
980			 * sync the DMA map for TX mbuf
981			 *
982			 * XXX unaligned part doesn't have to be sync'ed,
983			 *     but it's harmless...
984			 */
985			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
986			    dmamap->dm_mapsize,	BUS_DMASYNC_PREWRITE);
987		}
988
989#if NBPFILTER > 0
990		/*
991		 * Pass packet to bpf if there is a listener.
992		 */
993		if (ifp->if_bpf)
994			bpf_mtap(ifp->if_bpf, m0);
995#endif
996
997		/*
998		 * setup the transmit descriptor.
999		 */
1000
1001		/* TXINT bit will be set later on the last packet */
1002		txd->txd_cmd = (len - 1);
1003		/* but also set the TXINT bit halfway through the TX ring */
1004		if (sc->sc_txpending == (MEC_NTXDESC / 2))
1005			txd->txd_cmd |= MEC_TXCMD_TXINT;
1006
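		/*
		 * Note MEC_TXDESCSIZE - buflen equals MEC_TXD_BUFOFFSET + bufoff,
		 * i.e. the BUFSTART field holds the byte offset of the copied
		 * data from the top of this descriptor.
		 */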
1007		if (txs->txs_flags & MEC_TXS_TXDBUF)
1008			txd->txd_cmd |= TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen);
1009		if (txs->txs_flags & MEC_TXS_TXDPTR1) {
1010			txd->txd_cmd |= MEC_TXCMD_PTR1;
1011			txd->txd_ptr[0] = TXPTR_LEN(txdlen - 1) | txdaddr;
1012			/*
1013			 * Store a pointer to the packet so we can
1014			 * free it later.
1015			 */
1016			txs->txs_mbuf = m0;
1017		} else {
1018			txd->txd_ptr[0] = 0;
1019			/*
1020			 * In this case all data is copied to the buffer in the
1021			 * txdesc, so we can free the TX mbuf here.
1022			 */
1023			m_freem(m0);
1024		}
1025
1026		DPRINTF(MEC_DEBUG_START,
1027		    ("mec_start: txd_cmd = 0x%016llx, txd_ptr = 0x%016llx\n",
1028		    txd->txd_cmd, txd->txd_ptr[0]));
1029		DPRINTF(MEC_DEBUG_START,
1030		    ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n",
1031		    len, len, buflen, buflen));
1032
1033		/* sync TX descriptor */
1034		MEC_TXDESCSYNC(sc, nexttx,
1035		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1036
1037		/* advance the TX pointer. */
1038		sc->sc_txpending++;
1039		sc->sc_txlast = nexttx;
1040	}
1041
1042	if (sc->sc_txpending == MEC_NTXDESC) {
1043		/* No more slots; notify upper layer. */
1044		ifp->if_flags |= IFF_OACTIVE;
1045	}
1046
1047	if (sc->sc_txpending != opending) {
1048		/*
1049		 * Cause a TX interrupt to happen on the last packet
1050		 * we enqueued.
1051		 */
1052		sc->sc_txdesc[sc->sc_txlast].txd_cmd |= MEC_TXCMD_TXINT;
1053		MEC_TXCMDSYNC(sc, sc->sc_txlast,
1054		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1055
1056		/* start TX */
1057		bus_space_write_8(st, sh, MEC_TX_RING_PTR,
1058		    MEC_NEXTTX(sc->sc_txlast));
1059
1060		/*
1061		 * If the transmitter was idle,
1062		 * reset the txdirty pointer and reenable TX interrupt.
1063		 */
1064		if (opending == 0) {
1065			sc->sc_txdirty = firsttx;
1066			bus_space_write_8(st, sh, MEC_TX_ALIAS,
1067			    MEC_TX_ALIAS_INT_ENABLE);
1068		}
1069
1070		/* Set a watchdog timer in case the chip flakes out. */
1071		ifp->if_timer = 5;
1072	}
1073}
1074
1075STATIC void
1076mec_stop(struct ifnet *ifp, int disable)
1077{
1078	struct mec_softc *sc = ifp->if_softc;
1079	struct mec_txsoft *txs;
1080	int i;
1081
1082	DPRINTF(MEC_DEBUG_STOP, ("mec_stop\n"));
1083
1084	ifp->if_timer = 0;
1085	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1086
1087	callout_stop(&sc->sc_tick_ch);
1088	mii_down(&sc->sc_mii);
1089
1090	/* release any TX buffers */
1091	for (i = 0; i < MEC_NTXDESC; i++) {
1092		txs = &sc->sc_txsoft[i];
1093		if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
1094			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1095			m_freem(txs->txs_mbuf);
1096			txs->txs_mbuf = NULL;
1097		}
1098	}
1099}
1100
1101STATIC int
1102mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1103{
1104	struct mec_softc *sc = ifp->if_softc;
1105	struct ifreq *ifr = (void *)data;
1106	int s, error;
1107
1108	s = splnet();
1109
1110	switch (cmd) {
1111	case SIOCSIFMEDIA:
1112	case SIOCGIFMEDIA:
1113		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1114		break;
1115
1116	default:
1117		error = ether_ioctl(ifp, cmd, data);
1118		if (error == ENETRESET) {
1119			/*
1120			 * Multicast list has changed; set the hardware filter
1121			 * accordingly.
1122			 */
1123			if (ifp->if_flags & IFF_RUNNING)
1124				error = mec_init(ifp);
1125			else
1126				error = 0;
1127		}
1128		break;
1129	}
1130
1131	/* Try to get more packets going. */
1132	mec_start(ifp);
1133
1134	splx(s);
1135	return error;
1136}
1137
1138STATIC void
1139mec_watchdog(struct ifnet *ifp)
1140{
1141	struct mec_softc *sc = ifp->if_softc;
1142
1143	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1144	ifp->if_oerrors++;
1145
1146	mec_init(ifp);
1147}
1148
1149STATIC void
1150mec_tick(void *arg)
1151{
1152	struct mec_softc *sc = arg;
1153	int s;
1154
1155	s = splnet();
1156	mii_tick(&sc->sc_mii);
1157	splx(s);
1158
1159	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
1160}
1161
1162STATIC void
1163mec_setfilter(struct mec_softc *sc)
1164{
1165	struct ethercom *ec = &sc->sc_ethercom;
1166	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1167	struct ether_multi *enm;
1168	struct ether_multistep step;
1169	bus_space_tag_t st = sc->sc_st;
1170	bus_space_handle_t sh = sc->sc_sh;
1171	uint64_t mchash;
1172	uint32_t control, hash;
1173	int mcnt;
1174
1175	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1176	control &= ~MEC_MAC_FILTER_MASK;
1177
1178	if (ifp->if_flags & IFF_PROMISC) {
1179		control |= MEC_MAC_FILTER_PROMISC;
1180		bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1181		bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1182		return;
1183	}
1184
1185	mcnt = 0;
1186	mchash = 0;
1187	ETHER_FIRST_MULTI(step, ec, enm);
1188	while (enm != NULL) {
1189		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1190			/* set allmulti for a range of multicast addresses */
1191			control |= MEC_MAC_FILTER_ALLMULTI;
1192			bus_space_write_8(st, sh, MEC_MULTICAST,
1193			    0xffffffffffffffffULL);
1194			bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1195			return;
1196		}
1197
1198#define mec_calchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
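		/*
		 * The hash is the upper 6 bits of the big-endian CRC32 of the
		 * address, selecting one of the 64 bits written to the
		 * MEC_MULTICAST filter register below.
		 */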
1199
1200		hash = mec_calchash(enm->enm_addrlo);
1201		mchash |= 1 << hash;
1202		mchash |= 1ULL << hash;
1203		ETHER_NEXT_MULTI(step, enm);
1204	}
1205
1206	ifp->if_flags &= ~IFF_ALLMULTI;
1207
1208	if (mcnt > 0)
1209		control |= MEC_MAC_FILTER_MATCHMULTI;
1210
1211	bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1212	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1213}
1214
1215STATIC int
1216mec_intr(void *arg)
1217{
1218	struct mec_softc *sc = arg;
1219	bus_space_tag_t st = sc->sc_st;
1220	bus_space_handle_t sh = sc->sc_sh;
1221	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1222	uint32_t statreg, statack, dmac;
1223	int handled, sent;
1224
1225	DPRINTF(MEC_DEBUG_INTR, ("mec_intr: called\n"));
1226
1227	handled = sent = 0;
1228
1229	for (;;) {
1230		statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);
1231
1232		DPRINTF(MEC_DEBUG_INTR,
1233		    ("mec_intr: INT_STAT = 0x%08x\n", statreg));
1234
1235		statack = statreg & MEC_INT_STATUS_MASK;
1236		if (statack == 0)
1237			break;
1238		bus_space_write_8(st, sh, MEC_INT_STATUS, statack);
1239
1240		handled = 1;
1241
1242		if (statack &
1243		    (MEC_INT_RX_THRESHOLD |
1244		     MEC_INT_RX_FIFO_UNDERFLOW)) {
1245			mec_rxintr(sc);
1246		}
1247
1248		dmac = bus_space_read_8(st, sh, MEC_DMA_CONTROL);
1249		DPRINTF(MEC_DEBUG_INTR,
1250		    ("mec_intr: DMA_CONT = 0x%08x\n", dmac));
1251
1252		if (statack &
1253		    (MEC_INT_TX_EMPTY |
1254		     MEC_INT_TX_PACKET_SENT |
1255		     MEC_INT_TX_ABORT)) {
1256			mec_txintr(sc);
1257			sent = 1;
1258			if ((statack & MEC_INT_TX_EMPTY) != 0 &&
1259			    (dmac & MEC_DMA_TX_INT_ENABLE) != 0) {
1260				/*
1261				 * disable TX interrupt to stop
1262				 * TX empty interrupt
1263				 */
1264				bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
1265				DPRINTF(MEC_DEBUG_INTR,
1266				    ("mec_intr: disable TX_INT\n"));
1267			}
1268		}
1269
1270		if (statack &
1271		    (MEC_INT_TX_LINK_FAIL |
1272		     MEC_INT_TX_MEM_ERROR |
1273		     MEC_INT_TX_ABORT |
1274		     MEC_INT_RX_FIFO_UNDERFLOW |
1275		     MEC_INT_RX_DMA_UNDERFLOW)) {
1276			printf("%s: mec_intr: interrupt status = 0x%08x\n",
1277			    sc->sc_dev.dv_xname, statreg);
1278		}
1279	}
1280
1281	if (sent) {
1282		/* try to get more packets going */
1283		mec_start(ifp);
1284	}
1285
1286#if NRND > 0
1287	if (handled)
1288		rnd_add_uint32(&sc->sc_rnd_source, statreg);
1289#endif
1290
1291	return handled;
1292}
1293
1294STATIC void
1295mec_rxintr(struct mec_softc *sc)
1296{
1297	bus_space_tag_t st = sc->sc_st;
1298	bus_space_handle_t sh = sc->sc_sh;
1299	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1300	struct mbuf *m;
1301	struct mec_rxdesc *rxd;
1302	uint64_t rxstat;
1303	u_int len;
1304	int i;
1305
1306	DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: called\n"));
1307
1308	for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
1309		rxd = &sc->sc_rxdesc[i];
1310
1311		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
1312		rxstat = rxd->rxd_stat;
1313
1314		DPRINTF(MEC_DEBUG_RXINTR,
1315		    ("mec_rxintr: rxstat = 0x%016llx, rxptr = %d\n",
1316		    rxstat, i));
1317		DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxfifo = 0x%08x\n",
1318		    (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));
1319
1320		if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
1321			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1322			break;
1323		}
1324
1325		len = rxstat & MEC_RXSTAT_LEN;
1326
1327		if (len < ETHER_MIN_LEN ||
1328		    len > (MCLBYTES - MEC_ETHER_ALIGN)) {
1329			/* invalid length packet; drop it. */
1330			DPRINTF(MEC_DEBUG_RXINTR,
1331			    ("mec_rxintr: wrong packet\n"));
1332 dropit:
1333			ifp->if_ierrors++;
1334			rxd->rxd_stat = 0;
1335			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1336			bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
1337			    MEC_CDRXADDR(sc, i));
1338			continue;
1339		}
1340
1341		if (rxstat &
1342		    (MEC_RXSTAT_BADPACKET |
1343		     MEC_RXSTAT_LONGEVENT |
1344		     MEC_RXSTAT_INVALID   |
1345		     MEC_RXSTAT_CRCERROR  |
1346		     MEC_RXSTAT_VIOLATION)) {
1347			printf("%s: mec_rxintr: status = 0x%016llx\n",
1348			    sc->sc_dev.dv_xname, rxstat);
1349			goto dropit;
1350		}
1351
1352		/*
1353		 * The MEC includes the CRC with every packet.  Trim
1354		 * it off here.
1355		 */
1356		len -= ETHER_CRC_LEN;
1357
1358		/*
1359		 * now allocate an mbuf (and possibly a cluster) to hold
1360		 * the received packet.
1361		 */
1362		MGETHDR(m, M_DONTWAIT, MT_DATA);
1363		if (m == NULL) {
1364			printf("%s: unable to allocate RX mbuf\n",
1365			    sc->sc_dev.dv_xname);
1366			goto dropit;
1367		}
1368		if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1369			MCLGET(m, M_DONTWAIT);
1370			if ((m->m_flags & M_EXT) == 0) {
1371				printf("%s: unable to allocate RX cluster\n",
1372				    sc->sc_dev.dv_xname);
1373				m_freem(m);
1374				m = NULL;
1375				goto dropit;
1376			}
1377		}
1378
1379		/*
1380		 * Note the MEC chip seems to insert 2 byte padding at the top of the
1381		 * RX buffer, but we copy the whole buffer to avoid an unaligned copy.
1382		 */
1383		MEC_RXBUFSYNC(sc, i, len, BUS_DMASYNC_POSTREAD);
1384		memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
1385		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
1386		m->m_data += MEC_ETHER_ALIGN;
1387
1388		/* put RX buffer into FIFO again */
1389		rxd->rxd_stat = 0;
1390		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1391		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
1392
1393		m->m_pkthdr.rcvif = ifp;
1394		m->m_pkthdr.len = m->m_len = len;
1395
1396		ifp->if_ipackets++;
1397
1398#if NBPFILTER > 0
1399		/*
1400		 * Pass this up to any BPF listeners, but only
1401		 * pass it up the stack if it's for us.
1402		 */
1403		if (ifp->if_bpf)
1404			bpf_mtap(ifp->if_bpf, m);
1405#endif
1406
1407		/* Pass it on. */
1408		(*ifp->if_input)(ifp, m);
1409	}
1410
1411	/* update RX pointer */
1412	sc->sc_rxptr = i;
1413}
1414
1415STATIC void
1416mec_txintr(struct mec_softc *sc)
1417{
1418	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1419	struct mec_txdesc *txd;
1420	struct mec_txsoft *txs;
1421	bus_dmamap_t dmamap;
1422	uint64_t txstat;
1423	int i;
1424	u_int col;
1425
1426	ifp->if_flags &= ~IFF_OACTIVE;
1427
1428	DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: called\n"));
1429
1430	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
1431	    i = MEC_NEXTTX(i), sc->sc_txpending--) {
1432		txd = &sc->sc_txdesc[i];
1433
1434		MEC_TXDESCSYNC(sc, i,
1435		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1436
1437		txstat = txd->txd_stat;
1438		DPRINTF(MEC_DEBUG_TXINTR,
1439		    ("mec_txintr: dirty = %d, txstat = 0x%016llx\n",
1440		    i, txstat));
1441		if ((txstat & MEC_TXSTAT_SENT) == 0) {
1442			MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
1443			break;
1444		}
1445
1446		if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
1447			printf("%s: TX error: txstat = 0x%016llx\n",
1448			    sc->sc_dev.dv_xname, txstat);
1449			ifp->if_oerrors++;
1450			continue;
1451		}
1452
1453		txs = &sc->sc_txsoft[i];
1454		if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
1455			dmamap = txs->txs_dmamap;
1456			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
1457			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1458			bus_dmamap_unload(sc->sc_dmat, dmamap);
1459			m_freem(txs->txs_mbuf);
1460			txs->txs_mbuf = NULL;
1461		}
1462
1463		col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
1464		ifp->if_collisions += col;
1465		ifp->if_opackets++;
1466	}
1467
1468	/* update the dirty TX buffer pointer */
1469	sc->sc_txdirty = i;
1470	DPRINTF(MEC_DEBUG_INTR,
1471	    ("mec_txintr: sc_txdirty = %2d, sc_txpending = %2d\n",
1472	    sc->sc_txdirty, sc->sc_txpending));
1473
1474	/* cancel the watchdog timer if there are no pending TX packets */
1475	if (sc->sc_txpending == 0)
1476		ifp->if_timer = 0;
1477}
1478
1479STATIC void
1480mec_shutdown(void *arg)
1481{
1482	struct mec_softc *sc = arg;
1483
1484	mec_stop(&sc->sc_ethercom.ec_if, 1);
1485}
1486