if_mec.c revision 1.45
1/* $NetBSD: if_mec.c,v 1.45 2011/02/20 07:59:51 matt Exp $ */
2
3/*-
4 * Copyright (c) 2004, 2008 Izumi Tsutsui.  All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27/*
28 * Copyright (c) 2003 Christopher SEKIYA
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 *    notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 *    notice, this list of conditions and the following disclaimer in the
38 *    documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 *    must display the following acknowledgement:
41 *          This product includes software developed for the
42 *          NetBSD Project.  See http://www.NetBSD.org/ for
43 *          information about NetBSD.
44 * 4. The name of the author may not be used to endorse or promote products
45 *    derived from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */
58
59/*
60 * MACE MAC-110 Ethernet driver
61 */
62
63#include <sys/cdefs.h>
64__KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.45 2011/02/20 07:59:51 matt Exp $");
65
66#include "opt_ddb.h"
67#include "rnd.h"
68
69#include <sys/param.h>
70#include <sys/systm.h>
71#include <sys/device.h>
72#include <sys/callout.h>
73#include <sys/mbuf.h>
74#include <sys/malloc.h>
75#include <sys/kernel.h>
76#include <sys/socket.h>
77#include <sys/ioctl.h>
78#include <sys/errno.h>
79
80#if NRND > 0
81#include <sys/rnd.h>
82#endif
83
84#include <net/if.h>
85#include <net/if_dl.h>
86#include <net/if_media.h>
87#include <net/if_ether.h>
88
89#include <netinet/in.h>
90#include <netinet/in_systm.h>
91#include <netinet/ip.h>
92#include <netinet/tcp.h>
93#include <netinet/udp.h>
94
95#include <net/bpf.h>
96
97#include <machine/bus.h>
98#include <machine/intr.h>
99#include <machine/machtype.h>
100
101#include <dev/mii/mii.h>
102#include <dev/mii/miivar.h>
103
104#include <sgimips/mace/macevar.h>
105#include <sgimips/mace/if_mecreg.h>
106
107#include <dev/arcbios/arcbios.h>
108#include <dev/arcbios/arcbiosvar.h>
109
110/* #define MEC_DEBUG */
111
112#ifdef MEC_DEBUG
113#define MEC_DEBUG_RESET		0x01
114#define MEC_DEBUG_START		0x02
115#define MEC_DEBUG_STOP		0x04
116#define MEC_DEBUG_INTR		0x08
117#define MEC_DEBUG_RXINTR	0x10
118#define MEC_DEBUG_TXINTR	0x20
119#define MEC_DEBUG_TXSEGS	0x40
120uint32_t mec_debug = 0;
121#define DPRINTF(x, y)	if (mec_debug & (x)) printf y
122#else
123#define DPRINTF(x, y)	/* nothing */
124#endif
125
126/* #define MEC_EVENT_COUNTERS */
127
128#ifdef MEC_EVENT_COUNTERS
129#define MEC_EVCNT_INCR(ev)	(ev)->ev_count++
130#else
131#define MEC_EVCNT_INCR(ev)	do {} while (/* CONSTCOND */ 0)
132#endif
133
134/*
135 * Transmit descriptor list size
136 */
137#define MEC_NTXDESC		64
138#define MEC_NTXDESC_MASK	(MEC_NTXDESC - 1)
139#define MEC_NEXTTX(x)		(((x) + 1) & MEC_NTXDESC_MASK)
140#define MEC_NTXDESC_RSVD	4
141#define MEC_NTXDESC_INTR	8
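/*
 * MEC_NTXDESC_INTR is the interval at which mec_start() requests a
 * TX-done interrupt (MEC_TXCMD_TXINT) once more than half of the TX
 * ring has been queued; see the comment in mec_start() below.
 */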
142
143/*
144 * software state for TX
145 */
146struct mec_txsoft {
147	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
148	bus_dmamap_t txs_dmamap;	/* our DMA map */
149	uint32_t txs_flags;
150#define MEC_TXS_BUFLEN_MASK	0x0000007f	/* data len in txd_buf */
151#define MEC_TXS_TXDPTR		0x00000080	/* concat txd_ptr is used */
152};
153
154/*
155 * Transmit buffer descriptor
156 */
157#define MEC_TXDESCSIZE		128
158#define MEC_NTXPTR		3
159#define MEC_TXD_BUFOFFSET	sizeof(uint64_t)
160#define MEC_TXD_BUFOFFSET1	\
161	(sizeof(uint64_t) + sizeof(uint64_t) * MEC_NTXPTR)
162#define MEC_TXD_BUFSIZE		(MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
163#define MEC_TXD_BUFSIZE1	(MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET1)
164#define MEC_TXD_BUFSTART(len)	(MEC_TXD_BUFSIZE - (len))
165#define MEC_TXD_ALIGN		8
166#define MEC_TXD_ALIGNMASK	(MEC_TXD_ALIGN - 1)
167#define MEC_TXD_ROUNDUP(addr)	\
168	(((addr) + MEC_TXD_ALIGNMASK) & ~(uint64_t)MEC_TXD_ALIGNMASK)
169#define MEC_NTXSEG		16
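/*
 * Layout implied by the defines above: each 128-byte txdesc starts with a
 * 64-bit command/status word, followed either by a 120-byte inline buffer
 * (MEC_TXD_BUFOFFSET == 8) or by three 64-bit concatenate pointers plus a
 * 96-byte inline buffer (MEC_TXD_BUFOFFSET1 == 32).  Data copied into the
 * inline buffer is placed at its tail (MEC_TXD_BUFSTART), so the buffer
 * always ends at the 128-byte descriptor boundary.
 */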
170
171struct mec_txdesc {
172	volatile uint64_t txd_cmd;
173#define MEC_TXCMD_DATALEN	0x000000000000ffff	/* data length */
174#define MEC_TXCMD_BUFSTART	0x00000000007f0000	/* start byte offset */
175#define  TXCMD_BUFSTART(x)	((x) << 16)
176#define MEC_TXCMD_TERMDMA	0x0000000000800000	/* stop DMA on abort */
177#define MEC_TXCMD_TXINT		0x0000000001000000	/* INT after TX done */
178#define MEC_TXCMD_PTR1		0x0000000002000000	/* valid 1st txd_ptr */
179#define MEC_TXCMD_PTR2		0x0000000004000000	/* valid 2nd txd_ptr */
180#define MEC_TXCMD_PTR3		0x0000000008000000	/* valid 3rd txd_ptr */
181#define MEC_TXCMD_UNUSED	0xfffffffff0000000ULL	/* should be zero */
182
183#define txd_stat	txd_cmd
184#define MEC_TXSTAT_LEN		0x000000000000ffff	/* TX length */
185#define MEC_TXSTAT_COLCNT	0x00000000000f0000	/* collision count */
186#define MEC_TXSTAT_COLCNT_SHIFT	16
187#define MEC_TXSTAT_LATE_COL	0x0000000000100000	/* late collision */
188#define MEC_TXSTAT_CRCERROR	0x0000000000200000	/* */
189#define MEC_TXSTAT_DEFERRED	0x0000000000400000	/* */
190#define MEC_TXSTAT_SUCCESS	0x0000000000800000	/* TX complete */
191#define MEC_TXSTAT_TOOBIG	0x0000000001000000	/* */
192#define MEC_TXSTAT_UNDERRUN	0x0000000002000000	/* */
193#define MEC_TXSTAT_COLLISIONS	0x0000000004000000	/* */
194#define MEC_TXSTAT_EXDEFERRAL	0x0000000008000000	/* */
195#define MEC_TXSTAT_COLLIDED	0x0000000010000000	/* */
196#define MEC_TXSTAT_UNUSED	0x7fffffffe0000000ULL	/* should be zero */
197#define MEC_TXSTAT_SENT		0x8000000000000000ULL	/* packet sent */
198
199	union {
200		uint64_t txptr[MEC_NTXPTR];
201#define MEC_TXPTR_UNUSED2	0x0000000000000007	/* should be zero */
202#define MEC_TXPTR_DMAADDR	0x00000000fffffff8	/* TX DMA address */
203#define MEC_TXPTR_LEN		0x0000ffff00000000ULL	/* buffer length */
204#define  TXPTR_LEN(x)		((uint64_t)(x) << 32)
205#define MEC_TXPTR_UNUSED1	0xffff000000000000ULL	/* should be zero */
206
207		uint8_t txbuf[MEC_TXD_BUFSIZE];
208	} txd_data;
209#define txd_ptr		txd_data.txptr
210#define txd_buf		txd_data.txbuf
211};
212
213/*
214 * Receive buffer size
215 */
216#define MEC_NRXDESC		16
217#define MEC_NRXDESC_MASK	(MEC_NRXDESC - 1)
218#define MEC_NEXTRX(x)		(((x) + 1) & MEC_NRXDESC_MASK)
219
220/*
221 * Receive buffer descriptor
222 */
223#define MEC_RXDESCSIZE		4096	/* umm, should be 4kbyte aligned */
224#define MEC_RXD_NRXPAD		3
225#define MEC_RXD_DMAOFFSET	(1 + MEC_RXD_NRXPAD)
226#define MEC_RXD_BUFOFFSET	(MEC_RXD_DMAOFFSET * sizeof(uint64_t))
227#define MEC_RXD_BUFSIZE		(MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
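/*
 * So each 4096-byte rxdesc is an 8-byte status word, 24 bytes of padding,
 * and a 4064-byte packet buffer (MEC_RXD_BUFOFFSET == 32).  The
 * MEC_RXD_DMAOFFSET value (in 64-bit words) is programmed into
 * MEC_DMA_CONTROL in mec_init(), presumably so the chip starts writing
 * packet data past that header.
 */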
228
229struct mec_rxdesc {
230	volatile uint64_t rxd_stat;
231#define MEC_RXSTAT_LEN		0x000000000000ffff	/* data length */
232#define MEC_RXSTAT_VIOLATION	0x0000000000010000	/* code violation (?) */
233#define MEC_RXSTAT_UNUSED2	0x0000000000020000	/* unknown (?) */
234#define MEC_RXSTAT_CRCERROR	0x0000000000040000	/* CRC error */
235#define MEC_RXSTAT_MULTICAST	0x0000000000080000	/* multicast packet */
236#define MEC_RXSTAT_BROADCAST	0x0000000000100000	/* broadcast packet */
237#define MEC_RXSTAT_INVALID	0x0000000000200000	/* invalid preamble */
238#define MEC_RXSTAT_LONGEVENT	0x0000000000400000	/* long packet */
239#define MEC_RXSTAT_BADPACKET	0x0000000000800000	/* bad packet */
240#define MEC_RXSTAT_CAREVENT	0x0000000001000000	/* carrier event */
241#define MEC_RXSTAT_MATCHMCAST	0x0000000002000000	/* match multicast */
242#define MEC_RXSTAT_MATCHMAC	0x0000000004000000	/* match MAC */
243#define MEC_RXSTAT_SEQNUM	0x00000000f8000000	/* sequence number */
244#define MEC_RXSTAT_CKSUM	0x0000ffff00000000ULL	/* IP checksum */
245#define  RXSTAT_CKSUM(x)	(((uint64_t)(x) & MEC_RXSTAT_CKSUM) >> 32)
246#define MEC_RXSTAT_UNUSED1	0x7fff000000000000ULL	/* should be zero */
247#define MEC_RXSTAT_RECEIVED	0x8000000000000000ULL	/* set to 1 on RX */
248	uint64_t rxd_pad1[MEC_RXD_NRXPAD];
249	uint8_t  rxd_buf[MEC_RXD_BUFSIZE];
250};
251
252/*
253 * control structures for DMA ops
254 */
255struct mec_control_data {
256	/*
257	 * TX descriptors and buffers
258	 */
259	struct mec_txdesc mcd_txdesc[MEC_NTXDESC];
260
261	/*
262	 * RX descriptors and buffers
263	 */
264	struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
265};
266
267/*
268 * It _seems_ there are some restrictions on descriptor address:
269 *
270 * - Base address of txdescs should be 8kbyte aligned
271 * - Each txdesc should be 128byte aligned
272 * - Each rxdesc should be 4kbyte aligned
273 *
274 * So we should specify 8kbyte alignment to allocate txdescs.
275 * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
276 * so the rxdescs also end up 4kbyte aligned.
277 */
278#define MEC_CONTROL_DATA_ALIGN	(8 * 1024)
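/*
 * A compile-time sanity check along these lines could verify that the
 * descriptor structures really match the sizes assumed above (an
 * illustrative sketch, not part of the original driver):
 */
#if 0
typedef char mec_txdesc_size_check[(sizeof(struct mec_txdesc) == MEC_TXDESCSIZE) ? 1 : -1];
typedef char mec_rxdesc_size_check[(sizeof(struct mec_rxdesc) == MEC_RXDESCSIZE) ? 1 : -1];
#endif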
279
280#define MEC_CDOFF(x)	offsetof(struct mec_control_data, x)
281#define MEC_CDTXOFF(x)	MEC_CDOFF(mcd_txdesc[(x)])
282#define MEC_CDRXOFF(x)	MEC_CDOFF(mcd_rxdesc[(x)])
283
284/*
285 * software state per device
286 */
287struct mec_softc {
288	device_t sc_dev;		/* generic device structures */
289
290	bus_space_tag_t sc_st;		/* bus_space tag */
291	bus_space_handle_t sc_sh;	/* bus_space handle */
292	bus_dma_tag_t sc_dmat;		/* bus_dma tag */
293
294	struct ethercom sc_ethercom;	/* Ethernet common part */
295
296	struct mii_data sc_mii;		/* MII/media information */
297	int sc_phyaddr;			/* MII address */
298	struct callout sc_tick_ch;	/* tick callout */
299
300	uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */
301
302	bus_dmamap_t sc_cddmamap;	/* bus_dma map for control data */
303#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
304
305	/* pointer to allocated control data */
306	struct mec_control_data *sc_control_data;
307#define sc_txdesc	sc_control_data->mcd_txdesc
308#define sc_rxdesc	sc_control_data->mcd_rxdesc
309
310	/* software state for TX descs */
311	struct mec_txsoft sc_txsoft[MEC_NTXDESC];
312
313	int sc_txpending;		/* number of TX requests pending */
314	int sc_txdirty;			/* first dirty TX descriptor */
315	int sc_txlast;			/* last used TX descriptor */
316
317	int sc_rxptr;			/* next ready RX buffer */
318
319#if NRND > 0
320	rndsource_element_t sc_rnd_source; /* random source */
321#endif
322#ifdef MEC_EVENT_COUNTERS
323	struct evcnt sc_ev_txpkts;	/* TX packets queued total */
324	struct evcnt sc_ev_txdpad;	/* TX packets padded in txdesc buf */
325	struct evcnt sc_ev_txdbuf;	/* TX packets copied to txdesc buf */
326	struct evcnt sc_ev_txptr1;	/* TX packets using concat ptr1 */
327	struct evcnt sc_ev_txptr1a;	/* TX packets  w/ptr1  ~160bytes */
328	struct evcnt sc_ev_txptr1b;	/* TX packets  w/ptr1  ~256bytes */
329	struct evcnt sc_ev_txptr1c;	/* TX packets  w/ptr1  ~512bytes */
330	struct evcnt sc_ev_txptr1d;	/* TX packets  w/ptr1 ~1024bytes */
331	struct evcnt sc_ev_txptr1e;	/* TX packets  w/ptr1 >1024bytes */
332	struct evcnt sc_ev_txptr2;	/* TX packets using concat ptr1,2 */
333	struct evcnt sc_ev_txptr2a;	/* TX packets  w/ptr2  ~160bytes */
334	struct evcnt sc_ev_txptr2b;	/* TX packets  w/ptr2  ~256bytes */
335	struct evcnt sc_ev_txptr2c;	/* TX packets  w/ptr2  ~512bytes */
336	struct evcnt sc_ev_txptr2d;	/* TX packets  w/ptr2 ~1024bytes */
337	struct evcnt sc_ev_txptr2e;	/* TX packets  w/ptr2 >1024bytes */
338	struct evcnt sc_ev_txptr3;	/* TX packets using concat ptr1,2,3 */
339	struct evcnt sc_ev_txptr3a;	/* TX packets  w/ptr3  ~160bytes */
340	struct evcnt sc_ev_txptr3b;	/* TX packets  w/ptr3  ~256bytes */
341	struct evcnt sc_ev_txptr3c;	/* TX packets  w/ptr3  ~512bytes */
342	struct evcnt sc_ev_txptr3d;	/* TX packets  w/ptr3 ~1024bytes */
343	struct evcnt sc_ev_txptr3e;	/* TX packets  w/ptr3 >1024bytes */
344	struct evcnt sc_ev_txmbuf;	/* TX packets copied to new mbufs */
345	struct evcnt sc_ev_txmbufa;	/* TX packets  w/mbuf  ~160bytes */
346	struct evcnt sc_ev_txmbufb;	/* TX packets  w/mbuf  ~256bytes */
347	struct evcnt sc_ev_txmbufc;	/* TX packets  w/mbuf  ~512bytes */
348	struct evcnt sc_ev_txmbufd;	/* TX packets  w/mbuf ~1024bytes */
349	struct evcnt sc_ev_txmbufe;	/* TX packets  w/mbuf >1024bytes */
350	struct evcnt sc_ev_txptrs;	/* TX packets using ptrs total */
351	struct evcnt sc_ev_txptrc0;	/* TX packets  w/ptrs no hdr chain */
352	struct evcnt sc_ev_txptrc1;	/* TX packets  w/ptrs  1 hdr chain */
353	struct evcnt sc_ev_txptrc2;	/* TX packets  w/ptrs  2 hdr chains */
354	struct evcnt sc_ev_txptrc3;	/* TX packets  w/ptrs  3 hdr chains */
355	struct evcnt sc_ev_txptrc4;	/* TX packets  w/ptrs  4 hdr chains */
356	struct evcnt sc_ev_txptrc5;	/* TX packets  w/ptrs  5 hdr chains */
357	struct evcnt sc_ev_txptrc6;	/* TX packets  w/ptrs >5 hdr chains */
358	struct evcnt sc_ev_txptrh0;	/* TX packets  w/ptrs  ~8bytes hdr */
359	struct evcnt sc_ev_txptrh1;	/* TX packets  w/ptrs ~16bytes hdr */
360	struct evcnt sc_ev_txptrh2;	/* TX packets  w/ptrs ~32bytes hdr */
361	struct evcnt sc_ev_txptrh3;	/* TX packets  w/ptrs ~64bytes hdr */
362	struct evcnt sc_ev_txptrh4;	/* TX packets  w/ptrs ~80bytes hdr */
363	struct evcnt sc_ev_txptrh5;	/* TX packets  w/ptrs ~96bytes hdr */
364	struct evcnt sc_ev_txdstall;	/* TX stalled due to no txdesc */
365	struct evcnt sc_ev_txempty;	/* TX empty interrupts */
366	struct evcnt sc_ev_txsent;	/* TX sent interrupts */
367#endif
368};
369
370#define MEC_CDTXADDR(sc, x)	((sc)->sc_cddma + MEC_CDTXOFF(x))
371#define MEC_CDRXADDR(sc, x)	((sc)->sc_cddma + MEC_CDRXOFF(x))
372
373#define MEC_TXDESCSYNC(sc, x, ops)					\
374	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
375	    MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
376#define MEC_TXCMDSYNC(sc, x, ops)					\
377	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
378	    MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
379
380#define MEC_RXSTATSYNC(sc, x, ops)					\
381	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
382	    MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
383#define MEC_RXBUFSYNC(sc, x, len, ops)					\
384	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
385	    MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET,				\
386	    MEC_ETHER_ALIGN + (len), (ops))
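/*
 * MEC_RXBUFSYNC covers MEC_ETHER_ALIGN extra bytes because the chip
 * appears to prepend 2 bytes of padding to each received frame; see the
 * comment in mec_rxintr() below.
 */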
387
388/* XXX these values should be moved to <net/if_ether.h> ? */
389#define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
390#define MEC_ETHER_ALIGN	2
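/*
 * ETHER_PAD_LEN (60) is the minimum frame length without the FCS; short
 * TX packets are padded up to it in mec_start().  MEC_ETHER_ALIGN is the
 * usual 2-byte offset that makes the IP header longword aligned after the
 * 14-byte Ethernet header.
 */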
391
392static int	mec_match(device_t, cfdata_t, void *);
393static void	mec_attach(device_t, device_t, void *);
394
395static int	mec_mii_readreg(device_t, int, int);
396static void	mec_mii_writereg(device_t, int, int, int);
397static int	mec_mii_wait(struct mec_softc *);
398static void	mec_statchg(device_t);
399
400static int	mec_init(struct ifnet * ifp);
401static void	mec_start(struct ifnet *);
402static void	mec_watchdog(struct ifnet *);
403static void	mec_tick(void *);
404static int	mec_ioctl(struct ifnet *, u_long, void *);
405static void	mec_reset(struct mec_softc *);
406static void	mec_setfilter(struct mec_softc *);
407static int	mec_intr(void *arg);
408static void	mec_stop(struct ifnet *, int);
409static void	mec_rxintr(struct mec_softc *);
410static void	mec_rxcsum(struct mec_softc *, struct mbuf *, uint16_t,
411		    uint32_t);
412static void	mec_txintr(struct mec_softc *, uint32_t);
413static bool	mec_shutdown(device_t, int);
414
415CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc),
416    mec_match, mec_attach, NULL, NULL);
417
418static int mec_matched = 0;
419
420static int
421mec_match(device_t parent, cfdata_t cf, void *aux)
422{
423
424	/* allow only one device */
425	if (mec_matched)
426		return 0;
427
428	mec_matched = 1;
429	return 1;
430}
431
432static void
433mec_attach(device_t parent, device_t self, void *aux)
434{
435	struct mec_softc *sc = device_private(self);
436	struct mace_attach_args *maa = aux;
437	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
438	uint64_t address, command;
439	const char *macaddr;
440	struct mii_softc *child;
441	bus_dma_segment_t seg;
442	int i, err, rseg;
443	bool mac_is_fake;
444
445	sc->sc_dev = self;
446	sc->sc_st = maa->maa_st;
447	if (bus_space_subregion(sc->sc_st, maa->maa_sh,
448	    maa->maa_offset, 0,	&sc->sc_sh) != 0) {
449		aprint_error(": can't map i/o space\n");
450		return;
451	}
452
453	/* set up DMA structures */
454	sc->sc_dmat = maa->maa_dmat;
455
456	/*
457	 * Allocate the control data structures, and create and load the
458	 * DMA map for it.
459	 */
460	if ((err = bus_dmamem_alloc(sc->sc_dmat,
461	    sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
462	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
463		aprint_error(": unable to allocate control data, error = %d\n",
464		    err);
465		goto fail_0;
466	}
467	/*
468	 * XXX needs re-think...
469	 * the control data structures contain the whole RX data buffers, so
470	 * BUS_DMA_COHERENT (which disables caching) may cause a performance
471	 * hit when copying data from the RX buffer to an mbuf in normal memory,
472	 * though without it we have to make sure all bus_dmamap_sync(9)
473	 * ops are called properly.
474	 */
475	if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
476	    sizeof(struct mec_control_data),
477	    (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
478		aprint_error(": unable to map control data, error = %d\n", err);
479		goto fail_1;
480	}
481	memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));
482
483	if ((err = bus_dmamap_create(sc->sc_dmat,
484	    sizeof(struct mec_control_data), 1,
485	    sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
486		aprint_error(": unable to create control data DMA map,"
487		    " error = %d\n", err);
488		goto fail_2;
489	}
490	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
491	    sc->sc_control_data, sizeof(struct mec_control_data), NULL,
492	    BUS_DMA_NOWAIT)) != 0) {
493		aprint_error(": unable to load control data DMA map,"
494		    " error = %d\n", err);
495		goto fail_3;
496	}
497
498	/* create TX buffer DMA maps */
499	for (i = 0; i < MEC_NTXDESC; i++) {
500		if ((err = bus_dmamap_create(sc->sc_dmat,
501		    MCLBYTES, MEC_NTXSEG, MCLBYTES, PAGE_SIZE, 0,
502		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
503			aprint_error(": unable to create tx DMA map %d,"
504			    " error = %d\n", i, err);
505			goto fail_4;
506		}
507	}
508
509	callout_init(&sc->sc_tick_ch, 0);
510
511	/* get Ethernet address from ARCBIOS */
512	if ((macaddr = arcbios_GetEnvironmentVariable("eaddr")) == NULL) {
513		aprint_error(": unable to get MAC address!\n");
514		goto fail_4;
515	}
516	/*
517	 * On some machines the DS2502 chip storing the serial number/
518	 * MAC address is on the PCI riser board - if this board is
519	 * missing, ARCBIOS will not know a good Ethernet address (but
520	 * otherwise the machine will work fine).
521	 */
522	mac_is_fake = false;
523	if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) {
524		uint32_t ui = 0;
525		const char * netaddr =
526			arcbios_GetEnvironmentVariable("netaddr");
527
528		/*
529		 * Create a MAC address by abusing the "netaddr" env var
530		 */
531		sc->sc_enaddr[0] = 0xf2;
532		sc->sc_enaddr[1] = 0x0b;
533		sc->sc_enaddr[2] = 0xa4;
534		if (netaddr) {
535			mac_is_fake = true;
536			while (*netaddr) {
537				int v = 0;
538				while (*netaddr && *netaddr != '.') {
539					if (*netaddr >= '0' && *netaddr <= '9')
540						v = v*10 + (*netaddr - '0');
541					netaddr++;
542				}
543				ui <<= 8;
544				ui |= v;
545				if (*netaddr == '.')
546					netaddr++;
547			}
548		}
549		memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3);
550	}
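	/*
	 * Illustrative example (not from the original source): on this
	 * big-endian platform, a netaddr of "192.168.1.5" parses to
	 * ui == 0xc0a80105, and the memcpy above appends its low three
	 * octets, yielding the fake MAC address f2:0b:a4:a8:01:05.
	 */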
551	if (!mac_is_fake)
552		ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr);
553
554	/* set the Ethernet address */
555	address = 0;
556	for (i = 0; i < ETHER_ADDR_LEN; i++) {
557		address = address << 8;
558		address |= sc->sc_enaddr[i];
559	}
560	bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address);
561
562	/* reset device */
563	mec_reset(sc);
564
565	command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);
566
567	aprint_normal(": MAC-110 Ethernet, rev %u\n",
568	    (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT));
569
570	if (mac_is_fake)
571		aprint_normal_dev(self,
572		    "could not get ethernet address from firmware"
573		    " - generated one from the \"netaddr\" environment"
574		    " variable\n");
575	aprint_normal_dev(self, "Ethernet address %s\n",
576	    ether_sprintf(sc->sc_enaddr));
577
578	/* Done, now attach everything */
579
580	sc->sc_mii.mii_ifp = ifp;
581	sc->sc_mii.mii_readreg = mec_mii_readreg;
582	sc->sc_mii.mii_writereg = mec_mii_writereg;
583	sc->sc_mii.mii_statchg = mec_statchg;
584
585	/* Set up PHY properties */
586	sc->sc_ethercom.ec_mii = &sc->sc_mii;
587	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
588	    ether_mediastatus);
589	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
590	    MII_OFFSET_ANY, 0);
591
592	child = LIST_FIRST(&sc->sc_mii.mii_phys);
593	if (child == NULL) {
594		/* No PHY attached */
595		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
596		    0, NULL);
597		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
598	} else {
599		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
600		sc->sc_phyaddr = child->mii_phy;
601	}
602
603	strcpy(ifp->if_xname, device_xname(self));
604	ifp->if_softc = sc;
605	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
606	ifp->if_ioctl = mec_ioctl;
607	ifp->if_start = mec_start;
608	ifp->if_watchdog = mec_watchdog;
609	ifp->if_init = mec_init;
610	ifp->if_stop = mec_stop;
611	ifp->if_mtu = ETHERMTU;
612	IFQ_SET_READY(&ifp->if_snd);
613
614	/* mec has dumb RX cksum support */
615	ifp->if_capabilities = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx;
616
617	/* We can support 802.1Q VLAN-sized frames. */
618	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
619
620	/* attach the interface */
621	if_attach(ifp);
622	ether_ifattach(ifp, sc->sc_enaddr);
623
624	/* establish interrupt */
625	cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);
626
627#if NRND > 0
628	rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
629	    RND_TYPE_NET, 0);
630#endif
631
632#ifdef MEC_EVENT_COUNTERS
633	evcnt_attach_dynamic(&sc->sc_ev_txpkts , EVCNT_TYPE_MISC,
634	    NULL, device_xname(self), "TX pkts queued total");
635	evcnt_attach_dynamic(&sc->sc_ev_txdpad , EVCNT_TYPE_MISC,
636	    NULL, device_xname(self), "TX pkts padded in txdesc buf");
637	evcnt_attach_dynamic(&sc->sc_ev_txdbuf , EVCNT_TYPE_MISC,
638	    NULL, device_xname(self), "TX pkts copied to txdesc buf");
639	evcnt_attach_dynamic(&sc->sc_ev_txptr1 , EVCNT_TYPE_MISC,
640	    NULL, device_xname(self), "TX pkts using concat ptr1");
641	evcnt_attach_dynamic(&sc->sc_ev_txptr1a , EVCNT_TYPE_MISC,
642	    NULL, device_xname(self), "TX pkts  w/ptr1  ~160bytes");
643	evcnt_attach_dynamic(&sc->sc_ev_txptr1b , EVCNT_TYPE_MISC,
644	    NULL, device_xname(self), "TX pkts  w/ptr1  ~256bytes");
645	evcnt_attach_dynamic(&sc->sc_ev_txptr1c , EVCNT_TYPE_MISC,
646	    NULL, device_xname(self), "TX pkts  w/ptr1  ~512bytes");
647	evcnt_attach_dynamic(&sc->sc_ev_txptr1d , EVCNT_TYPE_MISC,
648	    NULL, device_xname(self), "TX pkts  w/ptr1 ~1024bytes");
649	evcnt_attach_dynamic(&sc->sc_ev_txptr1e , EVCNT_TYPE_MISC,
650	    NULL, device_xname(self), "TX pkts  w/ptr1 >1024bytes");
651	evcnt_attach_dynamic(&sc->sc_ev_txptr2 , EVCNT_TYPE_MISC,
652	    NULL, device_xname(self), "TX pkts using concat ptr1,2");
653	evcnt_attach_dynamic(&sc->sc_ev_txptr2a , EVCNT_TYPE_MISC,
654	    NULL, device_xname(self), "TX pkts  w/ptr2  ~160bytes");
655	evcnt_attach_dynamic(&sc->sc_ev_txptr2b , EVCNT_TYPE_MISC,
656	    NULL, device_xname(self), "TX pkts  w/ptr2  ~256bytes");
657	evcnt_attach_dynamic(&sc->sc_ev_txptr2c , EVCNT_TYPE_MISC,
658	    NULL, device_xname(self), "TX pkts  w/ptr2  ~512bytes");
659	evcnt_attach_dynamic(&sc->sc_ev_txptr2d , EVCNT_TYPE_MISC,
660	    NULL, device_xname(self), "TX pkts  w/ptr2 ~1024bytes");
661	evcnt_attach_dynamic(&sc->sc_ev_txptr2e , EVCNT_TYPE_MISC,
662	    NULL, device_xname(self), "TX pkts  w/ptr2 >1024bytes");
663	evcnt_attach_dynamic(&sc->sc_ev_txptr3 , EVCNT_TYPE_MISC,
664	    NULL, device_xname(self), "TX pkts using concat ptr1,2,3");
665	evcnt_attach_dynamic(&sc->sc_ev_txptr3a , EVCNT_TYPE_MISC,
666	    NULL, device_xname(self), "TX pkts  w/ptr3  ~160bytes");
667	evcnt_attach_dynamic(&sc->sc_ev_txptr3b , EVCNT_TYPE_MISC,
668	    NULL, device_xname(self), "TX pkts  w/ptr3  ~256bytes");
669	evcnt_attach_dynamic(&sc->sc_ev_txptr3c , EVCNT_TYPE_MISC,
670	    NULL, device_xname(self), "TX pkts  w/ptr3  ~512bytes");
671	evcnt_attach_dynamic(&sc->sc_ev_txptr3d , EVCNT_TYPE_MISC,
672	    NULL, device_xname(self), "TX pkts  w/ptr3 ~1024bytes");
673	evcnt_attach_dynamic(&sc->sc_ev_txptr3e , EVCNT_TYPE_MISC,
674	    NULL, device_xname(self), "TX pkts  w/ptr3 >1024bytes");
675	evcnt_attach_dynamic(&sc->sc_ev_txmbuf , EVCNT_TYPE_MISC,
676	    NULL, device_xname(self), "TX pkts copied to new mbufs");
677	evcnt_attach_dynamic(&sc->sc_ev_txmbufa , EVCNT_TYPE_MISC,
678	    NULL, device_xname(self), "TX pkts  w/mbuf  ~160bytes");
679	evcnt_attach_dynamic(&sc->sc_ev_txmbufb , EVCNT_TYPE_MISC,
680	    NULL, device_xname(self), "TX pkts  w/mbuf  ~256bytes");
681	evcnt_attach_dynamic(&sc->sc_ev_txmbufc , EVCNT_TYPE_MISC,
682	    NULL, device_xname(self), "TX pkts  w/mbuf  ~512bytes");
683	evcnt_attach_dynamic(&sc->sc_ev_txmbufd , EVCNT_TYPE_MISC,
684	    NULL, device_xname(self), "TX pkts  w/mbuf ~1024bytes");
685	evcnt_attach_dynamic(&sc->sc_ev_txmbufe , EVCNT_TYPE_MISC,
686	    NULL, device_xname(self), "TX pkts  w/mbuf >1024bytes");
687	evcnt_attach_dynamic(&sc->sc_ev_txptrs , EVCNT_TYPE_MISC,
688	    NULL, device_xname(self), "TX pkts using ptrs total");
689	evcnt_attach_dynamic(&sc->sc_ev_txptrc0 , EVCNT_TYPE_MISC,
690	    NULL, device_xname(self), "TX pkts  w/ptrs no hdr chain");
691	evcnt_attach_dynamic(&sc->sc_ev_txptrc1 , EVCNT_TYPE_MISC,
692	    NULL, device_xname(self), "TX pkts  w/ptrs  1 hdr chain");
693	evcnt_attach_dynamic(&sc->sc_ev_txptrc2 , EVCNT_TYPE_MISC,
694	    NULL, device_xname(self), "TX pkts  w/ptrs  2 hdr chains");
695	evcnt_attach_dynamic(&sc->sc_ev_txptrc3 , EVCNT_TYPE_MISC,
696	    NULL, device_xname(self), "TX pkts  w/ptrs  3 hdr chains");
697	evcnt_attach_dynamic(&sc->sc_ev_txptrc4 , EVCNT_TYPE_MISC,
698	    NULL, device_xname(self), "TX pkts  w/ptrs  4 hdr chains");
699	evcnt_attach_dynamic(&sc->sc_ev_txptrc5 , EVCNT_TYPE_MISC,
700	    NULL, device_xname(self), "TX pkts  w/ptrs  5 hdr chains");
701	evcnt_attach_dynamic(&sc->sc_ev_txptrc6 , EVCNT_TYPE_MISC,
702	    NULL, device_xname(self), "TX pkts  w/ptrs >5 hdr chains");
703	evcnt_attach_dynamic(&sc->sc_ev_txptrh0 , EVCNT_TYPE_MISC,
704	    NULL, device_xname(self), "TX pkts  w/ptrs  ~8bytes hdr");
705	evcnt_attach_dynamic(&sc->sc_ev_txptrh1 , EVCNT_TYPE_MISC,
706	    NULL, device_xname(self), "TX pkts  w/ptrs ~16bytes hdr");
707	evcnt_attach_dynamic(&sc->sc_ev_txptrh2 , EVCNT_TYPE_MISC,
708	    NULL, device_xname(self), "TX pkts  w/ptrs ~32bytes hdr");
709	evcnt_attach_dynamic(&sc->sc_ev_txptrh3 , EVCNT_TYPE_MISC,
710	    NULL, device_xname(self), "TX pkts  w/ptrs ~64bytes hdr");
711	evcnt_attach_dynamic(&sc->sc_ev_txptrh4 , EVCNT_TYPE_MISC,
712	    NULL, device_xname(self), "TX pkts  w/ptrs ~80bytes hdr");
713	evcnt_attach_dynamic(&sc->sc_ev_txptrh5 , EVCNT_TYPE_MISC,
714	    NULL, device_xname(self), "TX pkts  w/ptrs ~96bytes hdr");
715	evcnt_attach_dynamic(&sc->sc_ev_txdstall , EVCNT_TYPE_MISC,
716	    NULL, device_xname(self), "TX stalled due to no txdesc");
717	evcnt_attach_dynamic(&sc->sc_ev_txempty , EVCNT_TYPE_MISC,
718	    NULL, device_xname(self), "TX empty interrupts");
719	evcnt_attach_dynamic(&sc->sc_ev_txsent , EVCNT_TYPE_MISC,
720	    NULL, device_xname(self), "TX sent interrupts");
721#endif
722
723	/* set shutdown hook to reset interface on powerdown */
724	if (pmf_device_register1(self, NULL, NULL, mec_shutdown))
725		pmf_class_network_register(self, ifp);
726	else
727		aprint_error_dev(self, "couldn't establish power handler\n");
728
729	return;
730
731	/*
732	 * Free any resources we've allocated during the failed attach
733	 * attempt.  Do this in reverse order and fall through.
734	 */
735 fail_4:
736	for (i = 0; i < MEC_NTXDESC; i++) {
737		if (sc->sc_txsoft[i].txs_dmamap != NULL)
738			bus_dmamap_destroy(sc->sc_dmat,
739			    sc->sc_txsoft[i].txs_dmamap);
740	}
741	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
742 fail_3:
743	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
744 fail_2:
745	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
746	    sizeof(struct mec_control_data));
747 fail_1:
748	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
749 fail_0:
750	return;
751}
752
753static int
754mec_mii_readreg(device_t self, int phy, int reg)
755{
756	struct mec_softc *sc = device_private(self);
757	bus_space_tag_t st = sc->sc_st;
758	bus_space_handle_t sh = sc->sc_sh;
759	uint64_t val;
760	int i;
761
762	if (mec_mii_wait(sc) != 0)
763		return 0;
764
765	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
766	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
767	delay(25);
768	bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
769	delay(25);
770	mec_mii_wait(sc);
771
772	for (i = 0; i < 20; i++) {
773		delay(30);
774
775		val = bus_space_read_8(st, sh, MEC_PHY_DATA);
776
777		if ((val & MEC_PHY_DATA_BUSY) == 0)
778			return val & MEC_PHY_DATA_VALUE;
779	}
780	return 0;
781}
782
783static void
784mec_mii_writereg(device_t self, int phy, int reg, int val)
785{
786	struct mec_softc *sc = device_private(self);
787	bus_space_tag_t st = sc->sc_st;
788	bus_space_handle_t sh = sc->sc_sh;
789
790	if (mec_mii_wait(sc) != 0) {
791		printf("timed out writing %x: %x\n", reg, val);
792		return;
793	}
794
795	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
796	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
797
798	delay(60);
799
800	bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);
801
802	delay(60);
803
804	mec_mii_wait(sc);
805}
806
807static int
808mec_mii_wait(struct mec_softc *sc)
809{
810	uint32_t busy;
811	int i, s;
812
813	for (i = 0; i < 100; i++) {
814		delay(30);
815
816		s = splhigh();
817		busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
818		splx(s);
819
820		if ((busy & MEC_PHY_DATA_BUSY) == 0)
821			return 0;
822#if 0
823		if (busy == 0xffff) /* XXX ? */
824			return 0;
825#endif
826	}
827
828	printf("%s: MII timed out\n", device_xname(sc->sc_dev));
829	return 1;
830}
831
832static void
833mec_statchg(device_t self)
834{
835	struct mec_softc *sc = device_private(self);
836	bus_space_tag_t st = sc->sc_st;
837	bus_space_handle_t sh = sc->sc_sh;
838	uint32_t control;
839
840	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
841	control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
842	    MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);
843
844	/* must also set IPG here for duplex stuff ... */
845	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
846		control |= MEC_MAC_FULL_DUPLEX;
847	} else {
848		/* set IPG */
849		control |= MEC_MAC_IPG_DEFAULT;
850	}
851
852	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
853}
854
855static int
856mec_init(struct ifnet *ifp)
857{
858	struct mec_softc *sc = ifp->if_softc;
859	bus_space_tag_t st = sc->sc_st;
860	bus_space_handle_t sh = sc->sc_sh;
861	struct mec_rxdesc *rxd;
862	int i, rc;
863
864	/* cancel any pending I/O */
865	mec_stop(ifp, 0);
866
867	/* reset device */
868	mec_reset(sc);
869
870	/* setup filter for multicast or promisc mode */
871	mec_setfilter(sc);
872
873	/* set the TX ring pointer to the base address */
874	bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));
875
876	sc->sc_txpending = 0;
877	sc->sc_txdirty = 0;
878	sc->sc_txlast = MEC_NTXDESC - 1;
879
880	/* put RX buffers into FIFO */
881	for (i = 0; i < MEC_NRXDESC; i++) {
882		rxd = &sc->sc_rxdesc[i];
883		rxd->rxd_stat = 0;
884		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
885		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
886		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
887	}
888	sc->sc_rxptr = 0;
889
890#if 0	/* XXX no info */
891	bus_space_write_8(st, sh, MEC_TIMER, 0);
892#endif
893
894	/*
895	 * MEC_DMA_TX_INT_ENABLE will be set later; otherwise it causes
896	 * spurious interrupts when the TX buffers are empty.
897	 */
898	bus_space_write_8(st, sh, MEC_DMA_CONTROL,
899	    (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
900	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
901	    MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
902	    MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);
903
904	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
905
906	if ((rc = ether_mediachange(ifp)) != 0)
907		return rc;
908
909	ifp->if_flags |= IFF_RUNNING;
910	ifp->if_flags &= ~IFF_OACTIVE;
911	mec_start(ifp);
912
913	return 0;
914}
915
916static void
917mec_reset(struct mec_softc *sc)
918{
919	bus_space_tag_t st = sc->sc_st;
920	bus_space_handle_t sh = sc->sc_sh;
921	uint64_t control;
922
923	/* stop DMA first */
924	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
925
926	/* reset chip */
927	bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
928	delay(1000);
929	bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
930	delay(1000);
931
932	/* Default to 100/half and let auto-negotiation work its magic */
933	control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
934	    MEC_MAC_IPG_DEFAULT;
935
936	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
937	/* stop DMA again for sanity */
938	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
939
940	DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
941	    bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
942}
943
944static void
945mec_start(struct ifnet *ifp)
946{
947	struct mec_softc *sc = ifp->if_softc;
948	struct mbuf *m0, *m;
949	struct mec_txdesc *txd;
950	struct mec_txsoft *txs;
951	bus_dmamap_t dmamap;
952	bus_space_tag_t st = sc->sc_st;
953	bus_space_handle_t sh = sc->sc_sh;
954	int error, firsttx, nexttx, opending;
955	int len, bufoff, buflen, nsegs, align, resid, pseg, nptr, slen, i;
956	uint32_t txdcmd;
957
958	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
959		return;
960
961	/*
962	 * Remember the previous txpending and the first transmit descriptor.
963	 */
964	opending = sc->sc_txpending;
965	firsttx = MEC_NEXTTX(sc->sc_txlast);
966
967	DPRINTF(MEC_DEBUG_START,
968	    ("%s: opending = %d, firsttx = %d\n", __func__, opending, firsttx));
969
970	while (sc->sc_txpending < MEC_NTXDESC - 1) {
971		/* Grab a packet off the queue. */
972		IFQ_POLL(&ifp->if_snd, m0);
973		if (m0 == NULL)
974			break;
975		m = NULL;
976
977		/*
978		 * Get the next available transmit descriptor.
979		 */
980		nexttx = MEC_NEXTTX(sc->sc_txlast);
981		txd = &sc->sc_txdesc[nexttx];
982		txs = &sc->sc_txsoft[nexttx];
983		dmamap = txs->txs_dmamap;
984		txs->txs_flags = 0;
985
986		buflen = 0;
987		bufoff = 0;
988		resid = 0;
989		nptr = 0;	/* XXX gcc */
990		pseg = 0;	/* XXX gcc */
991
992		len = m0->m_pkthdr.len;
993
994		DPRINTF(MEC_DEBUG_START,
995		    ("%s: len = %d, nexttx = %d, txpending = %d\n",
996		    __func__, len, nexttx, sc->sc_txpending));
997
998		if (len <= MEC_TXD_BUFSIZE) {
999			/*
1000			 * If a TX packet will fit into the small txdesc buffer,
1001			 * just copy it there. Maybe it's faster than
1002			 * checking alignment and calling bus_dma(9) etc.
1003			 */
1004			DPRINTF(MEC_DEBUG_START, ("%s: short packet\n",
1005			    __func__));
1006			IFQ_DEQUEUE(&ifp->if_snd, m0);
1007
1008			/*
1009			 * I don't know if MEC chip does auto padding,
1010			 * but do it manually for safety.
1011			 */
1012			if (len < ETHER_PAD_LEN) {
1013				MEC_EVCNT_INCR(&sc->sc_ev_txdpad);
1014				bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
1015				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
1016				memset(txd->txd_buf + bufoff + len, 0,
1017				    ETHER_PAD_LEN - len);
1018				len = buflen = ETHER_PAD_LEN;
1019			} else {
1020				MEC_EVCNT_INCR(&sc->sc_ev_txdbuf);
1021				bufoff = MEC_TXD_BUFSTART(len);
1022				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
1023				buflen = len;
1024			}
1025		} else {
1026			/*
1027			 * If the packet won't fit the static buffer in txdesc,
1028			 * we have to use the concatenate pointers to handle it.
1029			 */
1030			DPRINTF(MEC_DEBUG_START, ("%s: long packet\n",
1031			    __func__));
1032			txs->txs_flags = MEC_TXS_TXDPTR;
1033
1034			/*
1035			 * Call bus_dmamap_load_mbuf(9) first to see
1036			 * how many chains the TX mbuf has.
1037			 */
1038			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1039			    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1040			if (error == 0) {
1041				/*
1042				 * Check chains which might contain headers.
1043				 * They may be heavily fragmented, and since they
1044				 * should be small enough it's better to copy them
1045				 * into the txdesc buffer.
1046				 */
1047				nsegs = dmamap->dm_nsegs;
1048				for (pseg = 0; pseg < nsegs; pseg++) {
1049					slen = dmamap->dm_segs[pseg].ds_len;
1050					if (buflen + slen >
1051					    MEC_TXD_BUFSIZE1 - MEC_TXD_ALIGN)
1052						break;
1053					buflen += slen;
1054				}
1055				/*
1056				 * Check if the remaining chains can fit into
1057				 * the concatenate pointers.
1058				 */
1059				align = dmamap->dm_segs[pseg].ds_addr &
1060				    MEC_TXD_ALIGNMASK;
1061				if (align > 0) {
1062					/*
1063					 * If the first chain isn't uint64_t
1064					 * aligned, append the unaligned part
1065					 * into txdesc buffer too.
1066					 */
1067					resid = MEC_TXD_ALIGN - align;
1068					buflen += resid;
1069					for (; pseg < nsegs; pseg++) {
1070						slen =
1071						  dmamap->dm_segs[pseg].ds_len;
1072						if (slen > resid)
1073							break;
1074						resid -= slen;
1075					}
1076				} else if (pseg == 0) {
1077					/*
1078					 * In this case, the first chain is
1079					 * uint64_t aligned but it's too long
1080					 * to put into txdesc buf.
1081					 * We have to put some data into
1082					 * txdesc buf even in this case,
1083					 * so put MEC_TXD_ALIGN bytes there.
1084					 */
1085					buflen = resid = MEC_TXD_ALIGN;
1086				}
1087				nptr = nsegs - pseg;
1088				if (nptr <= MEC_NTXPTR) {
1089					bufoff = MEC_TXD_BUFSTART(buflen);
1090
1091					/*
1092					 * Check if all the remaining chains are
1093					 * uint64_t aligned.
1094					 */
1095					align = 0;
1096					for (i = pseg + 1; i < nsegs; i++)
1097						align |=
1098						    dmamap->dm_segs[i].ds_addr
1099						    & MEC_TXD_ALIGNMASK;
1100					if (align != 0) {
1101						/* chains are not aligned */
1102						error = -1;
1103					}
1104				} else {
1105					/* The TX mbuf chains don't fit. */
1106					error = -1;
1107				}
1108				if (error == -1)
1109					bus_dmamap_unload(sc->sc_dmat, dmamap);
1110			}
1111			if (error != 0) {
1112				/*
1113				 * The TX mbuf chains can't be put into
1114				 * the concatenate buffers. In this case,
1115				 * we have to allocate a new contiguous mbuf
1116				 * and copy data into it.
1117				 *
1118				 * Even in this case, the Ethernet header in
1119				 * the TX mbuf might be unaligned and the trailing
1120				 * data might be word aligned, so put 2 bytes
1121				 * (MEC_ETHER_ALIGN) of padding at the top of the
1122				 * allocated mbuf and copy the TX packet there.
1123				 * The 6 bytes (MEC_TXD_ALIGN - MEC_ETHER_ALIGN)
1124				 * at the top of the new mbuf won't be uint64_t
1125				 * aligned, but we have to put some data into the
1126				 * txdesc buffer anyway even if the buffer
1127				 * is uint64_t aligned.
1128				 */
1129				DPRINTF(MEC_DEBUG_START|MEC_DEBUG_TXSEGS,
1130				    ("%s: re-allocating mbuf\n", __func__));
1131
1132				MGETHDR(m, M_DONTWAIT, MT_DATA);
1133				if (m == NULL) {
1134					printf("%s: unable to allocate "
1135					    "TX mbuf\n",
1136					    device_xname(sc->sc_dev));
1137					break;
1138				}
1139				if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1140					MCLGET(m, M_DONTWAIT);
1141					if ((m->m_flags & M_EXT) == 0) {
1142						printf("%s: unable to allocate "
1143						    "TX cluster\n",
1144						    device_xname(sc->sc_dev));
1145						m_freem(m);
1146						break;
1147					}
1148				}
1149				m->m_data += MEC_ETHER_ALIGN;
1150
1151				/*
1152				 * Copy whole data (including unaligned part)
1153				 * for following bpf_mtap().
1154				 */
1155				m_copydata(m0, 0, len, mtod(m, void *));
1156				m->m_pkthdr.len = m->m_len = len;
1157				error = bus_dmamap_load_mbuf(sc->sc_dmat,
1158				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1159				if (dmamap->dm_nsegs > 1) {
1160					/* should not happen, but for sanity */
1161					bus_dmamap_unload(sc->sc_dmat, dmamap);
1162					error = -1;
1163				}
1164				if (error != 0) {
1165					printf("%s: unable to load TX buffer, "
1166					    "error = %d\n",
1167					    device_xname(sc->sc_dev), error);
1168					m_freem(m);
1169					break;
1170				}
1171				/*
1172				 * Only the first segment should be put into
1173				 * the concatenate pointer in this case.
1174				 */
1175				pseg = 0;
1176				nptr = 1;
1177
1178				/*
1179				 * Set the length of the unaligned part which will
1180				 * be copied into the txdesc buffer.
1181				 */
1182				buflen = MEC_TXD_ALIGN - MEC_ETHER_ALIGN;
1183				bufoff = MEC_TXD_BUFSTART(buflen);
1184				resid = buflen;
1185#ifdef MEC_EVENT_COUNTERS
1186				MEC_EVCNT_INCR(&sc->sc_ev_txmbuf);
1187				if (len <= 160)
1188					MEC_EVCNT_INCR(&sc->sc_ev_txmbufa);
1189				else if (len <= 256)
1190					MEC_EVCNT_INCR(&sc->sc_ev_txmbufb);
1191				else if (len <= 512)
1192					MEC_EVCNT_INCR(&sc->sc_ev_txmbufc);
1193				else if (len <= 1024)
1194					MEC_EVCNT_INCR(&sc->sc_ev_txmbufd);
1195				else
1196					MEC_EVCNT_INCR(&sc->sc_ev_txmbufe);
1197#endif
1198			}
1199#ifdef MEC_EVENT_COUNTERS
1200			else {
1201				MEC_EVCNT_INCR(&sc->sc_ev_txptrs);
1202				if (nptr == 1) {
1203					MEC_EVCNT_INCR(&sc->sc_ev_txptr1);
1204					if (len <= 160)
1205						MEC_EVCNT_INCR(
1206						    &sc->sc_ev_txptr1a);
1207					else if (len <= 256)
1208						MEC_EVCNT_INCR(
1209						    &sc->sc_ev_txptr1b);
1210					else if (len <= 512)
1211						MEC_EVCNT_INCR(
1212						    &sc->sc_ev_txptr1c);
1213					else if (len <= 1024)
1214						MEC_EVCNT_INCR(
1215						    &sc->sc_ev_txptr1d);
1216					else
1217						MEC_EVCNT_INCR(
1218						    &sc->sc_ev_txptr1e);
1219				} else if (nptr == 2) {
1220					MEC_EVCNT_INCR(&sc->sc_ev_txptr2);
1221					if (len <= 160)
1222						MEC_EVCNT_INCR(
1223						    &sc->sc_ev_txptr2a);
1224					else if (len <= 256)
1225						MEC_EVCNT_INCR(
1226						    &sc->sc_ev_txptr2b);
1227					else if (len <= 512)
1228						MEC_EVCNT_INCR(
1229						    &sc->sc_ev_txptr2c);
1230					else if (len <= 1024)
1231						MEC_EVCNT_INCR(
1232						    &sc->sc_ev_txptr2d);
1233					else
1234						MEC_EVCNT_INCR(
1235						    &sc->sc_ev_txptr2e);
1236				} else if (nptr == 3) {
1237					MEC_EVCNT_INCR(&sc->sc_ev_txptr3);
1238					if (len <= 160)
1239						MEC_EVCNT_INCR(
1240						    &sc->sc_ev_txptr3a);
1241					else if (len <= 256)
1242						MEC_EVCNT_INCR(
1243						    &sc->sc_ev_txptr3b);
1244					else if (len <= 512)
1245						MEC_EVCNT_INCR(
1246						    &sc->sc_ev_txptr3c);
1247					else if (len <= 1024)
1248						MEC_EVCNT_INCR(
1249						    &sc->sc_ev_txptr3d);
1250					else
1251						MEC_EVCNT_INCR(
1252						    &sc->sc_ev_txptr3e);
1253				}
1254				if (pseg == 0)
1255					MEC_EVCNT_INCR(&sc->sc_ev_txptrc0);
1256				else if (pseg == 1)
1257					MEC_EVCNT_INCR(&sc->sc_ev_txptrc1);
1258				else if (pseg == 2)
1259					MEC_EVCNT_INCR(&sc->sc_ev_txptrc2);
1260				else if (pseg == 3)
1261					MEC_EVCNT_INCR(&sc->sc_ev_txptrc3);
1262				else if (pseg == 4)
1263					MEC_EVCNT_INCR(&sc->sc_ev_txptrc4);
1264				else if (pseg == 5)
1265					MEC_EVCNT_INCR(&sc->sc_ev_txptrc5);
1266				else
1267					MEC_EVCNT_INCR(&sc->sc_ev_txptrc6);
1268				if (buflen <= 8)
1269					MEC_EVCNT_INCR(&sc->sc_ev_txptrh0);
1270				else if (buflen <= 16)
1271					MEC_EVCNT_INCR(&sc->sc_ev_txptrh1);
1272				else if (buflen <= 32)
1273					MEC_EVCNT_INCR(&sc->sc_ev_txptrh2);
1274				else if (buflen <= 64)
1275					MEC_EVCNT_INCR(&sc->sc_ev_txptrh3);
1276				else if (buflen <= 80)
1277					MEC_EVCNT_INCR(&sc->sc_ev_txptrh4);
1278				else
1279					MEC_EVCNT_INCR(&sc->sc_ev_txptrh5);
1280			}
1281#endif
1282			m_copydata(m0, 0, buflen, txd->txd_buf + bufoff);
1283
1284			IFQ_DEQUEUE(&ifp->if_snd, m0);
1285			if (m != NULL) {
1286				m_freem(m0);
1287				m0 = m;
1288			}
1289
1290			/*
1291			 * sync the DMA map for TX mbuf
1292			 */
1293			bus_dmamap_sync(sc->sc_dmat, dmamap, buflen,
1294			    len - buflen, BUS_DMASYNC_PREWRITE);
1295		}
1296
1297		/*
1298		 * Pass packet to bpf if there is a listener.
1299		 */
1300		bpf_mtap(ifp, m0);
1301		MEC_EVCNT_INCR(&sc->sc_ev_txpkts);
1302
1303		/*
1304		 * setup the transmit descriptor.
1305		 */
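		/*
		 * The low 16 bits of the command word hold the data length
		 * minus one (MEC_TXCMD_DATALEN), and TXCMD_BUFSTART() encodes
		 * the byte offset of the first valid byte within the 128-byte
		 * descriptor; since the inline data was copied to the tail of
		 * the buffer, that offset is MEC_TXDESCSIZE - buflen.
		 */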
1306		txdcmd = TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen) | (len - 1);
1307
1308		/*
1309		 * Set MEC_TXCMD_TXINT every MEC_NTXDESC_INTR packets
1310		 * if more than half of the txdescs have been queued,
1311		 * because TX_EMPTY interrupts will rarely happen
1312		 * when the TX queue is that backed up.
1313		 */
1314		if (sc->sc_txpending > (MEC_NTXDESC / 2) &&
1315		    (nexttx & (MEC_NTXDESC_INTR - 1)) == 0)
1316			txdcmd |= MEC_TXCMD_TXINT;
1317
1318		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1319			bus_dma_segment_t *segs = dmamap->dm_segs;
1320
1321			DPRINTF(MEC_DEBUG_TXSEGS,
1322			    ("%s: nsegs = %d, pseg = %d, nptr = %d\n",
1323			    __func__, dmamap->dm_nsegs, pseg, nptr));
1324
1325			switch (nptr) {
1326			case 3:
1327				KASSERT((segs[pseg + 2].ds_addr &
1328				    MEC_TXD_ALIGNMASK) == 0);
1329				txdcmd |= MEC_TXCMD_PTR3;
1330				txd->txd_ptr[2] =
1331				    TXPTR_LEN(segs[pseg + 2].ds_len - 1) |
1332				    segs[pseg + 2].ds_addr;
1333				/* FALLTHROUGH */
1334			case 2:
1335				KASSERT((segs[pseg + 1].ds_addr &
1336				    MEC_TXD_ALIGNMASK) == 0);
1337				txdcmd |= MEC_TXCMD_PTR2;
1338				txd->txd_ptr[1] =
1339				    TXPTR_LEN(segs[pseg + 1].ds_len - 1) |
1340				    segs[pseg + 1].ds_addr;
1341				/* FALLTHROUGH */
1342			case 1:
1343				txdcmd |= MEC_TXCMD_PTR1;
1344				txd->txd_ptr[0] =
1345				    TXPTR_LEN(segs[pseg].ds_len - resid - 1) |
1346				    (segs[pseg].ds_addr + resid);
1347				break;
1348			default:
1349				panic("%s: impossible nptr in %s",
1350				    device_xname(sc->sc_dev), __func__);
1351				/* NOTREACHED */
1352			}
1353			/*
1354			 * Store a pointer to the packet so we can
1355			 * free it later.
1356			 */
1357			txs->txs_mbuf = m0;
1358		} else {
1359			/*
1360			 * In this case all data have been copied into the txdesc
1361			 * buffer, so we can free the TX mbuf here.
1362			 */
1363			m_freem(m0);
1364		}
1365		txd->txd_cmd = txdcmd;
1366
1367		DPRINTF(MEC_DEBUG_START,
1368		    ("%s: txd_cmd    = 0x%016llx\n",
1369		    __func__, txd->txd_cmd));
1370		DPRINTF(MEC_DEBUG_START,
1371		    ("%s: txd_ptr[0] = 0x%016llx\n",
1372		    __func__, txd->txd_ptr[0]));
1373		DPRINTF(MEC_DEBUG_START,
1374		    ("%s: txd_ptr[1] = 0x%016llx\n",
1375		    __func__, txd->txd_ptr[1]));
1376		DPRINTF(MEC_DEBUG_START,
1377		    ("%s: txd_ptr[2] = 0x%016llx\n",
1378		    __func__, txd->txd_ptr[2]));
1379		DPRINTF(MEC_DEBUG_START,
1380		    ("%s: len = %d (0x%04x), buflen = %d (0x%02x)\n",
1381		    __func__, len, len, buflen, buflen));
1382
1383		/* sync TX descriptor */
1384		MEC_TXDESCSYNC(sc, nexttx,
1385		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1386
1387		/* start TX */
1388		bus_space_write_8(st, sh, MEC_TX_RING_PTR, MEC_NEXTTX(nexttx));
1389
1390		/* advance the TX pointer. */
1391		sc->sc_txpending++;
1392		sc->sc_txlast = nexttx;
1393	}
1394
1395	if (sc->sc_txpending == MEC_NTXDESC - 1) {
1396		/* No more slots; notify upper layer. */
1397		MEC_EVCNT_INCR(&sc->sc_ev_txdstall);
1398		ifp->if_flags |= IFF_OACTIVE;
1399	}
1400
1401	if (sc->sc_txpending != opending) {
1402		/*
1403		 * If the transmitter was idle,
1404		 * reset the txdirty pointer and re-enable TX interrupt.
1405		 */
1406		if (opending == 0) {
1407			sc->sc_txdirty = firsttx;
1408			bus_space_write_8(st, sh, MEC_TX_ALIAS,
1409			    MEC_TX_ALIAS_INT_ENABLE);
1410		}
1411
1412		/* Set a watchdog timer in case the chip flakes out. */
1413		ifp->if_timer = 5;
1414	}
1415}
1416
1417static void
1418mec_stop(struct ifnet *ifp, int disable)
1419{
1420	struct mec_softc *sc = ifp->if_softc;
1421	struct mec_txsoft *txs;
1422	int i;
1423
1424	DPRINTF(MEC_DEBUG_STOP, ("%s\n", __func__));
1425
1426	ifp->if_timer = 0;
1427	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1428
1429	callout_stop(&sc->sc_tick_ch);
1430	mii_down(&sc->sc_mii);
1431
1432	/* release any TX buffers */
1433	for (i = 0; i < MEC_NTXDESC; i++) {
1434		txs = &sc->sc_txsoft[i];
1435		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1436			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1437			m_freem(txs->txs_mbuf);
1438			txs->txs_mbuf = NULL;
1439		}
1440	}
1441}
1442
1443static int
1444mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1445{
1446	int s, error;
1447
1448	s = splnet();
1449
1450	error = ether_ioctl(ifp, cmd, data);
1451	if (error == ENETRESET) {
1452		/*
1453		 * Multicast list has changed; set the hardware filter
1454		 * accordingly.
1455		 */
1456		if (ifp->if_flags & IFF_RUNNING)
1457			error = mec_init(ifp);
1458		else
1459			error = 0;
1460	}
1461
1462	/* Try to get more packets going. */
1463	mec_start(ifp);
1464
1465	splx(s);
1466	return error;
1467}
1468
1469static void
1470mec_watchdog(struct ifnet *ifp)
1471{
1472	struct mec_softc *sc = ifp->if_softc;
1473
1474	printf("%s: device timeout\n", device_xname(sc->sc_dev));
1475	ifp->if_oerrors++;
1476
1477	mec_init(ifp);
1478}
1479
1480static void
1481mec_tick(void *arg)
1482{
1483	struct mec_softc *sc = arg;
1484	int s;
1485
1486	s = splnet();
1487	mii_tick(&sc->sc_mii);
1488	splx(s);
1489
1490	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
1491}
1492
1493static void
1494mec_setfilter(struct mec_softc *sc)
1495{
1496	struct ethercom *ec = &sc->sc_ethercom;
1497	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1498	struct ether_multi *enm;
1499	struct ether_multistep step;
1500	bus_space_tag_t st = sc->sc_st;
1501	bus_space_handle_t sh = sc->sc_sh;
1502	uint64_t mchash;
1503	uint32_t control, hash;
1504	int mcnt;
1505
1506	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1507	control &= ~MEC_MAC_FILTER_MASK;
1508
1509	if (ifp->if_flags & IFF_PROMISC) {
1510		control |= MEC_MAC_FILTER_PROMISC;
1511		bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1512		bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1513		return;
1514	}
1515
1516	mcnt = 0;
1517	mchash = 0;
1518	ETHER_FIRST_MULTI(step, ec, enm);
1519	while (enm != NULL) {
1520		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1521			/* set allmulti for a range of multicast addresses */
1522			control |= MEC_MAC_FILTER_ALLMULTI;
1523			bus_space_write_8(st, sh, MEC_MULTICAST,
1524			    0xffffffffffffffffULL);
1525			bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1526			return;
1527		}
1528
1529#define mec_calchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
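/*
 * ether_crc32_be() of the 6-byte address, shifted right by 26, leaves the
 * top 6 bits of the CRC: an index into the 64-bit MEC_MULTICAST hash
 * register.
 */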
1530
1531		hash = mec_calchash(enm->enm_addrlo);
1532		mchash |= 1ULL << hash;
1533		mcnt++;
1534		ETHER_NEXT_MULTI(step, enm);
1535	}
1536
1537	ifp->if_flags &= ~IFF_ALLMULTI;
1538
1539	if (mcnt > 0)
1540		control |= MEC_MAC_FILTER_MATCHMULTI;
1541
1542	bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1543	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1544}
1545
1546static int
1547mec_intr(void *arg)
1548{
1549	struct mec_softc *sc = arg;
1550	bus_space_tag_t st = sc->sc_st;
1551	bus_space_handle_t sh = sc->sc_sh;
1552	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1553	uint32_t statreg, statack, txptr;
1554	int handled, sent;
1555
1556	DPRINTF(MEC_DEBUG_INTR, ("%s: called\n", __func__));
1557
1558	handled = sent = 0;
1559
1560	for (;;) {
1561		statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);
1562
1563		DPRINTF(MEC_DEBUG_INTR,
1564		    ("%s: INT_STAT = 0x%08x\n", __func__, statreg));
1565
1566		statack = statreg & MEC_INT_STATUS_MASK;
1567		if (statack == 0)
1568			break;
1569		bus_space_write_8(st, sh, MEC_INT_STATUS, statack);
1570
1571		handled = 1;
1572
1573		if (statack &
1574		    (MEC_INT_RX_THRESHOLD |
1575		     MEC_INT_RX_FIFO_UNDERFLOW)) {
1576			mec_rxintr(sc);
1577		}
1578
1579		if (statack &
1580		    (MEC_INT_TX_EMPTY |
1581		     MEC_INT_TX_PACKET_SENT |
1582		     MEC_INT_TX_ABORT)) {
1583			txptr = (statreg & MEC_INT_TX_RING_BUFFER_ALIAS)
1584			    >> MEC_INT_TX_RING_BUFFER_SHIFT;
1585			mec_txintr(sc, txptr);
1586			sent = 1;
1587			if ((statack & MEC_INT_TX_EMPTY) != 0) {
1588				/*
1589				 * disable TX interrupt to stop
1590				 * TX empty interrupt
1591				 */
1592				bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
1593				DPRINTF(MEC_DEBUG_INTR,
1594				    ("%s: disable TX_INT\n", __func__));
1595			}
1596#ifdef MEC_EVENT_COUNTERS
1597			if ((statack & MEC_INT_TX_EMPTY) != 0)
1598				MEC_EVCNT_INCR(&sc->sc_ev_txempty);
1599			if ((statack & MEC_INT_TX_PACKET_SENT) != 0)
1600				MEC_EVCNT_INCR(&sc->sc_ev_txsent);
1601#endif
1602		}
1603
1604		if (statack &
1605		    (MEC_INT_TX_LINK_FAIL |
1606		     MEC_INT_TX_MEM_ERROR |
1607		     MEC_INT_TX_ABORT |
1608		     MEC_INT_RX_FIFO_UNDERFLOW |
1609		     MEC_INT_RX_DMA_UNDERFLOW)) {
1610			printf("%s: %s: interrupt status = 0x%08x\n",
1611			    device_xname(sc->sc_dev), __func__, statreg);
1612			mec_init(ifp);
1613			break;
1614		}
1615	}
1616
1617	if (sent && !IFQ_IS_EMPTY(&ifp->if_snd)) {
1618		/* try to get more packets going */
1619		mec_start(ifp);
1620	}
1621
1622#if NRND > 0
1623	if (handled)
1624		rnd_add_uint32(&sc->sc_rnd_source, statreg);
1625#endif
1626
1627	return handled;
1628}
1629
1630static void
1631mec_rxintr(struct mec_softc *sc)
1632{
1633	bus_space_tag_t st = sc->sc_st;
1634	bus_space_handle_t sh = sc->sc_sh;
1635	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1636	struct mbuf *m;
1637	struct mec_rxdesc *rxd;
1638	uint64_t rxstat;
1639	u_int len;
1640	int i;
1641	uint32_t crc;
1642
1643	DPRINTF(MEC_DEBUG_RXINTR, ("%s: called\n", __func__));
1644
1645	for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
1646		rxd = &sc->sc_rxdesc[i];
1647
1648		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
1649		rxstat = rxd->rxd_stat;
1650
1651		DPRINTF(MEC_DEBUG_RXINTR,
1652		    ("%s: rxstat = 0x%016llx, rxptr = %d\n",
1653		    __func__, rxstat, i));
1654		DPRINTF(MEC_DEBUG_RXINTR, ("%s: rxfifo = 0x%08x\n",
1655		    __func__, (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));
1656
1657		if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
1658			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1659			break;
1660		}
1661
1662		len = rxstat & MEC_RXSTAT_LEN;
1663
1664		if (len < ETHER_MIN_LEN ||
1665		    len > (MCLBYTES - MEC_ETHER_ALIGN)) {
1666			/* invalid length packet; drop it. */
1667			DPRINTF(MEC_DEBUG_RXINTR,
1668			    ("%s: wrong packet\n", __func__));
1669 dropit:
1670			ifp->if_ierrors++;
1671			rxd->rxd_stat = 0;
1672			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1673			bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
1674			    MEC_CDRXADDR(sc, i));
1675			continue;
1676		}
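		/*
		 * Note the dropit path above immediately hands the buffer
		 * back to the hardware via MEC_MCL_RX_FIFO, the same way a
		 * successfully received buffer is recycled further below.
		 */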
1677
1678		/*
1679		 * If 802.1Q VLAN MTU is enabled, ignore the bad packet error.
1680		 */
1681		if ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) != 0)
1682			rxstat &= ~MEC_RXSTAT_BADPACKET;
1683
1684		if (rxstat &
1685		    (MEC_RXSTAT_BADPACKET |
1686		     MEC_RXSTAT_LONGEVENT |
1687		     MEC_RXSTAT_INVALID   |
1688		     MEC_RXSTAT_CRCERROR  |
1689		     MEC_RXSTAT_VIOLATION)) {
1690			printf("%s: mec_rxintr: status = 0x%016"PRIx64"\n",
1691			    device_xname(sc->sc_dev), rxstat);
1692			goto dropit;
1693		}
1694
1695		/*
1696		 * The MEC includes the CRC with every packet.  Trim
1697		 * it off here.
1698		 */
1699		len -= ETHER_CRC_LEN;
1700
1701		/*
1702		 * now allocate an mbuf (and possibly a cluster) to hold
1703		 * the received packet.
1704		 */
1705		MGETHDR(m, M_DONTWAIT, MT_DATA);
1706		if (m == NULL) {
1707			printf("%s: unable to allocate RX mbuf\n",
1708			    device_xname(sc->sc_dev));
1709			goto dropit;
1710		}
1711		if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1712			MCLGET(m, M_DONTWAIT);
1713			if ((m->m_flags & M_EXT) == 0) {
1714				printf("%s: unable to allocate RX cluster\n",
1715				    device_xname(sc->sc_dev));
1716				m_freem(m);
1717				m = NULL;
1718				goto dropit;
1719			}
1720		}
1721
1722		/*
1723		 * The MEC chip seems to insert 2 bytes of padding at the top of
1724		 * the RX buffer; we copy the whole buffer to keep the copy aligned.
1725		 */
1726		MEC_RXBUFSYNC(sc, i, len + ETHER_CRC_LEN, BUS_DMASYNC_POSTREAD);
1727		memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
1728		crc = be32dec(rxd->rxd_buf + MEC_ETHER_ALIGN + len);
1729		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
1730		m->m_data += MEC_ETHER_ALIGN;
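		/*
		 * Illustrative RX buffer layout, assuming MEC_ETHER_ALIGN is
		 * the usual 2-byte pad:
		 *
		 *	rxd_buf: [pad 2][Ethernet frame, len bytes][CRC 4]
		 *
		 * Copying pad + frame keeps the memcpy aligned; m_data is then
		 * advanced past the pad, presumably so the IP header following
		 * the 14-byte Ethernet header ends up 4-byte aligned.
		 */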
1731
1732		/* put RX buffer into FIFO again */
1733		rxd->rxd_stat = 0;
1734		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1735		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
1736
1737		m->m_pkthdr.rcvif = ifp;
1738		m->m_pkthdr.len = m->m_len = len;
1739		if ((ifp->if_csum_flags_rx & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0)
1740			mec_rxcsum(sc, m, RXSTAT_CKSUM(rxstat), crc);
1741
1742		ifp->if_ipackets++;
1743
1744		/*
1745		 * Pass this up to any BPF listeners before handing it
1746		 * to the network stack.
1747		 */
1748		bpf_mtap(ifp, m);
1749
1750		/* Pass it on. */
1751		(*ifp->if_input)(ifp, m);
1752	}
1753
1754	/* update RX pointer */
1755	sc->sc_rxptr = i;
1756}
1757
1758static void
1759mec_rxcsum(struct mec_softc *sc, struct mbuf *m, uint16_t rxcsum, uint32_t crc)
1760{
1761	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1762	struct ether_header *eh;
1763	struct ip *ip;
1764	struct udphdr *uh;
1765	u_int len, pktlen, hlen;
1766	uint32_t csum_data, dsum;
1767	int csum_flags;
1768	const uint16_t *dp;
1769
1770	csum_data = 0;
1771	csum_flags = 0;
1772
1773	len = m->m_len;
1774	if (len < ETHER_HDR_LEN + sizeof(struct ip))
1775		goto out;
1776	pktlen = len - ETHER_HDR_LEN;
1777	eh = mtod(m, struct ether_header *);
1778	if (ntohs(eh->ether_type) != ETHERTYPE_IP)
1779		goto out;
1780	ip = (struct ip *)((uint8_t *)eh + ETHER_HDR_LEN);
1781	if (ip->ip_v != IPVERSION)
1782		goto out;
1783
1784	hlen = ip->ip_hl << 2;
1785	if (hlen < sizeof(struct ip))
1786		goto out;
1787
1788	/*
1789	 * Bail out if the packet is too short, has trailing garbage or an
1790	 * Ethernet pad, is truncated, or is a fragment.
1791	 */
1792	if (ntohs(ip->ip_len) < hlen ||
1793	    ntohs(ip->ip_len) != pktlen ||
1794	    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0)
1795		goto out;
1796
1797	switch (ip->ip_p) {
1798	case IPPROTO_TCP:
1799		if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0 ||
1800		    pktlen < (hlen + sizeof(struct tcphdr)))
1801			goto out;
1802		csum_flags = M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
1803		break;
1804	case IPPROTO_UDP:
1805		if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0 ||
1806		    pktlen < (hlen + sizeof(struct udphdr)))
1807			goto out;
1808		uh = (struct udphdr *)((uint8_t *)ip + hlen);
1809		if (uh->uh_sum == 0)
1810			goto out;	/* no checksum */
1811		csum_flags = M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
1812		break;
1813	default:
1814		goto out;
1815	}
1816
1817	/*
1818	 * The checksum computed by the hardware includes the Ethernet
1819	 * header, the IP header, and the CRC, so we have to deduct them.
1820	 * Note the IP header checksum should be 0xffff, so we don't
1821	 * have to deduct it.
1822	 */
1823	dsum = 0;
1824
1825	/* deduct Ethernet header */
1826	dp = (const uint16_t *)eh;
1827	for (hlen = 0; hlen < (ETHER_HDR_LEN / sizeof(uint16_t)); hlen++)
1828		dsum += ntohs(*dp++);
1829
1830	/* deduct CRC */
1831	if (len & 1) {
1832		dsum += (crc >> 24) & 0x00ff;
1833		dsum += (crc >>  8) & 0xffff;
1834		dsum += (crc <<  8) & 0xff00;
1835	} else {
1836		dsum += (crc >> 16) & 0xffff;
1837		dsum += (crc >>  0) & 0xffff;
1838	}
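	/*
	 * When the frame length is odd the CRC starts at an odd byte offset,
	 * so its four bytes straddle three 16-bit words of the hardware sum;
	 * the odd branch above accumulates them into dsum shifted accordingly,
	 * without touching the data byte they share a word with.
	 */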
1839	while (dsum >> 16)
1840		dsum = (dsum >> 16) + (dsum & 0xffff);
1841
1842	csum_data = rxcsum;
1843	csum_data += (uint16_t)~dsum;
1844
1845	while (csum_data >> 16)
1846		csum_data = (csum_data >> 16) + (csum_data & 0xffff);
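	/*
	 * The loop above folds any carries back into 16 bits, e.g. 0x1fffe
	 * folds to 0x1 + 0xfffe == 0xffff.  A standalone sketch of the same
	 * idea (illustrative only, not used by the driver):
	 *
	 *	static inline uint16_t
	 *	csum_fold16(uint32_t sum)
	 *	{
	 *		while (sum >> 16)
	 *			sum = (sum >> 16) + (sum & 0xffff);
	 *		return (uint16_t)sum;
	 *	}
	 */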
1847
1848 out:
1849	m->m_pkthdr.csum_flags = csum_flags;
1850	m->m_pkthdr.csum_data = csum_data;
1851}
1852
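/*
 * TX completion handler.  Reclaim transmitted descriptors from sc_txdirty
 * up to the ring pointer the chip reported (txptr), unloading DMA maps and
 * freeing mbufs for descriptors that used a separate (TXDPTR) buffer, and
 * update the collision and error counters.
 */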
1853static void
1854mec_txintr(struct mec_softc *sc, uint32_t txptr)
1855{
1856	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1857	struct mec_txdesc *txd;
1858	struct mec_txsoft *txs;
1859	bus_dmamap_t dmamap;
1860	uint64_t txstat;
1861	int i;
1862	u_int col;
1863
1864	DPRINTF(MEC_DEBUG_TXINTR, ("%s: called\n", __func__));
1865
1866	for (i = sc->sc_txdirty; i != txptr && sc->sc_txpending != 0;
1867	    i = MEC_NEXTTX(i), sc->sc_txpending--) {
1868		txd = &sc->sc_txdesc[i];
1869
1870		MEC_TXCMDSYNC(sc, i,
1871		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1872
1873		txstat = txd->txd_stat;
1874		DPRINTF(MEC_DEBUG_TXINTR,
1875		    ("%s: dirty = %d, txstat = 0x%016llx\n",
1876		    __func__, i, txstat));
1877		if ((txstat & MEC_TXSTAT_SENT) == 0) {
1878			MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
1879			break;
1880		}
1881
1882		txs = &sc->sc_txsoft[i];
1883		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1884			dmamap = txs->txs_dmamap;
1885			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
1886			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1887			bus_dmamap_unload(sc->sc_dmat, dmamap);
1888			m_freem(txs->txs_mbuf);
1889			txs->txs_mbuf = NULL;
1890		}
1891
1892		col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
1893		ifp->if_collisions += col;
1894
1895		if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
1896			printf("%s: TX error: txstat = 0x%016"PRIx64"\n",
1897			    device_xname(sc->sc_dev), txstat);
1898			ifp->if_oerrors++;
1899		} else
1900			ifp->if_opackets++;
1901	}
1902
1903	/* update the dirty TX buffer pointer */
1904	sc->sc_txdirty = i;
1905	DPRINTF(MEC_DEBUG_INTR,
1906	    ("%s: sc_txdirty = %2d, sc_txpending = %2d\n",
1907	    __func__, sc->sc_txdirty, sc->sc_txpending));
1908
1909	/* cancel the watchdog timer if there are no pending TX packets */
1910	if (sc->sc_txpending == 0)
1911		ifp->if_timer = 0;
1912	if (sc->sc_txpending < MEC_NTXDESC - MEC_NTXDESC_RSVD)
1913		ifp->if_flags &= ~IFF_OACTIVE;
1914}
1915
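/*
 * Shutdown hook; the (device_t, int) -> bool signature matches a pmf
 * shutdown handler, so this is presumably registered at attach time.
 * Stop the interface and reset the chip so no DMA is left running
 * across a reboot.
 */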
1916static bool
1917mec_shutdown(device_t self, int howto)
1918{
1919	struct mec_softc *sc = device_private(self);
1920
1921	mec_stop(&sc->sc_ethercom.ec_if, 1);
1922	/* reset the chip to make sure all DMA is stopped */
1923	mec_reset(sc);
1924
1925	return true;
1926}
1927