/*	$NetBSD: if_emac.c,v 1.59 2024/02/10 09:30:05 andvar Exp $	*/

/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * emac(4) supports the following ibm4xx EMACs.
 *   XXXX: ZMII and 'TCP Acceleration Hardware' are not supported yet...
 *
 *            tested
 *            ------
 * 405EP	-  10/100 x2
 * 405EX/EXr	o  10/100/1000 x2 (EXr x1), STA v2, 256-bit hash table, RGMII
 * 405GP/GPr	o  10/100
 * 440EP	-  10/100 x2, ZMII
 * 440GP	-  10/100 x2, ZMII
 * 440GX	-  10/100/1000 x4, ZMII/RGMII(ch 2, 3), TAH(ch 2, 3)
 * 440SP	-  10/100/1000
 * 440SPe	-  10/100/1000, STA v2
 */
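
/*
 * The device attaches at the on-chip peripheral bus (opb).  A kernel
 * configuration fragment might look like the following sketch; the
 * address and interrupt values are examples only and must come from the
 * board's actual configuration file:
 *
 *	emac0 at opb? addr 0xef600800 irq 15
 */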

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_emac.c,v 1.59 2024/02/10 09:30:05 andvar Exp $");

#ifdef _KERNEL_OPT
#include "opt_emac.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/cpu.h>
#include <sys/device.h>

#include <sys/rndsource.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <powerpc/ibm4xx/cpu.h>
#include <powerpc/ibm4xx/dcr4xx.h>
#include <powerpc/ibm4xx/mal405gp.h>
#include <powerpc/ibm4xx/dev/emacreg.h>
#include <powerpc/ibm4xx/dev/if_emacreg.h>
#include <powerpc/ibm4xx/dev/if_emacvar.h>
#include <powerpc/ibm4xx/dev/malvar.h>
#include <powerpc/ibm4xx/dev/opbreg.h>
#include <powerpc/ibm4xx/dev/opbvar.h>
#include <powerpc/ibm4xx/dev/plbvar.h>
#if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY)
#include <powerpc/ibm4xx/dev/rmiivar.h>
#endif

#include <dev/mii/miivar.h>

#include "locators.h"


/*
 * Transmit descriptor list size.  There are two Tx channels, each with
 * up to 256 hardware descriptors available.  We currently use one Tx
 * channel.  We tell the upper layers that they can queue a lot of
 * packets, and we go ahead and manage up to 64 of them at a time.  We
 * allow up to 16 DMA segments per packet.
 */
#define	EMAC_NTXSEGS		16
#define	EMAC_TXQUEUELEN		64
#define	EMAC_TXQUEUELEN_MASK	(EMAC_TXQUEUELEN - 1)
#define	EMAC_TXQUEUE_GC		(EMAC_TXQUEUELEN / 4)
#define	EMAC_NTXDESC		256
#define	EMAC_NTXDESC_MASK	(EMAC_NTXDESC - 1)
#define	EMAC_NEXTTX(x)		(((x) + 1) & EMAC_NTXDESC_MASK)
#define	EMAC_NEXTTXS(x)		(((x) + 1) & EMAC_TXQUEUELEN_MASK)
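
/*
 * Both ring sizes are powers of two, so advancing an index is a mask
 * rather than a modulo: EMAC_NEXTTX(255) == 0 and EMAC_NEXTTXS(63) == 0.
 * A minimal sketch (illustration only, not compiled) of walking the
 * descriptor ring for a multi-segment packet:
 */
#if 0	/* example only */
	int seg, nexttx;

	for (seg = 0, nexttx = sc->sc_txnext; seg < dmamap->dm_nsegs; seg++)
		nexttx = EMAC_NEXTTX(nexttx);	/* wraps from 255 back to 0 */
#endif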

/*
 * Receive descriptor list size.  There is one Rx channel with up to 256
 * hardware descriptors available.  We allocate 64 receive descriptors,
 * each with a 2k buffer (MCLBYTES).
 */
#define	EMAC_NRXDESC		64
#define	EMAC_NRXDESC_MASK	(EMAC_NRXDESC - 1)
#define	EMAC_NEXTRX(x)		(((x) + 1) & EMAC_NRXDESC_MASK)
#define	EMAC_PREVRX(x)		(((x) - 1) & EMAC_NRXDESC_MASK)

/*
 * Transmit/receive descriptors that are DMA'd to the EMAC.
 */
struct emac_control_data {
	struct mal_descriptor ecd_txdesc[EMAC_NTXDESC];
	struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC];
};

#define	EMAC_CDOFF(x)		offsetof(struct emac_control_data, x)
#define	EMAC_CDTXOFF(x)		EMAC_CDOFF(ecd_txdesc[(x)])
#define	EMAC_CDRXOFF(x)		EMAC_CDOFF(ecd_rxdesc[(x)])

/*
 * Software state for transmit jobs.
 */
struct emac_txsoft {
	struct mbuf *txs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive descriptors.
 */
struct emac_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct emac_softc {
	device_t sc_dev;		/* generic device information */
	int sc_instance;		/* instance no. */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power management hook */

	struct mii_data sc_mii;		/* MII/media information */
	struct callout sc_callout;	/* tick callout */

	uint32_t sc_mr1;		/* copy of Mode Register 1 */
	uint32_t sc_stacr_read;		/* Read opcode of STAOPC of STACR */
	uint32_t sc_stacr_write;	/* Write opcode of STAOPC of STACR */
	uint32_t sc_stacr_bits;		/* misc bits of STACR */
	bool sc_stacr_completed;	/* Operation completed of STACR */
	int sc_htsize;			/* Hash Table size */

	bus_dmamap_t sc_cddmamap;	/* control data dma map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Software state for transmit/receive descriptors. */
	struct emac_txsoft sc_txsoft[EMAC_TXQUEUELEN];
	struct emac_rxsoft sc_rxsoft[EMAC_NRXDESC];

	/* Control data structures. */
	struct emac_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->ecd_txdesc
#define	sc_rxdescs	sc_control_data->ecd_rxdesc

#ifdef EMAC_EVENT_COUNTERS
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxde;	/* Rx descriptor interrupts */
	struct evcnt sc_ev_txde;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_intr;	/* General EMAC interrupts */

	struct evcnt sc_ev_txreap;	/* Calls to Tx descriptor reaper */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* EMAC_EVENT_COUNTERS */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	int sc_rxptr;			/* next ready RX descriptor/descsoft */

	krndsource_t rnd_source;	/* random source */

	void (*sc_rmii_enable)(device_t, int);		/* reduced MII enable */
	void (*sc_rmii_disable)(device_t, int);		/* reduced MII disable */
	void (*sc_rmii_speed)(device_t, int, int);	/* reduced MII speed */
};

#ifdef EMAC_EVENT_COUNTERS
#define	EMAC_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	EMAC_EVCNT_INCR(ev)	/* nothing */
#endif

#define	EMAC_CDTXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDTXOFF((x)))
#define	EMAC_CDRXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDRXOFF((x)))

#define	EMAC_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > EMAC_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) *	\
		    (EMAC_NTXDESC - __x), (ops));			\
		__n -= (EMAC_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * __n, (ops)); \
} while (/*CONSTCOND*/0)
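
/*
 * EMAC_CDTXSYNC() splits a sync that would run past the end of the ring
 * into two bus_dmamap_sync() calls.  For example (sketch, not compiled),
 * syncing 4 descriptors starting at index 254 syncs descriptors 254-255
 * and then 0-1:
 */
#if 0	/* example only */
	EMAC_CDTXSYNC(sc, 254, 4, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#endif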

#define	EMAC_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDRXOFF((x)), sizeof(struct mal_descriptor), (ops));	\
} while (/*CONSTCOND*/0)

#define	EMAC_INIT_RXDESC(sc, x)						\
do {									\
	struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr + 2;	\
	__rxd->md_data_len = __m->m_ext.ext_size - 2;			\
	__rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT |	\
	    /* Set wrap on last descriptor. */				\
	    (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0);		\
	EMAC_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)
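
/*
 * Worked example of the 2-byte offset above: the Ethernet header is
 * ETHER_HDR_LEN (14) bytes, so starting the frame at offset 2 puts the
 * payload (e.g. an IP header) at offset 16, a 4-byte boundary.
 */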

#define	EMAC_WRITE(sc, reg, val) \
	bus_space_write_stream_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	EMAC_READ(sc, reg) \
	bus_space_read_stream_4((sc)->sc_st, (sc)->sc_sh, (reg))

#define	EMAC_SET_FILTER(aht, crc) \
do {									\
	(aht)[3 - (((crc) >> 26) >> 4)] |= 1 << (((crc) >> 26) & 0xf);	\
} while (/*CONSTCOND*/0)
#define	EMAC_SET_FILTER256(aht, crc) \
do {									\
	(aht)[7 - (((crc) >> 24) >> 5)] |= 1 << (((crc) >> 24) & 0x1f);	\
} while (/*CONSTCOND*/0)
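
/*
 * Worked example: EMAC_SET_FILTER() keys on the top 6 bits of the
 * big-endian CRC.  For crc = 0xfc000000, crc >> 26 == 0x3f; the upper
 * 2 of those bits (0x3f >> 4 == 3) select register aht[3 - 3] == aht[0]
 * and the low 4 bits (0x3f & 0xf == 15) select bit 15 within it.
 * EMAC_SET_FILTER256() does the same with the top 8 bits of the CRC,
 * split 3/5 across eight 32-bit registers.
 */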

static int	emac_match(device_t, cfdata_t, void *);
static void	emac_attach(device_t, device_t, void *);

static int	emac_intr(void *);
static void	emac_shutdown(void *);

static void	emac_start(struct ifnet *);
static int	emac_ioctl(struct ifnet *, u_long, void *);
static int	emac_init(struct ifnet *);
static void	emac_stop(struct ifnet *, int);
static void	emac_watchdog(struct ifnet *);

static int	emac_add_rxbuf(struct emac_softc *, int);
static void	emac_rxdrain(struct emac_softc *);
static int	emac_set_filter(struct emac_softc *);
static int	emac_txreap(struct emac_softc *);

static void	emac_soft_reset(struct emac_softc *);
static void	emac_smart_reset(struct emac_softc *);

static int	emac_mii_readreg(device_t, int, int, uint16_t *);
static int	emac_mii_writereg(device_t, int, int, uint16_t);
static void	emac_mii_statchg(struct ifnet *);
static uint32_t	emac_mii_wait(struct emac_softc *);
static void	emac_mii_tick(void *);

int		emac_copy_small = 0;

CFATTACH_DECL_NEW(emac, sizeof(struct emac_softc),
    emac_match, emac_attach, NULL, NULL);


static int
emac_match(device_t parent, cfdata_t cf, void *aux)
{
	struct opb_attach_args *oaa = aux;

	/* match only on-chip ethernet devices */
	if (strcmp(oaa->opb_name, cf->cf_name) == 0)
		return 1;

	return 0;
}

static void
emac_attach(device_t parent, device_t self, void *aux)
{
	struct opb_attach_args *oaa = aux;
	struct emac_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	const char *xname = device_xname(self);
	bus_dma_segment_t seg;
	int error, i, nseg, opb_freq, opbc, mii_phy = MII_PHY_ANY;
	const uint8_t *enaddr;
	prop_dictionary_t dict = device_properties(self);
	prop_data_t ea;

	bus_space_map(oaa->opb_bt, oaa->opb_addr, EMAC_NREG, 0, &sc->sc_sh);

	sc->sc_dev = self;
	sc->sc_instance = oaa->opb_instance;
	sc->sc_st = oaa->opb_bt;
	sc->sc_dmat = oaa->opb_dmat;

	callout_init(&sc->sc_callout, 0);

	aprint_naive("\n");
	aprint_normal(": Ethernet Media Access Controller\n");

	/* Fetch the Ethernet address. */
	ea = prop_dictionary_get(dict, "mac-address");
	if (ea == NULL) {
		aprint_error_dev(self, "unable to get mac-address property\n");
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);
	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));

#if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY)
	/* Fetch the MII offset. */
	prop_dictionary_get_uint32(dict, "mii-phy", &mii_phy);

#ifdef EMAC_ZMII_PHY
	if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_ZMII)
		zmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable,
		    &sc->sc_rmii_disable, &sc->sc_rmii_speed);
#endif
#ifdef EMAC_RGMII_PHY
	if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_RGMII)
		rgmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable,
		    &sc->sc_rmii_disable, &sc->sc_rmii_speed);
#endif
#endif

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) {
		aprint_error_dev(self,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct emac_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct emac_control_data), 1,
	    sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(self,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct emac_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(self,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EMAC_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Soft Reset the EMAC to bring the chip to a known state. */
	emac_soft_reset(sc);

	opb_freq = opb_get_frequency();
	switch (opb_freq) {
	case  33333333: opbc =  STACR_OPBC_33MHZ; break;
	case  50000000: opbc =  STACR_OPBC_50MHZ; break;
	case  66666666: opbc =  STACR_OPBC_66MHZ; break;
	case  83333333: opbc =  STACR_OPBC_83MHZ; break;
	case 100000000: opbc = STACR_OPBC_100MHZ; break;

	default:
		if (opb_freq > 100000000) {
			opbc = STACR_OPBC_A100MHZ;
			break;
		}
		aprint_error_dev(self, "unsupported OPB frequency %dMHz\n",
		    opb_freq / 1000 / 1000);
		goto fail_5;
	}
	if (oaa->opb_flags & OPB_FLAGS_EMAC_GBE) {
		sc->sc_mr1 =
		    MR1_RFS_GBE(MR1__FS_16KB)	|
		    MR1_TFS_GBE(MR1__FS_16KB)	|
		    MR1_TR0_MULTIPLE		|
		    MR1_OBCI(opbc);
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

		if (oaa->opb_flags & OPB_FLAGS_EMAC_STACV2) {
			sc->sc_stacr_read = STACR_STAOPC_READ;
			sc->sc_stacr_write = STACR_STAOPC_WRITE;
			sc->sc_stacr_bits = STACR_OC;
			sc->sc_stacr_completed = false;
		} else {
			sc->sc_stacr_read = STACR_READ;
			sc->sc_stacr_write = STACR_WRITE;
			sc->sc_stacr_completed = true;
		}
	} else {
		/*
		 * Set up Mode Register 1 - set receive and transmit FIFOs to
		 * maximum size, allow transmit of multiple packets (only
		 * channel 0 is used).
		 *
		 * XXX: Allow pause packets??
		 */
		sc->sc_mr1 =
		    MR1_RFS(MR1__FS_4KB) |
		    MR1_TFS(MR1__FS_2KB) |
		    MR1_TR0_MULTIPLE;

		sc->sc_stacr_read = STACR_READ;
		sc->sc_stacr_write = STACR_WRITE;
		sc->sc_stacr_bits = STACR_OPBC(opbc);
		sc->sc_stacr_completed = true;
	}

	intr_establish_xname(oaa->opb_irq, IST_LEVEL, IPL_NET, emac_intr, sc,
	    device_xname(self));
	mal_intr_establish(sc->sc_instance, sc);

	if (oaa->opb_flags & OPB_FLAGS_EMAC_HT256)
		sc->sc_htsize = 256;
	else
		sc->sc_htsize = 64;

	/* Clear all interrupts */
	EMAC_WRITE(sc, EMAC_ISR, ISR_ALL);

	/*
	 * Initialise the media structures.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = emac_mii_readreg;
	mii->mii_writereg = emac_mii_writereg;
	mii->mii_statchg = emac_mii_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii_attach(self, mii, 0xffffffff, mii_phy, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = emac_start;
	ifp->if_ioctl = emac_ioctl;
	ifp->if_init = emac_init;
	ifp->if_stop = emac_stop;
	ifp->if_watchdog = emac_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);

#ifdef EMAC_EVENT_COUNTERS
	/*
	 * Attach the event counters.
	 */
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    NULL, xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_txde, EVCNT_TYPE_INTR,
	    NULL, xname, "txde");
	evcnt_attach_dynamic(&sc->sc_ev_rxde, EVCNT_TYPE_INTR,
	    NULL, xname, "rxde");
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "intr");

	evcnt_attach_dynamic(&sc->sc_ev_txreap, EVCNT_TYPE_MISC,
	    NULL, xname, "txreap");
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, xname, "txdrop");
	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, xname, "tu");
#endif /* EMAC_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(self,
		    "WARNING: unable to establish shutdown hook\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_5:
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_4:
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct emac_control_data));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
fail_0:
	return;
}

/*
 * EMAC General interrupt handler
 */
static int
emac_intr(void *arg)
{
	struct emac_softc *sc = arg;
	uint32_t status;

	EMAC_EVCNT_INCR(&sc->sc_ev_intr);
	status = EMAC_READ(sc, EMAC_ISR);

	/* Clear the interrupt status bits. */
	EMAC_WRITE(sc, EMAC_ISR, status);

	return 1;
}

static void
emac_shutdown(void *arg)
{
	struct emac_softc *sc = arg;

	emac_stop(&sc->sc_ethercom.ec_if, 0);
}


/*
 * ifnet interface functions
 */

static void
emac_start(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct emac_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx, ofree, seg;

	lasttx = 0;	/* XXX gcc */

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get a work queue entry.  Reclaim used Tx descriptors if
		 * we are running low.
		 */
		if (sc->sc_txsfree < EMAC_TXQUEUE_GC) {
			emac_txreap(sc);
			if (sc->sc_txsfree == 0) {
				EMAC_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				EMAC_EVCNT_INCR(&sc->sc_ev_txdrop);
				aprint_error_ifnet(ifp,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			EMAC_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Initialize the transmit descriptor.
		 */
		firsttx = sc->sc_txnext;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = EMAC_NEXTTX(nexttx)) {
			struct mal_descriptor *txdesc =
			    &sc->sc_txdescs[nexttx];

			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the TX_READY bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			txdesc->md_data = dmamap->dm_segs[seg].ds_addr;
			txdesc->md_data_len = dmamap->dm_segs[seg].ds_len;
			txdesc->md_stat_ctrl =
			    (txdesc->md_stat_ctrl & MAL_TX_WRAP) |
			    (nexttx == firsttx ? 0 : MAL_TX_READY) |
			    EMAC_TXC_GFCS | EMAC_TXC_GPAD;
			lasttx = nexttx;
		}

		/* Set the LAST bit on the last segment. */
		sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST;

		/*
		 * Set up last segment descriptor to send an interrupt after
		 * that descriptor is transmitted, and bypass existing Tx
		 * descriptor reaping method (for now...).
		 */
		sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_INTERRUPT;

		txs->txs_lastdesc = lasttx;

		/* Sync the descriptors we're using. */
		EMAC_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY;
		EMAC_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Tell the EMAC that a new packet is available.
		 */
		EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0 | TMR0_TFAE_2);

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = EMAC_NEXTTXS(sc->sc_txsnext);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txfree != ofree)
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
}

static int
emac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct emac_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMTU:
	{
		struct ifreq *ifr = (struct ifreq *)data;
		int maxmtu;

		if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU)
			maxmtu = EMAC_MAX_MTU;
		else
			maxmtu = ETHERMTU;

		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > maxmtu)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
			break;
		else if (ifp->if_flags & IFF_UP)
			error = emac_init(ifp);
		else
			error = 0;
		break;
	}

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				error = emac_set_filter(sc);
			else
				error = 0;
		}
	}

	/* try to get more packets going */
	emac_start(ifp);

	splx(s);
	return error;
}

static int
emac_init(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_rxsoft *rxs;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
	int error, i;

	error = 0;

	/* Cancel any pending I/O. */
	emac_stop(ifp, 0);

	/* Reset the chip to a known state. */
	emac_soft_reset(sc);

	/*
	 * Initialise the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	/* set wrap on last descriptor */
	sc->sc_txdescs[EMAC_NTXDESC - 1].md_stat_ctrl |= MAL_TX_WRAP;
	EMAC_CDTXSYNC(sc, 0, EMAC_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = EMAC_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialise the transmit job descriptors.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = EMAC_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialise the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = emac_add_rxbuf(sc, i)) != 0) {
				aprint_error_ifnet(ifp,
				    "unable to allocate or map rx buffer %d,"
				    " error = %d\n",
				    i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				emac_rxdrain(sc);
				goto out;
			}
		} else
			EMAC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Set the current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Load the MAC address.
	 */
	EMAC_WRITE(sc, EMAC_IAHR, enaddr[0] << 8 | enaddr[1]);
	EMAC_WRITE(sc, EMAC_IALR,
	    enaddr[2] << 24 | enaddr[3] << 16 | enaddr[4] << 8 | enaddr[5]);

	/* Enable the transmit and receive channel on the MAL. */
	error = mal_start(sc->sc_instance,
	    EMAC_CDTXADDR(sc, 0), EMAC_CDRXADDR(sc, 0));
	if (error)
		goto out;

	sc->sc_mr1 &= ~MR1_JPSM;
	if (ifp->if_mtu > ETHERMTU)
		/* Enable Jumbo Packet Support Mode */
		sc->sc_mr1 |= MR1_JPSM;

	/* Set FIFOs and media modes. */
	EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);

	/*
	 * Enable Individual and (possibly) Broadcast Address modes,
	 * runt packets, and strip padding.
	 */
	EMAC_WRITE(sc, EMAC_RMR, RMR_IAE | RMR_RRP | RMR_SP | RMR_TFAE_2 |
	    (ifp->if_flags & IFF_PROMISC ? RMR_PME : 0) |
	    (ifp->if_flags & IFF_BROADCAST ? RMR_BAE : 0));

	/*
	 * Set multicast filter.
	 */
	emac_set_filter(sc);

	/*
	 * Set low- and urgent-priority request thresholds.
	 */
	EMAC_WRITE(sc, EMAC_TMR1,
	    ((7 << TMR1_TLR_SHIFT) & TMR1_TLR_MASK) | /* 16 word burst */
	    ((15 << TMR1_TUR_SHIFT) & TMR1_TUR_MASK));
	/*
	 * Set Transmit Request Threshold Register.
	 */
	EMAC_WRITE(sc, EMAC_TRTR, TRTR_256);

	/*
	 * Set high and low receive watermarks.
	 */
	EMAC_WRITE(sc, EMAC_RWMR,
	    30 << RWMR_RLWM_SHIFT | 64 << RWMR_RLWM_SHIFT);
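	/*
	 * XXX: both values above are shifted by RWMR_RLWM_SHIFT although
	 * the comment says high and low watermarks; the second value
	 * presumably belongs in the high-watermark field.
	 */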

	/*
	 * Set frame gap.
	 */
	EMAC_WRITE(sc, EMAC_IPGVR, 8);

	/*
	 * Set interrupt status enable bits for EMAC.
	 */
	EMAC_WRITE(sc, EMAC_ISER,
	    ISR_TXPE |		/* TX Parity Error */
	    ISR_RXPE |		/* RX Parity Error */
	    ISR_TXUE |		/* TX Underrun Event */
	    ISR_RXOE |		/* RX Overrun Event */
	    ISR_OVR  |		/* Overrun Error */
	    ISR_PP   |		/* Pause Packet */
	    ISR_BP   |		/* Bad Packet */
	    ISR_RP   |		/* Runt Packet */
	    ISR_SE   |		/* Short Event */
	    ISR_ALE  |		/* Alignment Error */
	    ISR_BFCS |		/* Bad FCS */
	    ISR_PTLE |		/* Packet Too Long Error */
	    ISR_ORE  |		/* Out of Range Error */
	    ISR_IRE  |		/* In Range Error */
	    ISR_SE0  |		/* Signal Quality Error 0 (SQE) */
	    ISR_TE0  |		/* Transmit Error 0 */
	    ISR_MOS  |		/* MMA Operation Succeeded */
	    ISR_MOF);		/* MMA Operation Failed */

	/*
	 * Enable the transmit and receive channel on the EMAC.
	 */
	EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);

	/*
	 * ... all done!
	 */
	ifp->if_flags |= IFF_RUNNING;

 out:
	if (error) {
		ifp->if_flags &= ~IFF_RUNNING;
		ifp->if_timer = 0;
		aprint_error_ifnet(ifp, "interface not running\n");
	}
	return error;
}

static void
emac_stop(struct ifnet *ifp, int disable)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_callout);

	/* Down the MII */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	EMAC_WRITE(sc, EMAC_ISER, 0);

	/* Disable the receive and transmit channels. */
	mal_stop(sc->sc_instance);

	/* Disable the transmit and receive MACs. */
	EMAC_WRITE(sc, EMAC_MR0,
	    EMAC_READ(sc, EMAC_MR0) & ~(MR0_TXE | MR0_RXE));

	/* Release any queued transmit buffers. */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		emac_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
}

static void
emac_watchdog(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	emac_txreap(sc);

	if (sc->sc_txfree != EMAC_NTXDESC) {
		aprint_error_ifnet(ifp,
		    "device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
		if_statinc(ifp, if_oerrors);

		/* Reset the interface. */
		(void)emac_init(ifp);
	} else if (ifp->if_flags & IFF_DEBUG)
		aprint_error_ifnet(ifp, "recovered from device timeout\n");

	/* try to get more packets going */
	emac_start(ifp);
}

static int
emac_add_rxbuf(struct emac_softc *sc, int idx)
{
	struct emac_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("emac_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EMAC_INIT_RXDESC(sc, idx);

	return 0;
}

static void
emac_rxdrain(struct emac_softc *sc)
{
	struct emac_rxsoft *rxs;
	int i;

	for (i = 0; i < EMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static int
emac_set_filter(struct emac_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t rmr, crc, mask, tmp, reg, gaht[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	int regs, cnt = 0, i;

	if (sc->sc_htsize == 256) {
		reg = EMAC_GAHT256(0);
		regs = 8;
	} else {
		reg = EMAC_GAHT64(0);
		regs = 4;
	}
	mask = (1ULL << (sc->sc_htsize / regs)) - 1;
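	/*
	 * Worked example: a 256-bit table spread over 8 registers is
	 * 32 bits per register, so mask == 0xffffffff; a 64-bit table
	 * over 4 registers is 16 bits per register, so mask == 0xffff.
	 */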

	rmr = EMAC_READ(sc, EMAC_RMR);
	rmr &= ~(RMR_PMME | RMR_MAE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo,
		    enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			gaht[0] = gaht[1] = gaht[2] = gaht[3] =
			    gaht[4] = gaht[5] = gaht[6] = gaht[7] = mask;
			break;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		if (sc->sc_htsize == 256)
			EMAC_SET_FILTER256(gaht, crc);
		else
			EMAC_SET_FILTER(gaht, crc);

		ETHER_NEXT_MULTI(step, enm);
		cnt++;
	}
	ETHER_UNLOCK(ec);

	for (i = 1, tmp = gaht[0]; i < regs; i++)
		tmp &= gaht[i];
	if (tmp == mask) {
		/* All categories are true. */
		ifp->if_flags |= IFF_ALLMULTI;
		rmr |= RMR_PMME;
	} else if (cnt != 0) {
		/* Some categories are true. */
		for (i = 0; i < regs; i++)
			EMAC_WRITE(sc, reg + (i << 2), gaht[i]);
		rmr |= RMR_MAE;
	}
	EMAC_WRITE(sc, EMAC_RMR, rmr);

	return 0;
}

/*
 * Reap completed Tx descriptors.
 */
static int
emac_txreap(struct emac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_txsoft *txs;
	int handled, i;
	uint32_t txstat, count;

	EMAC_EVCNT_INCR(&sc->sc_ev_txreap);
	handled = 0;

	count = 0;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN;
	    i = EMAC_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		EMAC_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].md_stat_ctrl;
		if (txstat & MAL_TX_READY)
			break;

		handled = 1;

		/*
		 * Check for errors and collisions.
		 */
		if (txstat & (EMAC_TXS_UR | EMAC_TXS_ED))
			if_statinc(ifp, if_oerrors);

#ifdef EMAC_EVENT_COUNTERS
		if (txstat & EMAC_TXS_UR)
			EMAC_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* EMAC_EVENT_COUNTERS */

		if (txstat &
		    (EMAC_TXS_EC | EMAC_TXS_MC | EMAC_TXS_SC | EMAC_TXS_LC)) {
			if (txstat & EMAC_TXS_EC)
				if_statadd(ifp, if_collisions, 16);
			else if (txstat & EMAC_TXS_MC)
				if_statadd(ifp, if_collisions, 2); /* XXX? */
			else if (txstat & EMAC_TXS_SC)
				if_statinc(ifp, if_collisions);
			if (txstat & EMAC_TXS_LC)
				if_statinc(ifp, if_collisions);
		} else
			if_statinc(ifp, if_opackets);

		if (ifp->if_flags & IFF_DEBUG) {
			if (txstat & EMAC_TXS_ED)
				aprint_error_ifnet(ifp, "excessive deferral\n");
			if (txstat & EMAC_TXS_EC)
				aprint_error_ifnet(ifp,
				    "excessive collisions\n");
		}

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		count++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == EMAC_TXQUEUELEN)
		ifp->if_timer = 0;

	if (count != 0)
		rnd_add_uint32(&sc->rnd_source, count);

	return handled;
}


/*
 * Reset functions
 */

static void
emac_soft_reset(struct emac_softc *sc)
{
	uint32_t sdr;
	int t = 0;

	/*
	 * The PHY must provide a TX Clk in order to perform a soft reset of
	 * the EMAC.  If none is present, select the internal clock
	 * (SDR0_MFR[E0CS, E1CS]).  After the soft reset, select the external
	 * clock.
	 */

	sdr = mfsdr(DCR_SDR0_MFR);
	sdr |= SDR0_MFR_ECS(sc->sc_instance);
	mtsdr(DCR_SDR0_MFR, sdr);

	EMAC_WRITE(sc, EMAC_MR0, MR0_SRST);

	sdr = mfsdr(DCR_SDR0_MFR);
	sdr &= ~SDR0_MFR_ECS(sc->sc_instance);
	mtsdr(DCR_SDR0_MFR, sdr);

	delay(5);

	/* wait for the reset to finish */
	while (EMAC_READ(sc, EMAC_MR0) & MR0_SRST) {
		if (++t == 1000000 /* 1sec XXXXX */) {
			aprint_error_dev(sc->sc_dev, "Soft Reset failed\n");
			return;
		}
		delay(1);
	}
}

static void
emac_smart_reset(struct emac_softc *sc)
{
	uint32_t mr0;
	int t = 0;

	mr0 = EMAC_READ(sc, EMAC_MR0);
	if (mr0 & (MR0_TXE | MR0_RXE)) {
		mr0 &= ~(MR0_TXE | MR0_RXE);
		EMAC_WRITE(sc, EMAC_MR0, mr0);

		/* wait for the idle state */
		while ((EMAC_READ(sc, EMAC_MR0) & (MR0_TXI | MR0_RXI)) !=
		    (MR0_TXI | MR0_RXI)) {
			if (++t == 1000000 /* 1sec XXXXX */) {
				aprint_error_dev(sc->sc_dev,
				    "Smart Reset failed\n");
				return;
			}
			delay(1);
		}
	}
}


/*
 * MII related functions
 */

static int
emac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct emac_softc *sc = device_private(self);
	uint32_t sta_reg;
	int rv;

	if (sc->sc_rmii_enable)
		sc->sc_rmii_enable(device_parent(self), sc->sc_instance);

	/* wait for PHY data transfer to complete */
	if ((rv = emac_mii_wait(sc)) != 0)
		goto fail;

	sta_reg =
	    sc->sc_stacr_read		|
	    (reg << STACR_PRA_SHIFT)	|
	    (phy << STACR_PCDA_SHIFT)	|
	    sc->sc_stacr_bits;
	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if ((rv = emac_mii_wait(sc)) != 0)
		goto fail;
	sta_reg = EMAC_READ(sc, EMAC_STACR);

	if (sta_reg & STACR_PHYE) {
		rv = -1;
		goto fail;
	}
	*val = sta_reg >> STACR_PHYD_SHIFT;

fail:
	if (sc->sc_rmii_disable)
		sc->sc_rmii_disable(device_parent(self), sc->sc_instance);
	return rv;
}

static int
emac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct emac_softc *sc = device_private(self);
	uint32_t sta_reg;
	int rv;

	if (sc->sc_rmii_enable)
		sc->sc_rmii_enable(device_parent(self), sc->sc_instance);

	/* wait for PHY data transfer to complete */
	if ((rv = emac_mii_wait(sc)) != 0)
		goto out;

	sta_reg =
	    (val << STACR_PHYD_SHIFT)	|
	    sc->sc_stacr_write		|
	    (reg << STACR_PRA_SHIFT)	|
	    (phy << STACR_PCDA_SHIFT)	|
	    sc->sc_stacr_bits;
	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if ((rv = emac_mii_wait(sc)) != 0)
		goto out;
	if (EMAC_READ(sc, EMAC_STACR) & STACR_PHYE) {
		aprint_error_dev(sc->sc_dev, "MII PHY Error\n");
		rv = -1;
	}

out:
	if (sc->sc_rmii_disable)
		sc->sc_rmii_disable(device_parent(self), sc->sc_instance);

	return rv;
}

static void
emac_mii_statchg(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	/*
	 * MR1 can only be written immediately after a reset...
	 */
	emac_smart_reset(sc);

	sc->sc_mr1 &= ~(MR1_FDE | MR1_ILE | MR1_EIFC | MR1_MF_MASK | MR1_IST);
	if (mii->mii_media_active & IFM_FDX)
		sc->sc_mr1 |= (MR1_FDE | MR1_EIFC | MR1_IST);
	if (mii->mii_media_active & IFM_FLOW)
		sc->sc_mr1 |= MR1_EIFC;
	if (mii->mii_media_active & IFM_LOOP)
		sc->sc_mr1 |= MR1_ILE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		sc->sc_mr1 |= (MR1_MF_1000MBS | MR1_IST);
		break;

	case IFM_100_TX:
		sc->sc_mr1 |= (MR1_MF_100MBS | MR1_IST);
		break;

	case IFM_10_T:
		sc->sc_mr1 |= MR1_MF_10MBS;
		break;

	case IFM_NONE:
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown sub-type %d\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		break;
	}
	if (sc->sc_rmii_speed)
		sc->sc_rmii_speed(device_parent(sc->sc_dev), sc->sc_instance,
		    IFM_SUBTYPE(mii->mii_media_active));

	EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);

	/* Enable TX and RX if already RUNNING */
	if (ifp->if_flags & IFF_RUNNING)
		EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);
}

static uint32_t
emac_mii_wait(struct emac_softc *sc)
{
	int i;
	uint32_t oc;

	/* wait for PHY data transfer to complete */
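	/*
	 * Which value of STACR_OC means "done" differs by STA version;
	 * sc_stacr_completed records the completed sense, so the loop
	 * below spins until the bit matches it.
	 */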
	i = 0;
	oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC;
	while ((oc == STACR_OC) != sc->sc_stacr_completed) {
		delay(7);
		if (i++ > 5) {
			aprint_error_dev(sc->sc_dev, "MII timed out\n");
			return ETIMEDOUT;
		}
		oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC;
	}
	return 0;
}

static void
emac_mii_tick(void *arg)
{
	struct emac_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
}

int
emac_txeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	int handled = 0;

	EMAC_EVCNT_INCR(&sc->sc_ev_txintr);
	handled |= emac_txreap(sc);

	/* try to get more packets going */
	if_schedule_deferred_start(&sc->sc_ethercom.ec_if);

	return handled;
}

int
emac_rxeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat, count;
	int i, len;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxintr);

	count = 0;
	for (i = sc->sc_rxptr; ; i = EMAC_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].md_stat_ctrl;

		if (rxstat & MAL_RX_EMPTY) {
			/*
			 * We have processed all of the receive buffers.
			 */
			/* Flush current empty descriptor */
			EMAC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 */
		if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE |
		    EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE |
		    EMAC_RXS_IRE)) {
#define	PRINTERR(bit, str)					\
			if (rxstat & (bit))			\
				aprint_error_ifnet(ifp,		\
				    "receive error: %s\n", str)
			if_statinc(ifp, if_ierrors);
			PRINTERR(EMAC_RXS_OE, "overrun error");
			PRINTERR(EMAC_RXS_BP, "bad packet");
			PRINTERR(EMAC_RXS_RP, "runt packet");
			PRINTERR(EMAC_RXS_SE, "short event");
			PRINTERR(EMAC_RXS_AE, "alignment error");
			PRINTERR(EMAC_RXS_BFCS, "bad FCS");
			PRINTERR(EMAC_RXS_PTL, "packet too long");
			PRINTERR(EMAC_RXS_ORE, "out of range error");
			PRINTERR(EMAC_RXS_IRE, "in range error");
#undef PRINTERR
			EMAC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, the 405GP emac
		 * includes the CRC with every packet.
		 */
		len = sc->sc_rxdescs[i].md_data_len - ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (emac_copy_small != 0 && len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, void *),
			    mtod(rxs->rxs_mbuf, void *), len);
			EMAC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (emac_add_rxbuf(sc, i) != 0) {
 dropit:
				if_statinc(ifp, if_ierrors);
				EMAC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);

		count++;
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	if (count != 0)
		rnd_add_uint32(&sc->rnd_source, count);

	return 1;
}

int
emac_txde_intr(void *arg)
{
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_txde);
	aprint_error_dev(sc->sc_dev, "emac_txde_intr\n");
	return 1;
}

int
emac_rxde_intr(void *arg)
{
	struct emac_softc *sc = arg;
	int i;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxde);
	aprint_error_dev(sc->sc_dev, "emac_rxde_intr\n");
	/*
	 * XXX!
	 * This is a bit drastic; we just drop all descriptors that aren't
	 * "clean".  We should probably send any that are up the stack.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (sc->sc_rxdescs[i].md_data_len != MCLBYTES)
			EMAC_INIT_RXDESC(sc, i);
	}

	return 1;
}