/* $NetBSD: if_ae.c,v 1.44 2024/06/29 12:11:11 riastradh Exp $ */
/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * This code was written by Garrett D'Amore for the Champaign-Urbana
 * Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *	This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center; and by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the onboard ethernet MAC found on the AR5312
 * chip's AHB bus.
 *
 * This device is very similar to the tulip in most regards, and
 * the code is directly derived from NetBSD's tulip.c.  However, it
 * is different enough that it did not seem to be a good idea to
 * add further complexity to the tulip driver, so we have our own.
 *
 * Also tulip has a lot of complexity in it for various parts/options
 * that we don't need, and on these little boxes with only ~8MB RAM, we
 * don't want any extra bloat.
 */

/*
 * TODO:
 *
 * 1) Find out about BUS_MODE_ALIGN16B.  This chip can apparently align
 *    inbound packets on a half-word boundary, which would make life easier
 *    for TCP/IP.  (Aligning IP headers on a word; see the note below.)
 *
 * 2) There is stuff in original tulip to shut down the device when reacting
 *    to a change in link status.  Is that needed?
 *
 * 3) Test with a variety of 10/100 HDX/FDX scenarios.
 *
 */
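
/*
 * Note on (1) above: an Ethernet header is 14 bytes, so a receive
 * buffer that starts 2 bytes past a word boundary leaves the IP
 * header word-aligned.  On strict-alignment CPUs like these MIPS
 * cores, that would save the realignment copy currently done in
 * ae_rxintr().
 */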

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.44 2024/06/29 12:11:11 riastradh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <mips/atheros/include/arbusvar.h>
#include <mips/atheros/dev/aereg.h>
#include <mips/atheros/dev/aevar.h>

static const struct {
	uint32_t txth_opmode;		/* OPMODE bits */
	const char *txth_name;		/* name of mode */
} ae_txthresh[] = {
	{ OPMODE_TR_32,		"32 words" },
	{ OPMODE_TR_64,		"64 words" },
	{ OPMODE_TR_128,	"128 words" },
	{ OPMODE_TR_256,	"256 words" },
	{ OPMODE_SF,		"store and forward mode" },
	{ 0,			NULL },
};
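
/*
 * sc_txthresh is an index into this table; ae_intr() advances it by
 * one on each transmit underrun, raising the threshold until the chip
 * ends up in store-and-forward mode.
 */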

static int	ae_match(device_t, struct cfdata *, void *);
static void	ae_attach(device_t, device_t, void *);
static int	ae_detach(device_t, int);
static int	ae_activate(device_t, enum devact);

static int	ae_ifflags_cb(struct ethercom *);
static void	ae_reset(struct ae_softc *);
static void	ae_idle(struct ae_softc *, uint32_t);

static void	ae_start(struct ifnet *);
static void	ae_watchdog(struct ifnet *);
static int	ae_ioctl(struct ifnet *, u_long, void *);
static int	ae_init(struct ifnet *);
static void	ae_stop(struct ifnet *, int);

static void	ae_shutdown(void *);

static void	ae_rxdrain(struct ae_softc *);
static int	ae_add_rxbuf(struct ae_softc *, int);

static int	ae_enable(struct ae_softc *);
static void	ae_disable(struct ae_softc *);
static void	ae_power(int, void *);

static void	ae_filter_setup(struct ae_softc *);

static int	ae_intr(void *);
static void	ae_rxintr(struct ae_softc *);
static void	ae_txintr(struct ae_softc *);

static void	ae_mii_tick(void *);
static void	ae_mii_statchg(struct ifnet *);

static int	ae_mii_readreg(device_t, int, int, uint16_t *);
static int	ae_mii_writereg(device_t, int, int, uint16_t);

#ifdef AE_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif
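
/*
 * DPRINTF is compiled in only under AE_DEBUG, and even then prints
 * only when the interface has IFF_DEBUG set (ifconfig ae0 debug).
 */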

#ifdef AE_STATS
static void	ae_print_stats(struct ae_softc *);
#endif

CFATTACH_DECL_NEW(ae, sizeof(struct ae_softc),
    ae_match, ae_attach, ae_detach, ae_activate);

/*
 * ae_match:
 *
 *	Check for a device match.
 */
int
ae_match(device_t parent, struct cfdata *cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	if (strcmp(aa->aa_name, cf->cf_name) == 0)
		return 1;

	return 0;
}

/*
 * ae_attach:
 *
 *	Attach an ae interface to the system.
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	int i, error;

	sc->sc_dev = self;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get the MAC address.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-address property\n",
		    device_xname(sc->sc_dev));
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
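	/*
	 * This is the usual four-step bus_dma(9) sequence:
	 * bus_dmamem_alloc() reserves DMA-safe memory, bus_dmamem_map()
	 * makes it addressable by the kernel, bus_dmamap_create() builds
	 * the map, and bus_dmamap_load() yields the bus address that is
	 * later handed to the chip.
	 */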
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", device_xname(sc->sc_dev), error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = ae_mii_readreg;
	mii->mii_writereg = ae_mii_writereg;
	mii->mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(device_xname(sc->sc_dev),
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    device_xname(sc->sc_dev));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}

/*
 * ae_activate:
 *
 *	Handle device activation/deactivation requests.
 */
int
ae_activate(device_t self, enum devact act)
{
	struct ae_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

/*
 * ae_detach:
 *
 *	Detach a device interface.
 */
int
ae_detach(device_t self, int flags)
{
	struct ae_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct ae_txsoft *txs;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & AE_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	if (sc->sc_tick)
		callout_stop(&sc->sc_tick_callout);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	rnd_detach_source(&sc->sc_rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->sc_mii.mii_media);

	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);

	return (0);
}

/*
 * ae_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
ae_shutdown(void *arg)
{
	struct ae_softc *sc = arg;

	ae_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * ae_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx = -1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags, ifp->if_flags));

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    device_xname(sc->sc_dev), ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  The chip also wants transmit
		 * buffers word-aligned, hence the check below.  In either
		 * case, we'll copy into a fresh mbuf and try again.
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		      BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
				ADCTL_SIZE1_SHIFT) |
				(nexttx == (AE_NTXDESC - 1) ?
				    ADCTL_ER : 0);
			lasttx = nexttx;
		}

		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("     txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       ad_status:   0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf("       ad_ctl:      0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf("       ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf("       ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txfree != ofree) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    device_xname(sc->sc_dev), lasttx, firsttx));
		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC;
		AE_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN;
		AE_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Wake up the transmitter. */
		/* XXX USE AUTOPOLLING? */
		AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
		AE_BARRIER(sc);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * ae_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
ae_watchdog(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	int doing_transmit;

	doing_transmit = (!SIMPLEQ_EMPTY(&sc->sc_txdirtyq));

	if (doing_transmit) {
		printf("%s: transmit timeout\n", device_xname(sc->sc_dev));
		if_statinc(ifp, if_oerrors);
	} else
		printf("%s: spurious watchdog timeout\n",
		    device_xname(sc->sc_dev));

	(void) ae_init(ifp);

	/* Try to get more packets going. */
	ae_start(ifp);
}

/*
 * If the interface is up and running, only modify the receive
 * filter when changing to/from promiscuous mode.  Otherwise return
 * ENETRESET so that ether_ioctl will reset the chip.
 */
static int
ae_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ae_softc *sc = ifp->if_softc;
	u_short change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ae_filter_setup(sc);
	return 0;
}

/*
 * ae_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ae_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed.  Set the
			 * hardware filter accordingly.
			 */
			ae_filter_setup(sc);
		}
		error = 0;
	}

	/* Try to get more packets going. */
	if (AE_IS_ENABLED(sc))
		ae_start(ifp);

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

/*
 * ae_intr:
 *
 *	Interrupt service routine.
 */
int
ae_intr(void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t status, rxstatus, txstatus;
	int handled = 0, txthresh;

	DPRINTF(sc, ("%s: ae_intr\n", device_xname(sc->sc_dev)));

#ifdef DEBUG
	if (AE_IS_ENABLED(sc) == 0)
		panic("%s: ae_intr: not enabled", device_xname(sc->sc_dev));
#endif

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev)) {
		printf("spurious?!?\n");
		return (0);
	}

	for (;;) {
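		/*
		 * The status register is write-one-to-clear, as on the
		 * tulip: writing back the bits we just read acknowledges
		 * those interrupt sources before we service them.
		 */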
		status = AE_READ(sc, CSR_STATUS);
		if (status) {
			AE_WRITE(sc, CSR_STATUS, status);
			AE_BARRIER(sc);
		}

		if ((status & sc->sc_inten) == 0)
			break;

		handled = 1;

		rxstatus = status & sc->sc_rxint_mask;
		txstatus = status & sc->sc_txint_mask;

		if (rxstatus) {
			/* Grab any new packets. */
			ae_rxintr(sc);

			if (rxstatus & STATUS_RU) {
				printf("%s: receive ring overrun\n",
				    device_xname(sc->sc_dev));
				/* Get the receive process going again. */
				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
				AE_BARRIER(sc);
				break;
			}
		}

		if (txstatus) {
			/* Sweep up transmit descriptors. */
			ae_txintr(sc);

			if (txstatus & STATUS_TJT)
				printf("%s: transmit jabber timeout\n",
				    device_xname(sc->sc_dev));

			if (txstatus & STATUS_UNF) {
				/*
				 * Increase our transmit threshold if
				 * another is available.
				 */
				txthresh = sc->sc_txthresh + 1;
				if (ae_txthresh[txthresh].txth_name != NULL) {
					uint32_t opmode;
					/* Idle the transmit process. */
					opmode = AE_READ(sc, CSR_OPMODE);
					ae_idle(sc, OPMODE_ST);

					sc->sc_txthresh = txthresh;
					opmode &= ~(OPMODE_TR | OPMODE_SF);
					opmode |=
					    ae_txthresh[txthresh].txth_opmode;
					printf("%s: transmit underrun; new "
					    "threshold: %s\n",
					    device_xname(sc->sc_dev),
					    ae_txthresh[txthresh].txth_name);

					/*
					 * Set the new threshold and restart
					 * the transmit process.
					 */
					AE_WRITE(sc, CSR_OPMODE, opmode);
					AE_BARRIER(sc);
				}
				/*
				 * XXX Log every Nth underrun from
				 * XXX now on?
				 */
			}
		}

		if (status & (STATUS_TPS | STATUS_RPS)) {
			if (status & STATUS_TPS)
				printf("%s: transmit process stopped\n",
				    device_xname(sc->sc_dev));
			if (status & STATUS_RPS)
				printf("%s: receive process stopped\n",
				    device_xname(sc->sc_dev));
			(void) ae_init(ifp);
			break;
		}

		if (status & STATUS_SE) {
			const char *str;

			if (status & STATUS_TX_ABORT)
				str = "tx abort";
			else if (status & STATUS_RX_ABORT)
				str = "rx abort";
			else
				str = "unknown error";

			printf("%s: fatal system error: %s\n",
			    device_xname(sc->sc_dev), str);
			(void) ae_init(ifp);
			break;
		}

		/*
		 * Not handled:
		 *
		 *	Transmit buffer unavailable -- normal
		 *	condition, nothing to do, really.
		 *
		 *	General purpose timer expired -- we don't
		 *	use the general purpose timer.
		 *
		 *	Early receive interrupt -- not available on
		 *	all chips, we just use RI.  We also only
		 *	use single-segment receive DMA, so this
		 *	is mostly useless.
		 */
	}

	/* Try to get more packets going. */
	if_schedule_deferred_start(ifp);

	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, status);
	return (handled);
}

/*
 * ae_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
ae_rxintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		AE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].ad_status;

		if (rxstat & ADSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (rxstat & ADSTAT_Rx_CS)
			if_statinc(ifp, if_collisions);

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long
		 * error.
		 */
		if (rxstat & ADSTAT_ES &&
		    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 ||
		     (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF |
				ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) {
#define	PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    device_xname(sc->sc_dev), str)
			if_statinc(ifp, if_ierrors);
			PRINTERR(ADSTAT_Rx_DE, "descriptor error");
			PRINTERR(ADSTAT_Rx_RF, "runt frame");
			PRINTERR(ADSTAT_Rx_TL, "frame too long");
			PRINTERR(ADSTAT_Rx_RE, "MII error");
			PRINTERR(ADSTAT_Rx_DB, "dribbling bit");
			PRINTERR(ADSTAT_Rx_CE, "CRC error");
#undef PRINTERR
			AE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note the chip
		 * includes the CRC with every packet.
		 */
		len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;

		/*
		 * XXX: the Atheros part can align on half words.  what
		 * is the performance implication of this?  Probably
		 * minimal, and we should use it...
		 */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (ae_add_rxbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors);
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The chip's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			if_statinc(ifp, if_ierrors);
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		AE_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * ae_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
ae_txintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_txsoft *txs;
	uint32_t txstat;

	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags));

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		AE_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
				printf("     descriptor %d:\n", i);
				printf("       ad_status:   0x%08x\n",
				    sc->sc_txdescs[i].ad_status);
				printf("       ad_ctl:      0x%08x\n",
				    sc->sc_txdescs[i].ad_ctl);
				printf("       ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr1);
				printf("       ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr2);
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
		if (txstat & ADSTAT_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
#ifdef AE_STATS
		if (txstat & ADSTAT_Tx_UF)
			sc->sc_stats.ts_tx_uf++;
		if (txstat & ADSTAT_Tx_TO)
			sc->sc_stats.ts_tx_to++;
		if (txstat & ADSTAT_Tx_EC)
			sc->sc_stats.ts_tx_ec++;
		if (txstat & ADSTAT_Tx_LC)
			sc->sc_stats.ts_tx_lc++;
#endif

		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (ADSTAT_Tx_UF | ADSTAT_Tx_TO))
			if_statinc_ref(ifp, nsr, if_oerrors);

		if (txstat & ADSTAT_Tx_EC)
			if_statadd_ref(ifp, nsr, if_collisions, 16);
		else if (ADSTAT_Tx_COLLISIONS(txstat))
			if_statadd_ref(ifp, nsr, if_collisions,
			    ADSTAT_Tx_COLLISIONS(txstat));
		if (txstat & ADSTAT_Tx_LC)
			if_statinc_ref(ifp, nsr, if_collisions);

		if_statinc_ref(ifp, nsr, if_opackets);
		IF_STAT_PUTREF(ifp);
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL)
		ifp->if_timer = 0;
}

#ifdef AE_STATS
void
ae_print_stats(struct ae_softc *sc)
{

	printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
	    device_xname(sc->sc_dev),
	    sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
	    sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
}
#endif

/*
 * ae_reset:
 *
 *	Perform a soft reset on the chip.
 */
void
ae_reset(struct ae_softc *sc)
{
	int i;

	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
	AE_BARRIER(sc);

	/*
	 * The chip doesn't take itself out of reset automatically.
	 * We need to do so after 2us.
	 */
	delay(10);
	AE_WRITE(sc, CSR_BUSMODE, 0);
	AE_BARRIER(sc);

	for (i = 0; i < 1000; i++) {
		/*
		 * Wait a bit for the reset to complete before peeking
		 * at the chip again.
		 */
		delay(10);
		if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
			break;
	}

	if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
		printf("%s: reset failed to complete\n",
		    device_xname(sc->sc_dev));

	delay(1000);
}

/*
 * ae_init:		[ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
ae_init(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;
	struct ae_rxsoft *rxs;
	const uint8_t *enaddr;
	int i, error = 0;

	if ((error = ae_enable(sc)) != 0)
		goto out;

	/*
	 * Cancel any pending I/O.
	 */
	ae_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * Initialize the BUSMODE register.
	 */
	AE_WRITE(sc, CSR_BUSMODE,
	    /* XXX: not sure if this is a good thing or not... */
	    //BUSMODE_ALIGN_16B |
	    BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW);
	AE_BARRIER(sc);

	/*
	 * Initialize the transmit descriptor ring.
	 */
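	/*
	 * Descriptors are chained: each ad_bufaddr2 points to the next
	 * descriptor, and the last one also carries ADCTL_ER so the
	 * chip wraps back to the head of the ring.
	 */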
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < AE_NTXDESC; i++) {
		sc->sc_txdescs[i].ad_ctl = 0;
		sc->sc_txdescs[i].ad_bufaddr2 =
		    AE_CDTXADDR(sc, AE_NEXTTX(i));
	}
	sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER;
	AE_CDTXSYNC(sc, 0, AE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = AE_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the transmit job descriptors.
	 */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = ae_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				ae_rxdrain(sc);
				goto out;
			}
		} else
			AE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	/* normal interrupts */
	sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;

	/* abnormal interrupts */
	sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
	    STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS;

	sc->sc_rxint_mask = STATUS_RI | STATUS_RU;
	sc->sc_txint_mask = STATUS_TI | STATUS_UNF | STATUS_TJT;

	sc->sc_rxint_mask &= sc->sc_inten;
	sc->sc_txint_mask &= sc->sc_inten;

	AE_WRITE(sc, CSR_INTEN, sc->sc_inten);
	AE_WRITE(sc, CSR_STATUS, 0xffffffff);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext));
	AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr));
	AE_BARRIER(sc);

	/*
	 * Set the station address.
	 */
	enaddr = CLLADDR(ifp->if_sadl);
	AE_WRITE(sc, CSR_MACHI, enaddr[5] << 8 | enaddr[4]);
	AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 |
		enaddr[1] << 8 | enaddr[0]);
	AE_BARRIER(sc);

	/*
	 * Set the receive filter.  This will start the transmit and
	 * receive processes.
	 */
	ae_filter_setup(sc);

	/*
	 * Set the current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Start the mac.
	 */
	AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE);
	AE_BARRIER(sc);

	/*
	 * Write out the opmode.
	 */
	AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST |
	    ae_txthresh[sc->sc_txthresh].txth_opmode);
	/*
	 * Start the receive process.
	 */
	AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
	AE_BARRIER(sc);

	if (sc->sc_tick != NULL) {
		/* Start the one second clock. */
		callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc);
	}

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	sc->sc_if_flags = ifp->if_flags;

 out:
	if (error) {
		ifp->if_flags &= ~IFF_RUNNING;
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	}
	return (error);
}

/*
 * ae_enable:
 *
 *	Enable the chip.
 */
static int
ae_enable(struct ae_softc *sc)
{

	if (AE_IS_ENABLED(sc) == 0) {
		sc->sc_ih = arbus_intr_establish(sc->sc_cirq, sc->sc_mirq,
		    ae_intr, sc);
		if (sc->sc_ih == NULL) {
			printf("%s: unable to establish interrupt\n",
			    device_xname(sc->sc_dev));
			return (EIO);
		}
		sc->sc_flags |= AE_ENABLED;
	}
	return (0);
}

/*
 * ae_disable:
 *
 *	Disable the chip.
 */
static void
ae_disable(struct ae_softc *sc)
{

	if (AE_IS_ENABLED(sc)) {
		arbus_intr_disestablish(sc->sc_ih);
		sc->sc_flags &= ~AE_ENABLED;
	}
}

/*
 * ae_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
ae_power(int why, void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	printf("power called: %d, %p\n", why, arg);
	s = splnet();
	switch (why) {
	case PWR_STANDBY:
		/* do nothing! */
		break;
	case PWR_SUSPEND:
		ae_stop(ifp, 0);
		ae_disable(sc);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			ae_enable(sc);
			ae_init(ifp);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}

/*
 * ae_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
ae_rxdrain(struct ae_softc *sc)
{
	struct ae_rxsoft *rxs;
	int i;

	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * ae_stop:		[ ifnet interface function ]
 *
 *	Stop transmission on the interface.
 */
static void
ae_stop(struct ifnet *ifp, int disable)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ae_txsoft *txs;

	if (sc->sc_tick != NULL) {
		/* Stop the one second clock. */
		callout_stop(&sc->sc_tick_callout);
	}

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	AE_WRITE(sc, CSR_INTEN, 0);

	/* Stop the transmit and receive processes. */
	AE_WRITE(sc, CSR_OPMODE, 0);
	AE_WRITE(sc, CSR_RXLIST, 0);
	AE_WRITE(sc, CSR_TXLIST, 0);
	AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE);
	AE_BARRIER(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;

	if (disable) {
		ae_rxdrain(sc);
		ae_disable(sc);
	}

	/*
	 * Reset the chip (needed on some flavors to actually disable it).
	 */
	ae_reset(sc);
}

/*
 * ae_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
ae_add_rxbuf(struct ae_softc *sc, int idx)
{
	struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("ae_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	AE_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * ae_filter_setup:
 *
 *	Set the chip's receive filter.
 */
static void
ae_filter_setup(struct ae_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hash, mchash[2];
	uint32_t macctl = 0;

	/*
	 * If the chip is running, we need to reset the interface,
	 * and will revisit here (with IFF_RUNNING clear).  The
	 * chip seems to really not like to have its multicast
	 * filter programmed without a reset.
	 */
	if (ifp->if_flags & IFF_RUNNING) {
		(void) ae_init(ifp);
		return;
	}

	DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags));

	macctl = AE_READ(sc, CSR_MACCTL);
	macctl &= ~(MACCTL_PR | MACCTL_PM);
	macctl |= MACCTL_HASH;
	macctl |= MACCTL_HBD;

	if (ifp->if_flags & IFF_PROMISC) {
		macctl |= MACCTL_PR;
		goto allmulti;
	}
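
	/*
	 * The hash filter is a 64-bit vector split across two 32-bit
	 * CSRs.  An address selects bit (hash & 0x1f) of word
	 * (hash >> 5), where hash is the low 6 bits of the big-endian
	 * CRC-32 of the address; e.g. a hash of 37 (0x25) sets bit 5
	 * of mchash[1].
	 */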
	mchash[0] = mchash[1] = 0;

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}

		/* Verify whether we use big or little endian hashes */
		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f;
		mchash[hash >> 5] |= 1 << (hash & 0x1f);
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);
	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = 0xffffffff;
	macctl |= MACCTL_PM;

 setit:
	AE_WRITE(sc, CSR_HTLO, mchash[0]);
	AE_WRITE(sc, CSR_HTHI, mchash[1]);

	AE_WRITE(sc, CSR_MACCTL, macctl);
	AE_BARRIER(sc);

	DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n",
		    device_xname(sc->sc_dev), macctl));
}

/*
 * ae_idle:
 *
 *	Cause the transmit and/or receive processes to go idle.
 */
void
ae_idle(struct ae_softc *sc, uint32_t bits)
{
	static const char * const txstate_names[] = {
		"STOPPED",
		"RUNNING - FETCH",
		"RUNNING - WAIT",
		"RUNNING - READING",
		"-- RESERVED --",
		"RUNNING - SETUP",
		"SUSPENDED",
		"RUNNING - CLOSE",
	};
	static const char * const rxstate_names[] = {
		"STOPPED",
		"RUNNING - FETCH",
		"RUNNING - CHECK",
		"RUNNING - WAIT",
		"SUSPENDED",
		"RUNNING - CLOSE",
		"RUNNING - FLUSH",
		"RUNNING - QUEUE",
	};
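
	/*
	 * Indices into the tables above are the 3-bit transmit and
	 * receive process state fields of the status CSR (STATUS_TS
	 * at bits 22:20, STATUS_RS at bits 19:17, matching the shifts
	 * used below).
	 */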

	uint32_t csr, ackmask = 0;
	int i;

	if (bits & OPMODE_ST)
		ackmask |= STATUS_TPS;

	if (bits & OPMODE_SR)
		ackmask |= STATUS_RPS;

	AE_CLR(sc, CSR_OPMODE, bits);

	for (i = 0; i < 1000; i++) {
		if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask)
			break;
		delay(10);
	}

	csr = AE_READ(sc, CSR_STATUS);
	if ((csr & ackmask) != ackmask) {
		if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 &&
		    (csr & STATUS_TS) != STATUS_TS_STOPPED) {
			printf("%s: transmit process failed to idle: "
			    "state %s\n", device_xname(sc->sc_dev),
			    txstate_names[(csr & STATUS_TS) >> 20]);
		}
		if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 &&
		    (csr & STATUS_RS) != STATUS_RS_STOPPED) {
			printf("%s: receive process failed to idle: "
			    "state %s\n", device_xname(sc->sc_dev),
			    rxstate_names[(csr & STATUS_RS) >> 17]);
		}
	}
}

/*****************************************************************************
 * Support functions for MII-attached media.
 *****************************************************************************/

/*
 * ae_mii_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
ae_mii_tick(void *arg)
{
	struct ae_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc);
}

/*
 * ae_mii_statchg:	[mii interface function]
 *
 *	Callback from PHY when media changes.
 */
static void
ae_mii_statchg(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	uint32_t	macctl, flowc;

	//opmode = AE_READ(sc, CSR_OPMODE);
	macctl = AE_READ(sc, CSR_MACCTL);

	/* XXX: do we need to do this? */
	/* Idle the transmit and receive processes. */
	//ae_idle(sc, OPMODE_ST | OPMODE_SR);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		flowc = FLOWC_FCE;
		macctl &= ~MACCTL_DRO;
		macctl |= MACCTL_FDX;
	} else {
		flowc = 0;	/* cannot do flow control in HDX */
		macctl |= MACCTL_DRO;
		macctl &= ~MACCTL_FDX;
	}

	AE_WRITE(sc, CSR_FLOWC, flowc);
	AE_WRITE(sc, CSR_MACCTL, macctl);

	/* restore operational mode */
	//AE_WRITE(sc, CSR_OPMODE, opmode);
	AE_BARRIER(sc);
}

/*
 * ae_mii_readreg:
 *
 *	Read a PHY register.
 */
static int
ae_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct ae_softc	*sc = device_private(self);
	uint32_t	addr;
	int		i;

	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);
	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}

	if (i >= 100000000)
		return ETIMEDOUT;

	*val = AE_READ(sc, CSR_MIIDATA) & 0xffff;
	return 0;
}

/*
 * ae_mii_writereg:
 *
 *	Write a PHY register.
 */
static int
ae_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct ae_softc *sc = device_private(self);
	uint32_t	addr;
	int		i;

	/* write the data register */
	AE_WRITE(sc, CSR_MIIDATA, val);

	/* write the address to latch it in */
	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) |
	    MIIADDR_WRITE;
	AE_WRITE(sc, CSR_MIIADDR, addr);
	AE_BARRIER(sc);

	for (i = 0; i < 100000000; i++) {
		if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}

	if (i >= 100000000)
		return ETIMEDOUT;

	return 0;
}