/*-
 * Copyright (c) 2016 Hiroki Mori. All rights reserved.
 * Copyright (C) 2007
 *	Oleksandr Tymoshenko <gonzo@freebsd.org>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id: $
 *
 */

#include "opt_platform.h"
#include "opt_ar531x.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * AR531x Ethernet interface driver.
 * Derived from mips/idt/if_kr.c and NetBSD code.
 */
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>
#include <sys/kdb.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#ifdef INTRNG
#include <machine/intr.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef ARE_MDIO
#include <dev/mdio/mdio.h>
#include <dev/etherswitch/miiproxy.h>
#include "mdio_if.h"
#endif

MODULE_DEPEND(are, ether, 1, 1, 1);
MODULE_DEPEND(are, miibus, 1, 1, 1);

#include "miibus_if.h"

#include <mips/atheros/ar531x/ar5315reg.h>
#include <mips/atheros/ar531x/ar5312reg.h>
#include <mips/atheros/ar531x/ar5315_setup.h>
#include <mips/atheros/ar531x/if_arereg.h>

#ifdef ARE_DEBUG
void dump_txdesc(struct are_softc *, int);
void dump_status_reg(struct are_softc *);
#endif

static int are_attach(device_t);
static int are_detach(device_t);
static int are_ifmedia_upd(struct ifnet *);
static void are_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int are_ioctl(struct ifnet *, u_long, caddr_t);
static void are_init(void *);
static void are_init_locked(struct are_softc *);
static void are_link_task(void *, int);
static int are_miibus_readreg(device_t, int, int);
static void are_miibus_statchg(device_t);
static int are_miibus_writereg(device_t, int, int, int);
static int are_probe(device_t);
static void are_reset(struct are_softc *);
static int are_resume(device_t);
static int are_rx_ring_init(struct are_softc *);
static int are_tx_ring_init(struct are_softc *);
static int are_shutdown(device_t);
static void are_start(struct ifnet *);
static void are_start_locked(struct ifnet *);
static void are_stop(struct are_softc *);
static int are_suspend(device_t);

static void are_rx(struct are_softc *);
static void are_tx(struct are_softc *);
static void are_intr(void *);
static void are_tick(void *);

static void are_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int are_dma_alloc(struct are_softc *);
static void are_dma_free(struct are_softc *);
static int are_newbuf(struct are_softc *, int);
static __inline void are_fixup_rx(struct mbuf *);

static void are_hinted_child(device_t bus, const char *dname, int dunit);

static device_method_t are_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		are_probe),
	DEVMETHOD(device_attach,	are_attach),
	DEVMETHOD(device_detach,	are_detach),
	DEVMETHOD(device_suspend,	are_suspend),
	DEVMETHOD(device_resume,	are_resume),
	DEVMETHOD(device_shutdown,	are_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	are_miibus_readreg),
	DEVMETHOD(miibus_writereg,	are_miibus_writereg),
	DEVMETHOD(miibus_statchg,	are_miibus_statchg),

	/* bus interface */
	DEVMETHOD(bus_add_child,	device_add_child_ordered),
	DEVMETHOD(bus_hinted_child,	are_hinted_child),

	DEVMETHOD_END
};

static driver_t are_driver = {
	"are",
	are_methods,
	sizeof(struct are_softc)
};

static devclass_t are_devclass;

DRIVER_MODULE(are, nexus, are_driver, are_devclass, 0, 0);
#ifdef ARE_MII
DRIVER_MODULE(miibus, are, miibus_driver, miibus_devclass, 0, 0);
#endif

#ifdef ARE_MDIO
static int aremdio_probe(device_t);
static int aremdio_attach(device_t);
static int aremdio_detach(device_t);

/*
 * Declare an additional, separate driver for accessing the MDIO bus.
 */
static device_method_t aremdio_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		aremdio_probe),
	DEVMETHOD(device_attach,	aremdio_attach),
	DEVMETHOD(device_detach,	aremdio_detach),

	/* bus interface */
	DEVMETHOD(bus_add_child,	device_add_child_ordered),

	/* MDIO access */
	DEVMETHOD(mdio_readreg,		are_miibus_readreg),
	DEVMETHOD(mdio_writereg,	are_miibus_writereg),

	DEVMETHOD_END
};

DEFINE_CLASS_0(aremdio, aremdio_driver, aremdio_methods,
    sizeof(struct are_softc));
static devclass_t aremdio_devclass;

DRIVER_MODULE(miiproxy, are, miiproxy_driver, miiproxy_devclass, 0, 0);
DRIVER_MODULE(aremdio, nexus, aremdio_driver, aremdio_devclass, 0, 0);
DRIVER_MODULE(mdio, aremdio, mdio_driver, mdio_devclass, 0, 0);
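
/*
 * Rough picture of the resulting stacking (an editor's sketch, not
 * normative): aremdio attaches to nexus and exposes the MAC's MDIO
 * registers through mdio(4); miiproxy then relays MII register accesses
 * from the are instance to that bus, so the PHY (or switch) can sit
 * behind a different MAC than the one carrying the traffic.
 */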
#endif

static int
are_probe(device_t dev)
{

	device_set_desc(dev, "AR531x Ethernet interface");
	return (0);
}

static int
are_attach(device_t dev)
{
	struct ifnet		*ifp;
	struct are_softc	*sc;
	int			error = 0;
#ifdef INTRNG
	int			enetirq;
#else
	int			rid;
#endif
	int			unit;
	char			*local_macstr;
	int			count;
	int			i;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->are_dev = dev;

	/* Hardcoded fallback MAC address. */
	sc->are_eaddr[0] = 0x00;
	sc->are_eaddr[1] = 0x0C;
	sc->are_eaddr[2] = 0x42;
	sc->are_eaddr[3] = 0x09;
	sc->are_eaddr[4] = 0x5E;
	sc->are_eaddr[5] = 0x6B;

	/* Try to override it from hints. */
	if (!resource_string_value(device_get_name(dev),
		device_get_unit(dev), "macaddr", (const char **)&local_macstr)) {
		uint32_t tmpmac[ETHER_ADDR_LEN];

		/* Have a MAC address; should use it */
		device_printf(dev, "Overriding MAC address from environment: '%s'\n",
		    local_macstr);

		/* Extract out the MAC address */
		/* XXX this should all be a generic method */
		count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
		    &tmpmac[0], &tmpmac[1],
		    &tmpmac[2], &tmpmac[3],
		    &tmpmac[4], &tmpmac[5]);
		if (count == 6) {
			/* Valid! */
			for (i = 0; i < ETHER_ADDR_LEN; i++)
				sc->are_eaddr[i] = tmpmac[i];
		}
	}
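
	/*
	 * For reference, the hint consumed above would normally be set
	 * in /boot/device.hints; a hypothetical example for unit 0:
	 *
	 *	hint.are.0.macaddr="00:0c:42:09:5e:6b"
	 */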

	mtx_init(&sc->are_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->are_stat_callout, &sc->are_mtx, 0);
	TASK_INIT(&sc->are_link_task, 0, are_link_task, sc);

	/* Map control/status registers. */
	sc->are_rid = 0;
	sc->are_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->are_rid,
	    RF_ACTIVE | RF_SHAREABLE);

	if (sc->are_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->are_btag = rman_get_bustag(sc->are_res);
	sc->are_bhandle = rman_get_bushandle(sc->are_res);

#ifndef INTRNG
	/* Allocate interrupts */
	rid = 0;
	sc->are_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->are_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}
#endif

	/* Allocate ifnet structure. */
	ifp = sc->are_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = are_ioctl;
	ifp->if_start = are_start;
	ifp->if_init = are_init;
	sc->are_if_flags = ifp->if_flags;

	/* ifqmaxlen is a sysctl value defined in net/if.c */
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	ifp->if_capenable = ifp->if_capabilities;

	if (are_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

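	/*
	 * Soft-reset the MAC here (the same BUSMODE_SWR bit are_reset()
	 * uses) so the MII bus is in a known state before the PHY probe
	 * below.
	 */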
	CSR_WRITE_4(sc, CSR_BUSMODE, BUSMODE_SWR);
	DELAY(1000);

#ifdef ARE_MDIO
	sc->are_miiproxy = mii_attach_proxy(sc->are_dev);
#endif

#ifdef ARE_MII
	/* Do MII setup. */
	error = mii_attach(dev, &sc->are_miibus, ifp, are_ifmedia_upd,
	    are_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}
#else
	ifmedia_init(&sc->are_ifmedia, 0, are_ifmedia_upd, are_ifmedia_sts);

	ifmedia_add(&sc->are_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->are_ifmedia, IFM_ETHER | IFM_AUTO);
#endif

	/* Call MI attach routine. */
	ether_ifattach(ifp, sc->are_eaddr);

#ifdef INTRNG
	char *name;
	if (ar531x_soc >= AR531X_SOC_AR5315) {
		enetirq = AR5315_CPU_IRQ_ENET;
		name = "enet";
	} else {
		if (device_get_unit(dev) == 0) {
			enetirq = AR5312_IRQ_ENET0;
			name = "enet0";
		} else {
			enetirq = AR5312_IRQ_ENET1;
			name = "enet1";
		}
	}
	cpu_establish_hardintr(name, NULL, are_intr, sc, enetirq,
	    INTR_TYPE_NET, NULL);
#else
	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->are_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, are_intr, sc, &sc->are_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}
#endif

fail:
	if (error)
		are_detach(dev);

	return (error);
}

static int
are_detach(device_t dev)
{
	struct are_softc	*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->are_ifp;

	KASSERT(mtx_initialized(&sc->are_mtx), ("are mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ARE_LOCK(sc);
		sc->are_detach = 1;
		are_stop(sc);
		ARE_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->are_link_task);
		ether_ifdetach(ifp);
	}
#ifdef ARE_MII
	if (sc->are_miibus)
		device_delete_child(dev, sc->are_miibus);
#endif
	bus_generic_detach(dev);

	if (sc->are_intrhand)
		bus_teardown_intr(dev, sc->are_irq, sc->are_intrhand);
	if (sc->are_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->are_irq);

	if (sc->are_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->are_rid,
		    sc->are_res);

	if (ifp)
		if_free(ifp);

	are_dma_free(sc);

	mtx_destroy(&sc->are_mtx);

	return (0);
}

static int
are_suspend(device_t dev)
{

	panic("%s", __func__);
	return (0);
}

static int
are_resume(device_t dev)
{

	panic("%s", __func__);
	return (0);
}

static int
are_shutdown(device_t dev)
{
	struct are_softc	*sc;

	sc = device_get_softc(dev);

	ARE_LOCK(sc);
	are_stop(sc);
	ARE_UNLOCK(sc);

	return (0);
}

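/*
 * MII access works the same way in both directions (a sketch inferred
 * from the code below): write the PHY and register numbers (plus
 * MIIADDR_WRITE for a write cycle) into CSR_MIIADDR, busy-wait until
 * MIIADDR_BUSY clears, and move the 16-bit value through CSR_MIIDATA.
 */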
static int
are_miibus_readreg(device_t dev, int phy, int reg)
{
	struct are_softc *sc = device_get_softc(dev);
	uint32_t	addr;
	int		i;

	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
	CSR_WRITE_4(sc, CSR_MIIADDR, addr);
	for (i = 0; i < 100000000; i++) {
		if ((CSR_READ_4(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}

	return (CSR_READ_4(sc, CSR_MIIDATA) & 0xffff);
}

static int
are_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct are_softc *sc = device_get_softc(dev);
	uint32_t	addr;
	int		i;

	/* write the data register */
	CSR_WRITE_4(sc, CSR_MIIDATA, data);

	/* write the address to latch it in */
	addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) |
	    MIIADDR_WRITE;
	CSR_WRITE_4(sc, CSR_MIIADDR, addr);

	for (i = 0; i < 100000000; i++) {
		if ((CSR_READ_4(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
			break;
	}

	return (0);
}

static void
are_miibus_statchg(device_t dev)
{
	struct are_softc	*sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->are_link_task);
}

static void
are_link_task(void *arg, int pending)
{
#ifdef ARE_MII
	struct are_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	/* int			lfdx, mfdx; */

	sc = (struct are_softc *)arg;

	ARE_LOCK(sc);
	mii = device_get_softc(sc->are_miibus);
	ifp = sc->are_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ARE_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->are_link_status = 1;
	} else
		sc->are_link_status = 0;

	ARE_UNLOCK(sc);
#endif
}

static void
are_reset(struct are_softc *sc)
{
	int		i;

	CSR_WRITE_4(sc, CSR_BUSMODE, BUSMODE_SWR);

	/*
	 * The chip doesn't take itself out of reset automatically.
	 * We need to do so after 2us.
	 */
	DELAY(10);
	CSR_WRITE_4(sc, CSR_BUSMODE, 0);

	for (i = 0; i < 1000; i++) {
		/*
		 * Wait a bit for the reset to complete before peeking
		 * at the chip again.
		 */
		DELAY(10);
		if ((CSR_READ_4(sc, CSR_BUSMODE) & BUSMODE_SWR) == 0)
			break;
	}

	if (CSR_READ_4(sc, CSR_BUSMODE) & BUSMODE_SWR)
		device_printf(sc->are_dev, "reset timed out\n");

	DELAY(1000);
}

static void
are_init(void *xsc)
{
	struct are_softc	*sc = xsc;

	ARE_LOCK(sc);
	are_init_locked(sc);
	ARE_UNLOCK(sc);
}

static void
are_init_locked(struct are_softc *sc)
{
	struct ifnet		*ifp = sc->are_ifp;
#ifdef ARE_MII
	struct mii_data		*mii;
#endif

	ARE_LOCK_ASSERT(sc);

#ifdef ARE_MII
	mii = device_get_softc(sc->are_miibus);
#endif

	are_stop(sc);
	are_reset(sc);

	/* Init circular RX list. */
	if (are_rx_ring_init(sc) != 0) {
		device_printf(sc->are_dev,
		    "initialization failed: no memory for rx buffers\n");
		are_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	are_tx_ring_init(sc);

	/*
	 * Initialize the BUSMODE register.
	 */
	CSR_WRITE_4(sc, CSR_BUSMODE,
	    /* XXX: not sure if this is a good thing or not... */
	    BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW);

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	/* normal interrupts */
	sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;

	/* abnormal interrupts */
	sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
	    STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS;

	sc->sc_rxint_mask = STATUS_RI | STATUS_RU;
	sc->sc_txint_mask = STATUS_TI | STATUS_UNF | STATUS_TJT;

	sc->sc_rxint_mask &= sc->sc_inten;
	sc->sc_txint_mask &= sc->sc_inten;

	CSR_WRITE_4(sc, CSR_INTEN, sc->sc_inten);
	CSR_WRITE_4(sc, CSR_STATUS, 0xffffffff);

	/*
	 * Give the transmit and receive rings to the chip.
	 */
	CSR_WRITE_4(sc, CSR_TXLIST, ARE_TX_RING_ADDR(sc, 0));
	CSR_WRITE_4(sc, CSR_RXLIST, ARE_RX_RING_ADDR(sc, 0));

	/*
	 * Set the station address.
	 */
	CSR_WRITE_4(sc, CSR_MACHI, sc->are_eaddr[5] << 16 | sc->are_eaddr[4]);
	CSR_WRITE_4(sc, CSR_MACLO, sc->are_eaddr[3] << 24 |
	    sc->are_eaddr[2] << 16 | sc->are_eaddr[1] << 8 | sc->are_eaddr[0]);

	/*
	 * Start the mac.
	 */
	CSR_WRITE_4(sc, CSR_FLOWC, FLOWC_FCE);
	CSR_WRITE_4(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE |
	    MACCTL_PM | MACCTL_FDX | MACCTL_HBD | MACCTL_RA);

	/*
	 * Write out the opmode.
	 */
	CSR_WRITE_4(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST | OPMODE_SF |
	    OPMODE_TR_64);

	/*
	 * Start the receive process.
	 */
	CSR_WRITE_4(sc, CSR_RXPOLL, RXPOLL_RPD);

	sc->are_link_status = 1;
#ifdef ARE_MII
	mii_mediachg(mii);
#endif

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->are_stat_callout, hz, are_tick, sc);
}

static void
are_start(struct ifnet *ifp)
{
	struct are_softc	*sc;

	sc = ifp->if_softc;

	ARE_LOCK(sc);
	are_start_locked(ifp);
	ARE_UNLOCK(sc);
}

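/*
 * A minimal sketch of the descriptor handshake used in are_encap(),
 * assuming the tulip-like layout from if_arereg.h: the host fills
 * are_addr and the size bits of are_devcs, hands the descriptor to the
 * DMA engine by setting ADSTAT_OWN in are_stat, and marks the last ring
 * entry with ADCTL_ER so the engine wraps back to entry 0:
 *
 *	desc->are_devcs = ARE_DMASIZE(len) | ADCTL_Tx_FS | ADCTL_Tx_LS;
 *	desc->are_addr  = paddr;
 *	desc->are_stat  = ADSTAT_OWN;
 */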
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
are_encap(struct are_softc *sc, struct mbuf **m_head)
{
	struct are_txdesc	*txd;
	struct are_desc		*desc, *prev_desc;
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[ARE_MAXFRAGS];
	uint32_t		link_addr;
	int			error, i, nsegs, prod, si, prev_prod;
	int			txstat;
	int			startcount;
	int			padlen;

	startcount = sc->are_cdata.are_tx_cnt;

	ARE_LOCK_ASSERT(sc);

	/*
	 * This copy-always approach is inherited from if_kr.c (and the
	 * VIA Rhine driver before it): the chip wants packet buffers to
	 * be longword aligned, but very often our mbufs aren't.  Rather
	 * than waste time deciding when to copy and when not to, just
	 * do it all the time.
	 */
	m = m_defrag(*m_head, M_NOWAIT);
	if (m == NULL) {
		device_printf(sc->are_dev, "are_encap m_defrag error\n");
		m_freem(*m_head);
		*m_head = NULL;
		return (ENOBUFS);
	}
	*m_head = m;

	/*
	 * The chip doesn't auto-pad, so we have to make sure to pad
	 * short frames out to the minimum frame length ourselves.
	 */
	if ((*m_head)->m_pkthdr.len < ARE_MIN_FRAMELEN) {
		m = *m_head;
		padlen = ARE_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				device_printf(sc->are_dev, "are_encap m_dup error\n");
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
			m = m_defrag(m, M_NOWAIT);
			if (m == NULL) {
				device_printf(sc->are_dev, "are_encap m_defrag error\n");
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}

	prod = sc->are_cdata.are_tx_prod;
	txd = &sc->are_cdata.are_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->are_cdata.are_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		device_printf(sc->are_dev, "are_encap EFBIG error\n");
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->are_cdata.are_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->are_cdata.are_tx_cnt + nsegs >= (ARE_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->are_cdata.are_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->are_cdata.are_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	si = prod;

	/*
	 * Make a list of descriptors for this packet.  The DMA
	 * controller walks through it while are_link is not zero.  The
	 * last descriptor should have the COF flag set so that the next
	 * chain is picked up from NDPTR.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->are_rdata.are_tx_ring[prod];
		desc->are_stat = ADSTAT_OWN;
		desc->are_devcs = ARE_DMASIZE(txsegs[i].ds_len);
		desc->are_addr = txsegs[i].ds_addr;
		/* Mark the end of the ring on the last descriptor. */
		if (prod == ARE_TX_RING_CNT - 1)
			desc->are_devcs |= ADCTL_ER;

		sc->are_cdata.are_tx_cnt++;
		prev_desc = desc;
		ARE_INC(prod, ARE_TX_RING_CNT);
	}

	/*
	 * Mark the last fragment: last-segment, interrupt on completion.
	 */
	if (desc) {
		desc->are_devcs |= ADCTL_Tx_IC;
		desc->are_devcs |= ADCTL_Tx_LS;
	}

	/* Update producer index. */
	sc->are_cdata.are_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->are_cdata.are_tx_ring_tag,
	    sc->are_cdata.are_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Start transmitting.  The TS field of the status register tells
	 * whether the transmit process is stopped (0) or suspended (6);
	 * only then does this chain start a new frame here.  Otherwise
	 * splice it onto the chain already in flight by clearing the
	 * IC/LS flags on the previous descriptor.
	 */
	txstat = (CSR_READ_4(sc, CSR_STATUS) >> 20) & 7;
	if (startcount == 0 && (txstat == 0 || txstat == 6)) {
		desc = &sc->are_rdata.are_tx_ring[si];
		desc->are_devcs |= ADCTL_Tx_FS;
	} else {
		link_addr = ARE_TX_RING_ADDR(sc, si);
		/* Get previous descriptor */
		si = (si + ARE_TX_RING_CNT - 1) % ARE_TX_RING_CNT;
		desc = &sc->are_rdata.are_tx_ring[si];
		desc->are_devcs &= ~(ADCTL_Tx_IC | ADCTL_Tx_LS);
	}

	return (0);
}

static void
are_start_locked(struct ifnet *ifp)
{
	struct are_softc	*sc;
	struct mbuf		*m_head;
	int			enq;
	int			txstat;

	sc = ifp->if_softc;

	ARE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->are_link_status == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->are_cdata.are_tx_cnt < ARE_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (are_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		txstat = (CSR_READ_4(sc, CSR_STATUS) >> 20) & 7;
		if (txstat == 0 || txstat == 6) {
			/* The transmit process is stopped or suspended; kick it. */
			CSR_WRITE_4(sc, CSR_TXPOLL, TXPOLL_TPD);
		}
	}
}

static void
are_stop(struct are_softc *sc)
{
	struct ifnet	*ifp;

	ARE_LOCK_ASSERT(sc);

	ifp = sc->are_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->are_stat_callout);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, CSR_INTEN, 0);

	/* Stop the transmit and receive processes. */
	CSR_WRITE_4(sc, CSR_OPMODE, 0);
	CSR_WRITE_4(sc, CSR_RXLIST, 0);
	CSR_WRITE_4(sc, CSR_TXLIST, 0);
	CSR_WRITE_4(sc, CSR_MACCTL,
	    CSR_READ_4(sc, CSR_MACCTL) & ~(MACCTL_TE | MACCTL_RE));
}

static int
are_set_filter(struct are_softc *sc)
{
	struct ifnet	*ifp;
	int mchash[2];
	int macctl;

	ifp = sc->are_ifp;

	macctl = CSR_READ_4(sc, CSR_MACCTL);
	macctl &= ~(MACCTL_PR | MACCTL_PM);
	macctl |= MACCTL_HBD;

	if (ifp->if_flags & IFF_PROMISC)
		macctl |= MACCTL_PR;

	/*
	 * TODO: program the multicast hash table.  It is not known how
	 * the hash filter works on this SoC, so accept all multicast
	 * frames instead.
	 */
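	/*
	 * If the filter turns out to work like other tulip-style MACs,
	 * a per-address hash bit would be computed roughly like this
	 * (untested sketch, enm being the multicast address):
	 *
	 *	h = ether_crc32_be(enm, ETHER_ADDR_LEN) >> 26;
	 *	mchash[h >> 5] |= 1 << (h & 0x1f);
	 */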

	/* All-multicast: set every hash bit and the PM flag. */
	mchash[0] = mchash[1] = 0xffffffff;
	macctl |= MACCTL_PM;

	CSR_WRITE_4(sc, CSR_HTLO, mchash[0]);
	CSR_WRITE_4(sc, CSR_HTHI, mchash[1]);
	CSR_WRITE_4(sc, CSR_MACCTL, macctl);

	return (0);
}

static int
are_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct are_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
#ifdef ARE_MII
	struct mii_data		*mii;
#endif
	int			error;

	switch (command) {
	case SIOCSIFFLAGS:
		ARE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->are_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					are_set_filter(sc);
			} else {
				if (sc->are_detach == 0)
					are_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				are_stop(sc);
		}
		sc->are_if_flags = ifp->if_flags;
		ARE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ARE_LOCK(sc);
		are_set_filter(sc);
		ARE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
#ifdef ARE_MII
		mii = device_get_softc(sc->are_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
#else
		error = ifmedia_ioctl(ifp, ifr, &sc->are_ifmedia, command);
#endif
		break;
	case SIOCSIFCAP:
		error = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set media options.
 */
static int
are_ifmedia_upd(struct ifnet *ifp)
{
#ifdef ARE_MII
	struct are_softc	*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	ARE_LOCK(sc);
	mii = device_get_softc(sc->are_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	ARE_UNLOCK(sc);

	return (error);
#else
	return (0);
#endif
}

/*
 * Report current media status.
 */
static void
are_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
#ifdef ARE_MII
	struct are_softc	*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->are_miibus);
	ARE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ARE_UNLOCK(sc);
#else
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
#endif
}

struct are_dmamap_arg {
	bus_addr_t	are_busaddr;
};

static void
are_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct are_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->are_busaddr = segs[0].ds_addr;
}

static int
are_dma_alloc(struct are_softc *sc)
{
	struct are_dmamap_arg	ctx;
	struct are_txdesc	*txd;
	struct are_rxdesc	*rxd;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->are_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->are_cdata.are_parent_tag);
	if (error != 0) {
		device_printf(sc->are_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->are_cdata.are_parent_tag,	/* parent */
	    ARE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARE_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARE_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->are_cdata.are_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->are_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->are_cdata.are_parent_tag,	/* parent */
	    ARE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARE_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARE_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->are_cdata.are_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->are_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->are_cdata.are_parent_tag,	/* parent */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * ARE_MAXFRAGS,	/* maxsize */
	    ARE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->are_cdata.are_tx_tag);
	if (error != 0) {
		device_printf(sc->are_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->are_cdata.are_parent_tag,	/* parent */
	    ARE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->are_cdata.are_rx_tag);
	if (error != 0) {
		device_printf(sc->are_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->are_cdata.are_tx_ring_tag,
	    (void **)&sc->are_rdata.are_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->are_cdata.are_tx_ring_map);
	if (error != 0) {
		device_printf(sc->are_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.are_busaddr = 0;
	error = bus_dmamap_load(sc->are_cdata.are_tx_ring_tag,
	    sc->are_cdata.are_tx_ring_map, sc->are_rdata.are_tx_ring,
	    ARE_TX_RING_SIZE, are_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.are_busaddr == 0) {
		device_printf(sc->are_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->are_rdata.are_tx_ring_paddr = ctx.are_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->are_cdata.are_rx_ring_tag,
	    (void **)&sc->are_rdata.are_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->are_cdata.are_rx_ring_map);
	if (error != 0) {
		device_printf(sc->are_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.are_busaddr = 0;
	error = bus_dmamap_load(sc->are_cdata.are_rx_ring_tag,
	    sc->are_cdata.are_rx_ring_map, sc->are_rdata.are_rx_ring,
	    ARE_RX_RING_SIZE, are_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.are_busaddr == 0) {
		device_printf(sc->are_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->are_rdata.are_rx_ring_paddr = ctx.are_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ARE_TX_RING_CNT; i++) {
		txd = &sc->are_cdata.are_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->are_cdata.are_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->are_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->are_cdata.are_rx_tag, 0,
	    &sc->are_cdata.are_rx_sparemap)) != 0) {
		device_printf(sc->are_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < ARE_RX_RING_CNT; i++) {
		rxd = &sc->are_cdata.are_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->are_cdata.are_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->are_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
are_dma_free(struct are_softc *sc)
{
	struct are_txdesc	*txd;
	struct are_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->are_cdata.are_tx_ring_tag) {
		if (sc->are_rdata.are_tx_ring_paddr)
			bus_dmamap_unload(sc->are_cdata.are_tx_ring_tag,
			    sc->are_cdata.are_tx_ring_map);
		if (sc->are_rdata.are_tx_ring)
			bus_dmamem_free(sc->are_cdata.are_tx_ring_tag,
			    sc->are_rdata.are_tx_ring,
			    sc->are_cdata.are_tx_ring_map);
		sc->are_rdata.are_tx_ring = NULL;
		sc->are_rdata.are_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->are_cdata.are_tx_ring_tag);
		sc->are_cdata.are_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->are_cdata.are_rx_ring_tag) {
		if (sc->are_rdata.are_rx_ring_paddr)
			bus_dmamap_unload(sc->are_cdata.are_rx_ring_tag,
			    sc->are_cdata.are_rx_ring_map);
		if (sc->are_rdata.are_rx_ring)
			bus_dmamem_free(sc->are_cdata.are_rx_ring_tag,
			    sc->are_rdata.are_rx_ring,
			    sc->are_cdata.are_rx_ring_map);
		sc->are_rdata.are_rx_ring = NULL;
		sc->are_rdata.are_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->are_cdata.are_rx_ring_tag);
		sc->are_cdata.are_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->are_cdata.are_tx_tag) {
		for (i = 0; i < ARE_TX_RING_CNT; i++) {
			txd = &sc->are_cdata.are_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->are_cdata.are_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->are_cdata.are_tx_tag);
		sc->are_cdata.are_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->are_cdata.are_rx_tag) {
		for (i = 0; i < ARE_RX_RING_CNT; i++) {
			rxd = &sc->are_cdata.are_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->are_cdata.are_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->are_cdata.are_rx_sparemap) {
			bus_dmamap_destroy(sc->are_cdata.are_rx_tag,
			    sc->are_cdata.are_rx_sparemap);
			sc->are_cdata.are_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->are_cdata.are_rx_tag);
		sc->are_cdata.are_rx_tag = NULL;
	}

	if (sc->are_cdata.are_parent_tag) {
		bus_dma_tag_destroy(sc->are_cdata.are_parent_tag);
		sc->are_cdata.are_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
are_tx_ring_init(struct are_softc *sc)
{
	struct are_ring_data	*rd;
	struct are_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	sc->are_cdata.are_tx_prod = 0;
	sc->are_cdata.are_tx_cons = 0;
	sc->are_cdata.are_tx_cnt = 0;
	sc->are_cdata.are_tx_pkts = 0;

	rd = &sc->are_rdata;
	bzero(rd->are_tx_ring, ARE_TX_RING_SIZE);
	for (i = 0; i < ARE_TX_RING_CNT; i++) {
		if (i == ARE_TX_RING_CNT - 1)
			addr = ARE_TX_RING_ADDR(sc, 0);
		else
			addr = ARE_TX_RING_ADDR(sc, i + 1);
		rd->are_tx_ring[i].are_stat = 0;
		rd->are_tx_ring[i].are_devcs = 0;
		rd->are_tx_ring[i].are_addr = 0;
		rd->are_tx_ring[i].are_link = addr;
		txd = &sc->are_cdata.are_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->are_cdata.are_tx_ring_tag,
	    sc->are_cdata.are_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
are_rx_ring_init(struct are_softc *sc)
{
	struct are_ring_data	*rd;
	struct are_rxdesc	*rxd;
	bus_addr_t		addr;
	int			i;

	sc->are_cdata.are_rx_cons = 0;

	rd = &sc->are_rdata;
	bzero(rd->are_rx_ring, ARE_RX_RING_SIZE);
	for (i = 0; i < ARE_RX_RING_CNT; i++) {
		rxd = &sc->are_cdata.are_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->are_rx_ring[i];
		if (i == ARE_RX_RING_CNT - 1)
			addr = ARE_RX_RING_ADDR(sc, 0);
		else
			addr = ARE_RX_RING_ADDR(sc, i + 1);
		rd->are_rx_ring[i].are_stat = ADSTAT_OWN;
		rd->are_rx_ring[i].are_devcs = ADCTL_CH;
		if (i == ARE_RX_RING_CNT - 1)
			rd->are_rx_ring[i].are_devcs |= ADCTL_ER;
		rd->are_rx_ring[i].are_addr = 0;
		rd->are_rx_ring[i].are_link = addr;
		if (are_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->are_cdata.are_rx_ring_tag,
	    sc->are_cdata.are_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
static int
are_newbuf(struct are_softc *sc, int idx)
{
	struct are_desc		*desc;
	struct are_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Reserve a longword of headroom; are_fixup_rx() later shifts
	 * the frame back by ETHER_ALIGN to align the IP header.
	 */
	m_adj(m, 4);

	if (bus_dmamap_load_mbuf_sg(sc->are_cdata.are_rx_tag,
	    sc->are_cdata.are_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->are_cdata.are_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		/*
		 * The original if_kr.c code did a POSTREAD sync here,
		 * but on this hardware it scrambles the buffer data, so
		 * it is intentionally omitted:
		 *
		 * bus_dmamap_sync(sc->are_cdata.are_rx_tag, rxd->rx_dmamap,
		 *    BUS_DMASYNC_POSTREAD);
		 */
		bus_dmamap_unload(sc->are_cdata.are_rx_tag, rxd->rx_dmamap);
	}
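	/*
	 * The new mbuf was loaded into the spare map above; swap it with
	 * the descriptor's old map so the old one becomes the new spare.
	 * This keeps a loadable map available even when allocation fails
	 * mid-ring (a common FreeBSD NIC-driver pattern, noted here for
	 * clarity).
	 */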
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->are_cdata.are_rx_sparemap;
	sc->are_cdata.are_rx_sparemap = map;
	bus_dmamap_sync(sc->are_cdata.are_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->are_addr = segs[0].ds_addr;
	desc->are_devcs |= ARE_DMASIZE(segs[0].ds_len);
	rxd->saved_ca = desc->are_addr;
	rxd->saved_ctl = desc->are_stat;

	return (0);
}

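/*
 * Editor's note: the receive buffer is set up longword aligned (see the
 * m_adj() in are_newbuf()), which leaves the IP header behind the
 * 14-byte Ethernet header misaligned.  are_fixup_rx() shifts the whole
 * frame back by ETHER_ALIGN (2) bytes, one 16-bit word at a time, so
 * the IP header ends up 32-bit aligned.
 */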
static __inline void
are_fixup_rx(struct mbuf *m)
{
	int		i;
	uint16_t	*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
		*dst++ = *src++;
	}

	if (m->m_len % sizeof(uint16_t))
		*(uint8_t *)dst = *(uint8_t *)src;

	m->m_data -= ETHER_ALIGN;
}

static void
are_tx(struct are_softc *sc)
{
	struct are_txdesc	*txd;
	struct are_desc		*cur_tx;
	struct ifnet		*ifp;
	uint32_t		ctl, devcs;
	int			cons, prod;

	ARE_LOCK_ASSERT(sc);

	cons = sc->are_cdata.are_tx_cons;
	prod = sc->are_cdata.are_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->are_cdata.are_tx_ring_tag,
	    sc->are_cdata.are_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->are_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; ARE_INC(cons, ARE_TX_RING_CNT)) {
		cur_tx = &sc->are_rdata.are_tx_ring[cons];
		ctl = cur_tx->are_stat;
		devcs = cur_tx->are_devcs;
		/* Check if descriptor has "finished" flag */
		if (ARE_DMASIZE(devcs) == 0)
			break;

		sc->are_cdata.are_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->are_cdata.are_txdesc[cons];

		if ((ctl & ADSTAT_Tx_ES) == 0)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		else
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

		bus_dmamap_sync(sc->are_cdata.are_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->are_cdata.are_tx_tag, txd->tx_dmamap);

		/* Free the mbuf only on the first descriptor of a frame. */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->are_stat = 0;
		cur_tx->are_devcs = 0;
		cur_tx->are_addr = 0;
	}

	sc->are_cdata.are_tx_cons = cons;

	bus_dmamap_sync(sc->are_cdata.are_tx_ring_tag,
	    sc->are_cdata.are_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

static void
are_rx(struct are_softc *sc)
{
	struct are_rxdesc	*rxd;
	struct ifnet		*ifp = sc->are_ifp;
	int			cons, prog, packet_len, error;
	struct are_desc		*cur_rx;
	struct mbuf		*m;

	ARE_LOCK_ASSERT(sc);

	cons = sc->are_cdata.are_rx_cons;

	bus_dmamap_sync(sc->are_cdata.are_rx_ring_tag,
	    sc->are_cdata.are_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < ARE_RX_RING_CNT; ARE_INC(cons, ARE_RX_RING_CNT)) {
		cur_rx = &sc->are_rdata.are_rx_ring[cons];
		rxd = &sc->are_cdata.are_rxdesc[cons];
		m = rxd->rx_m;

		if ((cur_rx->are_stat & ADSTAT_OWN) == ADSTAT_OWN)
			break;

		prog++;

		packet_len = ADSTAT_Rx_LENGTH(cur_rx->are_stat);
		/* Assume an error until the frame checks out. */
		error = 1;

		if (packet_len < 64)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		else if ((cur_rx->are_stat & ADSTAT_Rx_DE) == 0) {
			error = 0;
			bus_dmamap_sync(sc->are_cdata.are_rx_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			m = rxd->rx_m;
			/* Skip 4 bytes of CRC */
			m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
			are_fixup_rx(m);
			m->m_pkthdr.rcvif = ifp;
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			ARE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			ARE_LOCK(sc);
		}

		if (error) {
			/* Restore CONTROL and CA values, reset DEVCS */
			cur_rx->are_stat = rxd->saved_ctl;
			cur_rx->are_addr = rxd->saved_ca;
			cur_rx->are_devcs = 0;
		} else {
			/* Reinit descriptor */
			cur_rx->are_stat = ADSTAT_OWN;
			cur_rx->are_devcs = 0;
			if (cons == ARE_RX_RING_CNT - 1)
				cur_rx->are_devcs |= ADCTL_ER;
			cur_rx->are_addr = 0;
			if (are_newbuf(sc, cons) != 0) {
				device_printf(sc->are_dev,
				    "Failed to allocate buffer\n");
				break;
			}
		}

		bus_dmamap_sync(sc->are_cdata.are_rx_ring_tag,
		    sc->are_cdata.are_rx_ring_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	if (prog > 0) {
		sc->are_cdata.are_rx_cons = cons;

		bus_dmamap_sync(sc->are_cdata.are_rx_ring_tag,
		    sc->are_cdata.are_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

static void
are_intr(void *arg)
{
	struct are_softc	*sc = arg;
	uint32_t		status;
	struct ifnet		*ifp = sc->are_ifp;

	ARE_LOCK(sc);

	/* Read and acknowledge the raised status bits. */
	status = CSR_READ_4(sc, CSR_STATUS);
	if (status) {
		CSR_WRITE_4(sc, CSR_STATUS, status);
	}
	if (status & sc->sc_rxint_mask) {
		are_rx(sc);
	}
	if (status & sc->sc_txint_mask) {
		are_tx(sc);
	}

	/* Try to get more packets going. */
	are_start(ifp);

	ARE_UNLOCK(sc);
}

static void
are_tick(void *xsc)
{
#ifdef ARE_MII
	struct are_softc	*sc = xsc;
	struct mii_data		*mii;

	ARE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->are_miibus);
	mii_tick(mii);
	callout_reset(&sc->are_stat_callout, hz, are_tick, sc);
#endif
}

static void
are_hinted_child(device_t bus, const char *dname, int dunit)
{
	BUS_ADD_CHILD(bus, 0, dname, dunit);
	device_printf(bus, "hinted child %s%d\n", dname, dunit);
}

#ifdef ARE_MDIO
static int
aremdio_probe(device_t dev)
{
	device_set_desc(dev,
	    "Atheros AR531x built-in ethernet interface, MDIO controller");
	return (0);
}

static int
aremdio_attach(device_t dev)
{
	struct are_softc	*sc;
	int			error = 0;

	sc = device_get_softc(dev);
	sc->are_dev = dev;
	sc->are_rid = 0;
	sc->are_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->are_rid, RF_ACTIVE | RF_SHAREABLE);
	if (sc->are_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->are_btag = rman_get_bustag(sc->are_res);
	sc->are_bhandle = rman_get_bushandle(sc->are_res);

	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	error = bus_generic_attach(dev);
fail:
	return (error);
}

static int
aremdio_detach(device_t dev)
{
	return (0);
}
#endif

#ifdef ARE_DEBUG
void
dump_txdesc(struct are_softc *sc, int pos)
{
	struct are_desc		*desc;

	desc = &sc->are_rdata.are_tx_ring[pos];
	device_printf(sc->are_dev, "CSR_TXLIST %08x\n", CSR_READ_4(sc, CSR_TXLIST));
	device_printf(sc->are_dev, "CSR_HTBA %08x\n", CSR_READ_4(sc, CSR_HTBA));
	device_printf(sc->are_dev, "%d TDES0:%08x TDES1:%08x TDES2:%08x TDES3:%08x\n",
	    pos, desc->are_stat, desc->are_devcs, desc->are_addr, desc->are_link);
}

void
dump_status_reg(struct are_softc *sc)
{
	uint32_t		status;

	device_printf(sc->are_dev, "CSR_HTBA %08x\n", CSR_READ_4(sc, CSR_HTBA));
	status = CSR_READ_4(sc, CSR_STATUS);
	device_printf(sc->are_dev, "CSR5 Status Register EB:%d TS:%d RS:%d NIS:%d AIS:%d ER:%d SE:%d LNF:%d TM:%d RWT:%d RPS:%d RU:%d RI:%d UNF:%d LNP/ANC:%d TJT:%d TU:%d TPS:%d TI:%d\n",
	    (status >> 23) & 7,
	    (status >> 20) & 7,
	    (status >> 17) & 7,
	    (status >> 16) & 1,
	    (status >> 15) & 1,
	    (status >> 14) & 1,
	    (status >> 13) & 1,
	    (status >> 12) & 1,
	    (status >> 11) & 1,
	    (status >> 9) & 1,
	    (status >> 8) & 1,
	    (status >> 7) & 1,
	    (status >> 6) & 1,
	    (status >> 5) & 1,
	    (status >> 4) & 1,
	    (status >> 3) & 1,
	    (status >> 2) & 1,
	    (status >> 1) & 1,
	    (status >> 0) & 1);
}
#endif