/*-
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TI Common Platform Ethernet Switch (CPSW) Driver
 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
 *
 * This controller is documented in the AM335x Technical Reference
 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
 *
 * It is basically a single Ethernet port (port 0) wired internally to
 * a 3-port store-and-forward switch connected to two independent
 * "sliver" controllers (port 1 and port 2).  You can operate the
 * controller in a variety of different ways by suitably configuring
 * the slivers and the Address Lookup Engine (ALE) that routes packets
 * between the ports.
 *
 * This code was developed and tested on a BeagleBone with
 * an AM335x SoC.
 */
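
/*
 * Rough sketch of the topology described above (a summary of the
 * preceding paragraph, not a diagram from the TRM):
 *
 *            +---------------------------------------+
 *   CPU <--> | port 0 (host)                         |
 *   (CPDMA)  |        3-port switch + ALE            |
 *            | port 1 (sliver 0)   port 2 (sliver 1) |
 *            +---------------------------------------+
 *                    |                    |
 *                  PHY                  PHY
 */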

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "if_cpswreg.h"
#include "if_cpswvar.h"

#include <arm/ti/ti_scm.h>

#include "miibus_if.h"

/* Device probe/attach/detach. */
static int cpsw_probe(device_t);
static void cpsw_init_slots(struct cpsw_softc *);
static int cpsw_attach(device_t);
static void cpsw_free_slot(struct cpsw_softc *, struct cpsw_slot *);
static int cpsw_detach(device_t);

/* Device Init/shutdown. */
static void cpsw_init(void *);
static void cpsw_init_locked(void *);
static int cpsw_shutdown(device_t);
static void cpsw_shutdown_locked(struct cpsw_softc *);

/* Device Suspend/Resume. */
static int cpsw_suspend(device_t);
static int cpsw_resume(device_t);

/* Ioctl. */
static int cpsw_ioctl(struct ifnet *, u_long command, caddr_t data);

static int cpsw_miibus_readreg(device_t, int phy, int reg);
static int cpsw_miibus_writereg(device_t, int phy, int reg, int value);

/* Send/Receive packets. */
static void cpsw_intr_rx(void *arg);
static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
static void cpsw_rx_enqueue(struct cpsw_softc *);
static void cpsw_start(struct ifnet *);
static void cpsw_tx_enqueue(struct cpsw_softc *);
static int cpsw_tx_dequeue(struct cpsw_softc *);

/* Misc interrupts and watchdog. */
static void cpsw_intr_rx_thresh(void *);
static void cpsw_intr_misc(void *);
static void cpsw_tick(void *);
static void cpsw_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int cpsw_ifmedia_upd(struct ifnet *);
static void cpsw_tx_watchdog(struct cpsw_softc *);

/* ALE support */
static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t portmap, uint8_t *mac);
static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
static void cpsw_ale_dump_table(struct cpsw_softc *);

/* Statistics and sysctls. */
static void cpsw_add_sysctls(struct cpsw_softc *);
static void cpsw_stats_collect(struct cpsw_softc *);
static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);

/*
 * Arbitrary limit on number of segments in an mbuf to be transmitted.
 * Packets with more segments than this will be defragmented before
 * they are queued.
 */
#define CPSW_TXFRAGS 8


/*
 * TODO: The CPSW subsystem (CPSW_SS) can drive two independent PHYs
 * as separate Ethernet ports.  To properly support this, we should
 * break this into two separate devices: a CPSW_SS device that owns
 * the interrupts and actually talks to the CPSW hardware, and a
 * separate CPSW Ethernet child device for each Ethernet port.  The RX
 * interrupt, for example, would be part of CPSW_SS; it would receive
 * a packet, note the input port, and then dispatch it to the child
 * device's interface queue.  Similarly for transmit.
 *
 * It's not clear to me whether the device tree should be restructured
 * with a cpsw_ss node and two child nodes.  That would allow specifying
 * MAC addresses for each port, for example, but might be overkill.
 *
 * Unfortunately, I don't have hardware right now that supports two
 * Ethernet ports via CPSW.
 */

static device_method_t cpsw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpsw_probe),
	DEVMETHOD(device_attach,	cpsw_attach),
	DEVMETHOD(device_detach,	cpsw_detach),
	DEVMETHOD(device_shutdown,	cpsw_shutdown),
	DEVMETHOD(device_suspend,	cpsw_suspend),
	DEVMETHOD(device_resume,	cpsw_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	cpsw_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cpsw_miibus_writereg),
	{ 0, 0 }
};

static driver_t cpsw_driver = {
	"cpsw",
	cpsw_methods,
	sizeof(struct cpsw_softc),
};

static devclass_t cpsw_devclass;

DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

/* Number of entries here must match size of stats
 * array in struct cpsw_softc. */
static struct cpsw_stat {
	int	reg;
	char *oid;
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
	{0x00, "GoodRxFrames"},
	{0x04, "BroadcastRxFrames"},
	{0x08, "MulticastRxFrames"},
	{0x0C, "PauseRxFrames"},
	{0x10, "RxCrcErrors"},
	{0x14, "RxAlignErrors"},
	{0x18, "OversizeRxFrames"},
	{0x1c, "RxJabbers"},
	{0x20, "ShortRxFrames"},
	{0x24, "RxFragments"},
	{0x30, "RxOctets"},
	{0x34, "GoodTxFrames"},
	{0x38, "BroadcastTxFrames"},
	{0x3c, "MulticastTxFrames"},
	{0x40, "PauseTxFrames"},
	{0x44, "DeferredTxFrames"},
	{0x48, "CollisionsTxFrames"},
	{0x4c, "SingleCollisionTxFrames"},
	{0x50, "MultipleCollisionTxFrames"},
	{0x54, "ExcessiveCollisions"},
	{0x58, "LateCollisions"},
	{0x5c, "TxUnderrun"},
	{0x60, "CarrierSenseErrors"},
	{0x64, "TxOctets"},
	{0x68, "RxTx64OctetFrames"},
	{0x6c, "RxTx65to127OctetFrames"},
	{0x70, "RxTx128to255OctetFrames"},
	{0x74, "RxTx256to511OctetFrames"},
	{0x78, "RxTx512to1024OctetFrames"},
	{0x7c, "RxTx1024upOctetFrames"},
	{0x80, "NetOctets"},
	{0x84, "RxStartOfFrameOverruns"},
	{0x88, "RxMiddleOfFrameOverruns"},
	{0x8c, "RxDmaOverruns"}
};

/*
 * Basic debug support.
 */

#define IF_DEBUG(sc)  if (sc->cpsw_if_flags & IFF_DEBUG)

static void
cpsw_debugf_head(const char *funcname)
{
	int t = (int)(time_second % (24 * 60 * 60));

	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
}

#include <machine/stdarg.h>
static void
cpsw_debugf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

#define CPSW_DEBUGF(a) do {					\
	IF_DEBUG(sc) {						\
		cpsw_debugf_head(__func__);			\
		cpsw_debugf a;					\
	}							\
} while (0)
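
/*
 * Note the double parentheses at CPSW_DEBUGF() call sites: the inner
 * pair packs an entire printf-style argument list into the single
 * macro argument 'a' above, e.g.:
 *
 *	CPSW_DEBUGF(("stat=%x", stat));
 */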


/*
 * Locking macros
 */
#define CPSW_TX_LOCK(sc) do {					\
		mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);	\
		mtx_lock(&(sc)->tx.lock);			\
} while (0)

#define CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
#define CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)

#define CPSW_RX_LOCK(sc) do {					\
		mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);	\
		mtx_lock(&(sc)->rx.lock);			\
} while (0)

#define CPSW_RX_UNLOCK(sc)		mtx_unlock(&(sc)->rx.lock)
#define CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)

#define CPSW_GLOBAL_LOCK(sc) do {					\
		if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) !=		\
		    (mtx_owned(&(sc)->rx.lock) ? 1 : 0)) {		\
			panic("cpsw deadlock possibility detected!");	\
		}							\
		mtx_lock(&(sc)->tx.lock);				\
		mtx_lock(&(sc)->rx.lock);				\
} while (0)

#define CPSW_GLOBAL_UNLOCK(sc) do {					\
		CPSW_RX_UNLOCK(sc);					\
		CPSW_TX_UNLOCK(sc);					\
} while (0)

#define CPSW_GLOBAL_LOCK_ASSERT(sc) do {				\
		CPSW_TX_LOCK_ASSERT(sc);				\
		CPSW_RX_LOCK_ASSERT(sc);				\
} while (0)
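
/*
 * Usage sketch for the macros above: the TX lock is always taken
 * before the RX lock, and CPSW_GLOBAL_LOCK() panics rather than risk
 * a deadlock when exactly one of the two is already held:
 *
 *	CPSW_GLOBAL_LOCK(sc);		(takes tx.lock, then rx.lock)
 *	... work on both queues ...
 *	CPSW_GLOBAL_UNLOCK(sc);		(drops rx.lock, then tx.lock)
 */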

/*
 * Read/Write macros
 */
#define	cpsw_read_4(sc, reg)		bus_read_4(sc->res[0], reg)
#define	cpsw_write_4(sc, reg, val)	bus_write_4(sc->res[0], reg, val)

#define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))

#define	cpsw_cpdma_bd_paddr(sc, slot)				\
	BUS_SPACE_PHYSADDR(sc->res[0], slot->bd_offset)
#define	cpsw_cpdma_read_bd(sc, slot, val)				\
	bus_read_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd(sc, slot, val)				\
	bus_write_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)			\
	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
#define	cpsw_cpdma_read_bd_flags(sc, slot)		\
	bus_read_2(sc->res[0], slot->bd_offset + 14)
#define	cpsw_write_hdp_slot(sc, queue, slot)				\
	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
#define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
#define	cpsw_read_cp(sc, queue)				\
	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
#define	cpsw_write_cp(sc, queue, val)				\
	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
#define	cpsw_write_cp_slot(sc, queue, slot)		\
	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
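
/*
 * The macros above assume the following 16-byte buffer descriptor
 * layout in CPPI RAM (inferred from how they are used here, e.g.
 * cpsw_cpdma_read_bd_flags() reading 16 bits at offset 14; see the
 * TRM for the authoritative definition):
 *
 *	+0	next descriptor pointer (physical address)
 *	+4	buffer pointer (physical address)
 *	+8	buffer offset		+10	buffer length
 *	+12	packet length		+14	flags
 */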

#if 0
/* XXX temporary function versions for debugging. */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t reg = queue->hdp_offset;
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
	cpsw_write_4(sc, reg, v);
}

static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
	cpsw_write_cp(sc, queue, v);
}
#endif

/*
 * Expanded dump routines for verbose debugging.
 */
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
	    "Port0"};
	struct cpsw_cpdma_bd bd;
	const char *sep;
	int i;

	cpsw_cpdma_read_bd(sc, slot, &bd);
	printf("BD Addr: 0x%08x   Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next);
	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
	printf("  Flags: ");
	sep = "";
	for (i = 0; i < 16; ++i) {
		if (bd.flags & (1 << (15 - i))) {
			printf("%s%s", sep, flags[i]);
			sep = ",";
		}
	}
	printf("\n");
	if (slot->mbuf) {
		printf("  Ether:  %14D\n",
		    (char *)(slot->mbuf->m_hdr.mh_data), " ");
		printf("  Packet: %16D\n",
		    (char *)(slot->mbuf->m_hdr.mh_data) + 14, " ");
	}
}

#define CPSW_DUMP_SLOT(sc, slot) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_slot(sc, slot);			\
	}							\
} while (0)


static void
cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
{
	struct cpsw_slot *slot;
	int i = 0;
	int others = 0;

	STAILQ_FOREACH(slot, q, next) {
		if (i > 4)
			++others;
		else
			cpsw_dump_slot(sc, slot);
		++i;
	}
	if (others)
		printf(" ... and %d more.\n", others);
	printf("\n");
}

#define CPSW_DUMP_QUEUE(sc, q) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_queue(sc, q);				\
	}							\
} while (0)


/*
 *
 * Device Probe, Attach, Detach.
 *
 */

static int
cpsw_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
		return (ENXIO);

	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
	return (BUS_PROBE_DEFAULT);
}


static void
cpsw_init_slots(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	int i;

	STAILQ_INIT(&sc->avail);

	/* Put the slot descriptors onto the global avail list. */
	for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); i++) {
		slot = &sc->_slots[i];
		slot->bd_offset = cpsw_cpdma_bd_offset(i);
		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
	}
}

/*
 * Bind an interrupt and record the relevant info in sc->interrupts.
 */
static int
cpsw_attach_interrupt(struct cpsw_softc *sc, struct resource *res, driver_intr_t *handler, const char *description)
{
	void **pcookie;
	int error;

	sc->interrupts[sc->interrupt_count].res = res;
	sc->interrupts[sc->interrupt_count].description = description;
	pcookie = &sc->interrupts[sc->interrupt_count].ih_cookie;

	error = bus_setup_intr(sc->dev, res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, *handler, sc, pcookie);
	if (error)
		device_printf(sc->dev,
		    "could not setup %s\n", description);
	else
		++sc->interrupt_count;
	return (error);
}

/*
 * Tear down everything in sc->interrupts.
 */
static void
cpsw_detach_interrupts(struct cpsw_softc *sc)
{
	int error;
	int i;

	for (i = 0; i < sizeof(sc->interrupts) / sizeof(sc->interrupts[0]); ++i) {
		if (!sc->interrupts[i].ih_cookie)
			continue;
		error = bus_teardown_intr(sc->dev,
		    sc->interrupts[i].res, sc->interrupts[i].ih_cookie);
		if (error)
			device_printf(sc->dev, "could not release %s\n",
			    sc->interrupts[i].description);
		sc->interrupts[i].ih_cookie = NULL;
	}
}

static int
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
{
	const int max_slots = sizeof(sc->_slots) / sizeof(sc->_slots[0]);
	struct cpsw_slot *slot;
	int i;

	if (requested < 0)
		requested = max_slots;

	for (i = 0; i < requested; ++i) {
		slot = STAILQ_FIRST(&sc->avail);
		if (slot == NULL)
			return (0);
		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
			if_printf(sc->ifp, "failed to create dmamap\n");
			return (ENOMEM);
		}
		STAILQ_REMOVE_HEAD(&sc->avail, next);
		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
		++queue->avail_queue_len;
		++queue->queue_slots;
	}
	return (0);
}

static int
cpsw_attach(device_t dev)
{
	bus_dma_segment_t segs[1];
	struct cpsw_softc *sc = device_get_softc(dev);
	struct mii_softc *miisc;
	struct ifnet *ifp;
	void *phy_sc;
	int error, phy, nsegs;
	uint32_t reg;

	CPSW_DEBUGF((""));

	getbinuptime(&sc->attach_uptime);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	/* Get phy address from fdt */
	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, &phy_sc) != 0) {
		device_printf(dev, "failed to get PHY address from FDT\n");
		return (ENXIO);
	}
	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
		reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for mbufs. */
	error = bus_dma_tag_create(
		bus_get_dma_tag(sc->dev),	/* parent */
		1, 0,				/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		MCLBYTES, CPSW_TXFRAGS,		/* maxsize, nsegments */
		MCLBYTES, 0,			/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&sc->mbuf_dtag);		/* dmatag */
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed\n");
		cpsw_detach(dev);
		return (error);
	}

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}

	/* Allocate the null mbuf and pre-sync it. */
	sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (sc->null_mbuf == NULL) {
		device_printf(dev, "failed to allocate null mbuf\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	memset(sc->null_mbuf->m_hdr.mh_data, 0, sc->null_mbuf->m_ext.ext_size);
	bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap);
	bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    BUS_DMASYNC_PREWRITE);
	sc->null_mbuf_paddr = segs[0].ds_addr;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM;	/* FIXME: VLAN? */
	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_init = cpsw_init;
	ifp->if_start = cpsw_start;
	ifp->if_ioctl = cpsw_ioctl;

	cpsw_init_slots(sc);

	/* Allocate slots to TX and RX queues. */
	STAILQ_INIT(&sc->rx.avail);
	STAILQ_INIT(&sc->rx.active);
	STAILQ_INIT(&sc->tx.avail);
	STAILQ_INIT(&sc->tx.active);
	/* For now:  128 slots to TX, rest to RX. */
	/* XXX TODO: start with 32/64 and grow dynamically based on demand. */
	if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) {
		device_printf(dev, "failed to allocate dmamaps\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
	    sc->tx.queue_slots, sc->rx.queue_slots);

	ifp->if_snd.ifq_drv_maxlen = sc->tx.queue_slots;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);

	/* Get high part of MAC address from control module (mac_id0_hi) */
	/* TODO: Get MAC ID1 as well as MAC ID0. */
	ti_scm_reg_read_4(0x634, &reg);
	sc->mac_addr[0] = reg & 0xFF;
	sc->mac_addr[1] = (reg >>  8) & 0xFF;
	sc->mac_addr[2] = (reg >> 16) & 0xFF;
	sc->mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id0_lo) */
	ti_scm_reg_read_4(0x630, &reg);
	sc->mac_addr[4] = reg & 0xFF;
	sc->mac_addr[5] = (reg >>  8) & 0xFF;

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO: Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);

	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 30);

	/* Attach PHY(s) */
	error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd,
	    cpsw_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		cpsw_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc, MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F));

	/* Note: We don't use sc->res[3] (TX interrupt) */
	if (cpsw_attach_interrupt(sc, sc->res[1],
		cpsw_intr_rx_thresh, "CPSW RX threshold interrupt") ||
	    cpsw_attach_interrupt(sc, sc->res[2],
		cpsw_intr_rx, "CPSW RX interrupt") ||
	    cpsw_attach_interrupt(sc, sc->res[4],
		cpsw_intr_misc, "CPSW misc interrupt")) {
		cpsw_detach(dev);
		return (ENXIO);
	}

	ether_ifattach(ifp, sc->mac_addr);
	callout_init(&sc->watchdog.callout, 0);

	return (0);
}

static void
cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	int error;

	if (slot->dmamap) {
		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
		KASSERT(error == 0, ("Mapping still active"));
		slot->dmamap = NULL;
	}
	if (slot->mbuf) {
		m_freem(slot->mbuf);
		slot->mbuf = NULL;
	}
}

static int
cpsw_detach(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	int error, i;

	CPSW_DEBUGF((""));

	/* Stop controller and free TX queue */
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		CPSW_GLOBAL_LOCK(sc);
		cpsw_shutdown_locked(sc);
		CPSW_GLOBAL_UNLOCK(sc);
		callout_drain(&sc->watchdog.callout);
	}

	bus_generic_detach(dev);
	if (sc->miibus)
		device_delete_child(dev, sc->miibus);

	/* Stop and release all interrupts */
	cpsw_detach_interrupts(sc);

	/* Free dmamaps and mbufs */
	for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); ++i)
		cpsw_free_slot(sc, &sc->_slots[i]);
	if (sc->null_mbuf_dmamap) {
		error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap);
		KASSERT(error == 0, ("Mapping still active"));
	}
	if (sc->null_mbuf)
		m_freem(sc->null_mbuf);

	/* Free DMA tag */
	error = bus_dma_tag_destroy(sc->mbuf_dtag);
	KASSERT(error == 0, ("Unable to destroy DMA tag"));

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	if (sc->ifp != NULL)
		if_free(sc->ifp);

	/* Destroy mutexes */
	mtx_destroy(&sc->rx.lock);
	mtx_destroy(&sc->tx.lock);

	return (0);
}

/*
 *
 * Init/Shutdown.
 *
 */

static void
cpsw_reset(struct cpsw_softc *sc)
{
	int i;

	/* Reset RMII/RGMII wrapper. */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Reset CPSW subsystem. */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset slivers for ports 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset DMA controller. */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}

static void
cpsw_init(void *arg)
{
	struct cpsw_softc *sc = arg;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_init_locked(arg);
	CPSW_GLOBAL_UNLOCK(sc);
}

static void
cpsw_init_locked(void *arg)
{
	struct ifnet *ifp;
	struct cpsw_softc *sc = arg;
	struct cpsw_slot *slot;
	uint32_t i;

	CPSW_DEBUGF((""));
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	getbinuptime(&sc->init_uptime);

	/* Reset the controller. */
	cpsw_reset(sc);

	/* Enable ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 31 | 1 << 4);

	/* Init slivers for ports 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Set Slave Mapping */
		cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
		cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i + 1), 0x33221100);
		cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
		/* Set MACCONTROL for slivers 0 and 1 (ports 1 and 2):
		   IFCTL_A(15), GMII_EN(5), FULLDUPLEX(1) */
		/* TODO: The docs appear to describe IFCTL_B(16) and
		   IFCTL_A(15) identically; it is unclear which applies. */
		/* TODO: The docs call bit 0 "Loopback" in some places and
		   "FullDuplex" in others; we assume the latter here. */
		cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), 1 << 15 | 1 << 5 | 1);
	}

	/* Set Host Port Mapping */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: all ports set to forwarding(3), initialize addrs */
	for (i = 0; i < 3; i++)
		cpsw_write_4(sc, CPSW_ALE_PORTCTL(i), 3);
	cpsw_ale_update_addresses(sc, 1);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Experiment:  Turn off flow control */
	/* This seems to fix the watchdog resets that have plagued
	   earlier versions of this driver; it is not yet clear whether
	   it has any negative side effects. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Offset RX buffers by 2 bytes so the IP header lands on a
	   4-byte boundary. */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable Interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x3F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO: Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);

	/* Select MII in GMII_SEL, Internal Delay mode */
	/* ti_scm_reg_write_4(0x650, 0); */

	/* Initialize active queues. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);

	/* Activate network interface */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static int
cpsw_shutdown(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_shutdown_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}

static void
cpsw_rx_teardown_locked(struct cpsw_softc *sc)
{
	struct mbuf *received, *next;
	int i = 0;

	CPSW_DEBUGF(("starting RX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	for (;;) {
		received = cpsw_rx_dequeue(sc);
		CPSW_GLOBAL_UNLOCK(sc);
		while (received != NULL) {
			next = received->m_nextpkt;
			received->m_nextpkt = NULL;
			(*sc->ifp->if_input)(sc->ifp, received);
			received = next;
		}
		CPSW_GLOBAL_LOCK(sc);
		if (!sc->rx.running) {
			CPSW_DEBUGF(("finished RX teardown (%d retries)", i));
			return;
		}
		if (++i > 10) {
			if_printf(sc->ifp, "Unable to cleanly shutdown receiver\n");
			return;
		}
		DELAY(10);
	}
}

static void
cpsw_tx_teardown_locked(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_DEBUGF(("starting TX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	cpsw_tx_dequeue(sc);
	while (sc->tx.running && ++i < 10) {
		DELAY(10);
		cpsw_tx_dequeue(sc);
	}
	if (sc->tx.running)
		if_printf(sc->ifp, "Unable to cleanly shutdown transmitter\n");
	CPSW_DEBUGF(("finished TX teardown (%d retries, %d buffers still active)",
	    i, sc->tx.active_queue_len));
}

static void
cpsw_shutdown_locked(struct cpsw_softc *sc)
{
	struct ifnet *ifp;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	/* Stop ticker */
	callout_stop(&sc->watchdog.callout);

	/* Tear down the RX/TX queues. */
	cpsw_rx_teardown_locked(sc);
	cpsw_tx_teardown_locked(sc);

	/* Capture stats before we reset controller. */
	cpsw_stats_collect(sc);

	cpsw_reset(sc);
}

/*
 *  Suspend/Resume.
 */

static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_shutdown_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}

static int
cpsw_resume(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF(("UNIMPLEMENTED"));
	return (0);
}

/*
 *
 *  IOCTL
 *
 */

static void
cpsw_set_promisc(struct cpsw_softc *sc, int set)
{
	/*
	 * Enabling promiscuous mode requires two bits of work: First,
	 * ALE_BYPASS needs to be enabled.  That disables the ALE
	 * forwarding logic and causes every packet to be sent to the
	 * host port.  That makes us promiscuous wrt received packets.
	 *
	 * With ALE forwarding disabled, the transmitter needs to set
	 * an explicit output port on every packet to route it to the
	 * correct egress.  This should be doable for systems such as
	 * BeagleBone where only one egress port is actually wired to
	 * a PHY.  If you have both egress ports wired up, life gets a
	 * lot more interesting.
	 *
	 * Hmmm.... NetBSD driver uses ALE_BYPASS always and doesn't
	 * seem to set explicit egress ports.  Does that mean they
	 * are always promiscuous?
	 */
	if (set) {
		printf("Promiscuous mode unimplemented\n");
	}
}
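
#if 0
/*
 * Untested sketch of the ALE_BYPASS half of the scheme described
 * above, assuming bit 4 of CPSW_ALE_CONTROL is the bypass bit; the
 * explicit-egress-port half of the problem is not addressed here.
 */
static void
cpsw_set_promisc_sketch(struct cpsw_softc *sc, int set)
{
	uint32_t reg;

	reg = cpsw_read_4(sc, CPSW_ALE_CONTROL);
	if (set)
		reg |= (1 << 4);	/* ALE_BYPASS: flood everything to host */
	else
		reg &= ~(1 << 4);
	cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);
}
#endif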

static void
cpsw_set_allmulti(struct cpsw_softc *sc, int set)
{
	if (set) {
		printf("All-multicast mode unimplemented\n");
	}
}

static int
cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;
	uint32_t changed;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		CPSW_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				changed = ifp->if_flags ^ sc->cpsw_if_flags;
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    ifp->if_flags & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    ifp->if_flags & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING; starting up"));
				cpsw_init_locked(sc);
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING; shutting down"));
			cpsw_shutdown_locked(sc);
		}

		sc->cpsw_if_flags = ifp->if_flags;
		CPSW_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		cpsw_ale_update_addresses(sc, 0);
		break;
	case SIOCDELMULTI:
		/* Ugh.  DELMULTI doesn't provide the specific address
		   being removed, so the best we can do is remove
		   everything and rebuild it all. */
		cpsw_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

/*
 *
 * MIIBUS
 *
 */
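/*
 * MDIOUSERACCESS0 bit layout, as used by the routines below (field
 * positions inferred from the code; the TRM is authoritative):
 *
 *	bit 31	GO	write 1 to start, reads 1 while busy
 *	bit 30	WRITE	1 = write transaction, 0 = read
 *	bit 29	ACK	set by hardware on a successful transaction
 *	25:21	REGADR	PHY register number
 *	20:16	PHYADR	PHY address
 *	15:0	DATA	data read, or data to write
 */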
static int
cpsw_miibus_ready(struct cpsw_softc *sc)
{
	uint32_t r, retries = CPSW_MIIBUS_RETRIES;

	while (--retries) {
		r = cpsw_read_4(sc, MDIOUSERACCESS0);
		if ((r & (1 << 31)) == 0)
			return (1);
		DELAY(CPSW_MIIBUS_DELAY);
	}
	return (0);
}

static int
cpsw_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	uint32_t cmd, r;

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO not ready to read\n");
		return (0);
	}

	/* Set GO, reg, phy */
	cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
	cpsw_write_4(sc, MDIOUSERACCESS0, cmd);

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO timed out during read\n");
		return (0);
	}

	r = cpsw_read_4(sc, MDIOUSERACCESS0);
	if ((r & (1 << 29)) == 0) {
		device_printf(dev, "Failed to read from PHY.\n");
		r = 0;
	}
	return (r & 0xFFFF);
}

static int
cpsw_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	uint32_t cmd;

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO not ready to write\n");
		return (0);
	}

	/* Set GO, WRITE, reg, phy, and value */
	cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16
	    | (value & 0xFFFF);
	cpsw_write_4(sc, MDIOUSERACCESS0, cmd);

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO timed out during write\n");
		return (0);
	}

	if ((cpsw_read_4(sc, MDIOUSERACCESS0) & (1 << 29)) == 0)
		device_printf(dev, "Failed to write to PHY.\n");

	return (0);
}

/*
 *
 * Transmit/Receive Packets.
 *
 */


static void
cpsw_intr_rx(void *arg)
{
	struct cpsw_softc *sc = arg;
	struct mbuf *received, *next;

	CPSW_RX_LOCK(sc);
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		(*sc->ifp->if_input)(sc->ifp, received);
		received = next;
	}
}

static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *slot;
	struct ifnet *ifp;
	struct mbuf *mb_head, *mb_tail;
	int removed = 0;

	ifp = sc->ifp;
	mb_head = mb_tail = NULL;

	/* Pull completed packets off hardware RX queue. */
	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
		cpsw_cpdma_read_bd(sc, slot, &bd);
		if (bd.flags & CPDMA_BD_OWNER)
			break; /* Still in use by hardware */

		CPSW_DEBUGF(("Removing received packet from RX queue"));
		++removed;
		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("RX teardown in progress"));
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
			sc->rx.running = 0;
			break;
		}

		cpsw_write_cp_slot(sc, &sc->rx, slot);

		/* Set up mbuf */
		/* TODO: track SOP/EOP bits to assemble a full mbuf
		   out of received fragments. */
		slot->mbuf->m_hdr.mh_data += bd.bufoff;
		slot->mbuf->m_hdr.mh_len = bd.pktlen - 4;
		slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
		slot->mbuf->m_flags |= M_PKTHDR;
		slot->mbuf->m_pkthdr.rcvif = ifp;
		slot->mbuf->m_nextpkt = NULL;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				slot->mbuf->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Add mbuf to packet list to be returned. */
		if (mb_tail) {
			mb_tail->m_nextpkt = slot->mbuf;
		} else {
			mb_head = slot->mbuf;
		}
		mb_tail = slot->mbuf;
		slot->mbuf = NULL;
	}

	if (removed != 0) {
		sc->rx.queue_removes += removed;
		sc->rx.active_queue_len -= removed;
		sc->rx.avail_queue_len += removed;
		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
	}
	return (mb_head);
}

static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct ifnet *ifp = sc->ifp;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	int error, nsegs, added = 0;

	/* Register new mbufs with hardware. */
	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
		if (slot->mbuf == NULL) {
			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (slot->mbuf == NULL) {
				if_printf(sc->ifp, "Unable to fill RX queue\n");
				break;
			}
			slot->mbuf->m_len =
			    slot->mbuf->m_pkthdr.len =
			    slot->mbuf->m_ext.ext_size;
		}

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			if_printf(ifp,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);

		/* Create and submit new rx descriptor. */
		bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = MCLBYTES - 1;
		bd.pktlen = bd.buflen;
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		++added;

		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
		sc->rx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
	}

	if (added == 0)
		return;

	CPSW_DEBUGF(("Adding %d buffers to RX queue", added));

	/* Link new entries to hardware RX queue. */
	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->rx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
		}
	}
	sc->rx.queue_adds += added;
	sc->rx.active_queue_len += added;
	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) {
		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
	}
}

static void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	CPSW_TX_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->tx.running) {
		cpsw_tx_enqueue(sc);
		cpsw_tx_dequeue(sc);
	}
	CPSW_TX_UNLOCK(sc);
}

static void
cpsw_tx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t segs[CPSW_TXFRAGS];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	struct mbuf *m0;
	int error, nsegs, seg, added = 0, padlen;

	/* Pull pending packets from IF queue and prep them for DMA. */
	while ((slot = STAILQ_FIRST(&sc->tx.avail)) != NULL) {
		IF_DEQUEUE(&sc->ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		slot->mbuf = m0;
		padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len;
		if (padlen < 0)
			padlen = 0;

		/* Create mapping in DMA memory */
		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
		/* If the packet is too fragmented, try to simplify. */
		if (error == EFBIG ||
		    (error == 0 &&
			nsegs + (padlen > 0 ? 1 : 0) > sc->tx.avail_queue_len)) {
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			if (padlen > 0) /* May as well add padding. */
				m_append(slot->mbuf, padlen,
				    sc->null_mbuf->m_hdr.mh_data);
			m0 = m_defrag(slot->mbuf, M_NOWAIT);
			if (m0 == NULL) {
				if_printf(sc->ifp,
				    "Can't defragment packet; dropping\n");
				m_freem(slot->mbuf);
			} else {
				CPSW_DEBUGF(("Requeueing defragmented packet"));
				IF_PREPEND(&sc->ifp->if_snd, m0);
			}
			slot->mbuf = NULL;
			continue;
		}
		if (error != 0) {
			if_printf(sc->ifp,
			    "%s: Can't setup DMA (error=%d), dropping packet\n",
			    __func__, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
				BUS_DMASYNC_PREWRITE);

		CPSW_DEBUGF(("Queueing TX packet: %d segments + %d pad bytes",
			nsegs, padlen));

		/* If there is only one segment, the for() loop
		 * gets skipped and the single buffer gets set up
		 * as both SOP and EOP. */
		/* Start by setting up the first buffer */
		bd.next = 0;
		bd.bufptr = segs[0].ds_addr;
		bd.bufoff = 0;
		bd.buflen = segs[0].ds_len;
		bd.pktlen = m_length(slot->mbuf, NULL) + padlen;
		bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER;
		for (seg = 1; seg < nsegs; ++seg) {
			/* Save the previous buffer (which isn't EOP) */
			cpsw_cpdma_write_bd(sc, slot, &bd);
			if (prev_slot != NULL)
				cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
			prev_slot = slot;
			STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
			sc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;
			slot = STAILQ_FIRST(&sc->tx.avail);

			/* Setup next buffer (which isn't SOP) */
			bd.next = 0;
			bd.bufptr = segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_OWNER;
		}
		/* Save the final buffer. */
		if (padlen <= 0)
			bd.flags |= CPDMA_BD_EOP;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
		sc->tx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
		++added;

		if (padlen > 0) {
			slot = STAILQ_FIRST(&sc->tx.avail);
			STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
			sc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;

			/* Setup buffer of null pad bytes (definitely EOP) */
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
			prev_slot = slot;
			bd.next = 0;
			bd.bufptr = sc->null_mbuf_paddr;
			bd.bufoff = 0;
			bd.buflen = padlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER;
			cpsw_cpdma_write_bd(sc, slot, &bd);
			++nsegs;
		}

		if (nsegs > sc->tx.longest_chain)
			sc->tx.longest_chain = nsegs;

		/* TODO: Should we defer the BPF tap until
		   after all packets are queued? */
		BPF_MTAP(sc->ifp, m0);
	}

	/* Attach the list of new buffers to the hardware TX queue. */
	last_old_slot = STAILQ_LAST(&sc->tx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->tx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
		}
	}
	sc->tx.queue_adds += added;
	sc->tx.active_queue_len += added;
	if (sc->tx.active_queue_len > sc->tx.max_active_queue_len) {
		sc->tx.max_active_queue_len = sc->tx.active_queue_len;
	}
}

static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot, *last_removed_slot = NULL;
	uint32_t flags, removed = 0;

	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) {
		CPSW_DEBUGF(("TX teardown of an empty queue"));
		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
		sc->tx.running = 0;
		return (0);
	}

	/* Pull completed buffers off the hardware TX queue. */
	while (slot != NULL) {
		flags = cpsw_cpdma_read_bd_flags(sc, slot);
		if (flags & CPDMA_BD_OWNER)
			break; /* Hardware is still using this packet. */

		CPSW_DEBUGF(("TX removing completed packet"));
		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		m_freem(slot->mbuf);
		slot->mbuf = NULL;

		/* Dequeue any additional buffers used by this packet. */
		while (slot != NULL && slot->mbuf == NULL) {
			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
			++removed;
			last_removed_slot = slot;
			slot = STAILQ_FIRST(&sc->tx.active);
		}

		/* TearDown complete is only marked on the SOP for the packet. */
		if (flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("TX teardown in progress"));
			cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
			/* TODO: Increment a count of dropped TX packets */
			sc->tx.running = 0;
			break;
		}
	}

	if (removed != 0) {
		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
		sc->tx.queue_removes += removed;
		sc->tx.active_queue_len -= removed;
		sc->tx.avail_queue_len += removed;
		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
	}
	return (removed);
}

/*
 *
 * Miscellaneous interrupts.
 *
 */

static void
cpsw_intr_rx_thresh(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0));

	CPSW_DEBUGF(("stat=%x", stat));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
}

static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
	uint32_t intstat;
	uint32_t dmastat;
	int txerr, rxerr, txchan, rxchan;

	printf("\n\n");
	device_printf(sc->dev,
	    "HOST ERROR:  PROGRAMMING ERROR DETECTED BY HARDWARE\n");
	printf("\n\n");
	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);

	txerr = (dmastat >> 20) & 15;
	txchan = (dmastat >> 16) & 7;
	rxerr = (dmastat >> 12) & 15;
	rxchan = (dmastat >> 8) & 7;

	switch (txerr) {
	case 0: break;
	case 1:	printf("SOP error on TX channel %d\n", txchan);
		break;
	case 2:	printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
		break;
	case 3:	printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
		break;
	case 4:	printf("Zero Buffer Pointer on TX channel %d\n", txchan);
		break;
	case 5:	printf("Zero Buffer Length on TX channel %d\n", txchan);
		break;
	case 6:	printf("Packet length error on TX channel %d\n", txchan);
		break;
	default: printf("Unknown error on TX channel %d\n", txchan);
		break;
	}

	if (txerr != 0) {
		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
		cpsw_dump_queue(sc, &sc->tx.active);
	}

	switch (rxerr) {
	case 0: break;
	case 2:	printf("Ownership bit not set on RX channel %d\n", rxchan);
		break;
	case 4:	printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
		break;
	case 5:	printf("Zero Buffer Length on RX channel %d\n", rxchan);
		break;
	case 6:	printf("Buffer offset too big on RX channel %d\n", rxchan);
		break;
	default: printf("Unknown RX error on RX channel %d\n", rxchan);
		break;
	}

	if (rxerr != 0) {
		printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(rxchan)));
		printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
		cpsw_dump_queue(sc, &sc->rx.active);
	}

	printf("\nALE Table\n");
	cpsw_ale_dump_table(sc);

	/* XXX: Do something more useful here than panicking? */
	panic("CPSW HOST ERROR INTERRUPT");

	/* Suppress this interrupt in the future (currently unreachable
	   because of the panic above). */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
	printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
	/* The watchdog will probably reset the controller in a little
	   while.  It will probably fail again. */
}
1741
1742static void
1743cpsw_intr_misc(void *arg)
1744{
1745	struct cpsw_softc *sc = arg;
1746	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
1747
1748	if (stat & 16)
1749		CPSW_DEBUGF(("Time sync event interrupt unimplemented"));
1750	if (stat & 8)
1751		cpsw_stats_collect(sc);
1752	if (stat & 4)
1753		cpsw_intr_misc_host_error(sc);
1754	if (stat & 2)
1755		CPSW_DEBUGF(("MDIO link change interrupt unimplemented"));
1756	if (stat & 1)
1757		CPSW_DEBUGF(("MDIO operation completed interrupt unimplemented"));
1758	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
1759}
1760
1761/*
1762 *
1763 * Periodic Checks and Watchdog.
1764 *
1765 */
1766
1767static void
1768cpsw_tick(void *msc)
1769{
1770	struct cpsw_softc *sc = msc;
1771
1772	/* Check for TX timeout */
1773	cpsw_tx_watchdog(sc);
1774
1775	/* Check for media type change */
1776	mii_tick(sc->mii);
1777	if(sc->cpsw_media_status != sc->mii->mii_media.ifm_media) {
1778		printf("%s: media type changed (ifm_media=%x)\n", __func__,
1779			sc->mii->mii_media.ifm_media);
1780		cpsw_ifmedia_upd(sc->ifp);
1781	}
1782
1783	/* Schedule another timeout one second from now */
1784	callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
1785}
1786
1787static void
1788cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1789{
1790	struct cpsw_softc *sc = ifp->if_softc;
1791	struct mii_data *mii;
1792
1793	CPSW_DEBUGF((""));
1794	CPSW_TX_LOCK(sc);
1795
1796	mii = sc->mii;
1797	mii_pollstat(mii);
1798
1799	ifmr->ifm_active = mii->mii_media_active;
1800	ifmr->ifm_status = mii->mii_media_status;
1801
1802	CPSW_TX_UNLOCK(sc);
1803}
1804
1805static int
1806cpsw_ifmedia_upd(struct ifnet *ifp)
1807{
1808	struct cpsw_softc *sc = ifp->if_softc;
1809
1810	CPSW_DEBUGF((""));
1811	if (ifp->if_flags & IFF_UP) {
1812		CPSW_GLOBAL_LOCK(sc);
1813		sc->cpsw_media_status = sc->mii->mii_media.ifm_media;
1814		mii_mediachg(sc->mii);
1815		cpsw_init_locked(sc);
1816		CPSW_GLOBAL_UNLOCK(sc);
1817	}
1818
1819	return (0);
1820}
1821
1822static void
1823cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
1824{
1825	cpsw_debugf_head("CPSW watchdog");
1826	if_printf(sc->ifp, "watchdog timeout\n");
1827	cpsw_shutdown_locked(sc);
1828	cpsw_init_locked(sc);
1829}

static void
cpsw_tx_watchdog(struct cpsw_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	CPSW_GLOBAL_LOCK(sc);
	if (sc->tx.active_queue_len == 0 || (ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !sc->tx.running) {
		sc->watchdog.timer = 0; /* Nothing to do. */
	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
		sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */
	} else if (cpsw_tx_dequeue(sc) > 0) {
		sc->watchdog.timer = 0; /* We just did something. */
	} else {
		/*
		 * TX work was pending but nothing completed.  Each
		 * tick is roughly a second; after about three stalled
		 * ticks, give up and reset the controller.
		 */
		++sc->watchdog.timer;
		if (sc->watchdog.timer > 2) {
			sc->watchdog.timer = 0;
			++ifp->if_oerrors;
			++sc->watchdog.resets;
			cpsw_tx_watchdog_full_reset(sc);
		}
	}
	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
	CPSW_GLOBAL_UNLOCK(sc);
}

/*
 *
 * ALE support routines.
 *
 */
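
/*
 * Layout of an ALE address entry as manipulated below.  This is a
 * summary inferred from this driver's accesses; the AM335x TRM is the
 * authoritative reference:
 *
 *	word0[31:0]	MAC address bytes 2-5
 *	word1[15:0]	MAC address bytes 0-1 (bit 8 is the multicast bit)
 *	word1[29:28]	entry type (0 = free, 1 = address)
 *	word1[31:30]	multicast forward state (3 = forward)
 *	word2[4:2]	port mask (one bit per port)
 */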

static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	/* Writing the index with bit 31 clear latches a table read. */
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
}

static void
cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
	/* Setting bit 31 commits the entry to the table. */
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1U << 31 | (idx & 1023));
}

static int
cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	/* First two entries are link address and broadcast. */
	for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
		    ((ale_entry[1] >> 8) & 1) == 1) { /* Multicast link addr */
			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
			cpsw_ale_write_entry(sc, i, ale_entry);
		}
	}
	return (CPSW_MAX_ALE_ENTRIES);
}

static int
cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac)
{
	int free_index = -1, matching_index = -1, i;
	uint32_t ale_entry[3];

	/* Find a matching entry or a free entry. */
	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);

		/* Entry Type[61:60] is 0 for a free entry. */
		if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
			free_index = i;
		}

		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
		    (((ale_entry[0] >> 24) & 0xFF) == mac[2]) &&
		    (((ale_entry[0] >> 16) & 0xFF) == mac[3]) &&
		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
			matching_index = i;
			break;
		}
	}

	if (matching_index < 0) {
		if (free_index < 0)
			return (ENOMEM);
		i = free_index;
	}

	/* Set MAC address. */
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = mac[0] << 8 | mac[1];

	/* Entry type[61:60] is addr entry(1), mcast fwd state[63:62] is fwd(3). */
	ale_entry[1] |= 0xd0U << 24;

	/* Set portmask [68:66]. */
	ale_entry[2] = (portmap & 7) << 2;

	cpsw_ale_write_entry(sc, i, ale_entry);

	return (0);
}
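
/*
 * A hypothetical usage sketch of the helper above: forward frames for
 * a multicast group out all three ports.  The group address here is
 * illustrative, not taken from the driver; the port mask 7 matches the
 * value used by cpsw_ale_update_addresses() below.
 *
 *	uint8_t group[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *
 *	cpsw_ale_mc_entry_set(sc, 7, group);
 */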

static void
cpsw_ale_dump_table(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (ale_entry[0] || ale_entry[1] || ale_entry[2]) {
			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[0],
			    ale_entry[1], ale_entry[2]);
			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
			    (ale_entry[1] >> 8) & 0xFF,
			    (ale_entry[1] >> 0) & 0xFF,
			    (ale_entry[0] >> 24) & 0xFF,
			    (ale_entry[0] >> 16) & 0xFF,
			    (ale_entry[0] >> 8) & 0xFF,
			    (ale_entry[0] >> 0) & 0xFF);
			printf(((ale_entry[1] >> 8) & 1) ? "mcast " : "ucast ");
			printf("type: %u ", (ale_entry[1] >> 28) & 3);
			printf("port: %u ", (ale_entry[2] >> 2) & 7);
			printf("\n");
		}
	}
	printf("\n");
}
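
/*
 * An illustrative (made-up) line of output from the dump above,
 * showing how the three words decode:
 *
 *	ALE[   0] 9a78bc56 10001234 00000000 mac: 12:34:9a:78:bc:56 ucast type: 1 port: 0
 */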

static int
cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
{
	uint8_t *mac;
	uint32_t ale_entry[3];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	int i;

	/*
	 * Route incoming packets for our MAC address to Port 0 (host).
	 * For simplicity, keep this entry at table index 0 in the ALE.
	 */
	if_addr_rlock(ifp);
	mac = LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr);
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = 0x10 << 24 | mac[0] << 8 | mac[1]; /* addr entry + mac */
	ale_entry[2] = 0; /* port = 0 */
	cpsw_ale_write_entry(sc, 0, ale_entry);

	/* Set outgoing MAC Address for Ports 1 and 2. */
	for (i = 1; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i),
		    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
		cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i),
		    mac[5] << 8 | mac[4]);
	}
	if_addr_runlock(ifp);

	/* Keep the broadcast address at table entry 1. */
	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
	/* Fwd state 3 (<< 30), addr entry 1 (<< 28), upper 16 bits of MAC. */
	ale_entry[1] = 0xd000ffff;
	ale_entry[2] = 0x0000001c; /* Forward to all ports. */
	cpsw_ale_write_entry(sc, 1, ale_entry);

	/*
	 * SIOCDELMULTI doesn't specify the particular address being
	 * removed, so we have to remove all and rebuild.
	 */
	if (purge)
		cpsw_ale_remove_all_mc_entries(sc);

	/* Set other multicast addresses as desired. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		cpsw_ale_mc_entry_set(sc, 7,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);

	return (0);
}

/*
 *
 * Statistics and Sysctls.
 *
 */

#if 0
/* Debugging helper; normally compiled out. */
static void
cpsw_stats_dump(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		CPSW_DEBUGF(("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
		    (intmax_t)sc->shadow_stats[i], r,
		    (intmax_t)sc->shadow_stats[i] + r));
	}
}
#endif

static void
cpsw_stats_collect(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	CPSW_DEBUGF(("Controller shadow statistics updated."));

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		sc->shadow_stats[i] += r;
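		/*
		 * The statistics registers decrement by the value
		 * written (write-to-decrement), so writing back the
		 * value just read zeroes the counter.
		 */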
		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r);
	}
}

static int
cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct cpsw_stat *stat;
	uint64_t result;

	sc = (struct cpsw_softc *)arg1;
	stat = &cpsw_stat_sysctls[oidp->oid_number];
	/* Report the shadow total plus the live register, without clearing. */
	result = sc->shadow_stats[oidp->oid_number];
	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
	return (sysctl_handle_64(oidp, &result, 0, req));
}

static int
cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	getbinuptime(&t);
	bintime_sub(&t, &sc->attach_uptime);
	result = t.sec;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

static int
cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		getbinuptime(&t);
		bintime_sub(&t, &sc->init_uptime);
		result = t.sec;
	} else
		result = 0;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_queue *queue)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
	    CTLFLAG_RD, &queue->queue_slots, 0,
	    "Total buffers currently assigned to this queue");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
	    CTLFLAG_RD, &queue->active_queue_len, 0,
	    "Buffers currently registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
	    "Max value of activeBuffers since last driver reset");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
	    CTLFLAG_RD, &queue->avail_queue_len, 0,
	    "Buffers allocated to this queue but not currently "
	    "registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
	    "Max value of availBuffers since last driver reset");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
	    CTLFLAG_RD, &queue->queue_adds, 0,
	    "Total buffers added to queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
	    CTLFLAG_RD, &queue->queue_removes, 0,
	    "Total buffers removed from queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
	    CTLFLAG_RD, &queue->longest_chain, 0,
	    "Max buffers used for a single packet");
}

static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_softc *sc)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
	    CTLFLAG_RD, &sc->watchdog.resets, 0,
	    "Total number of watchdog resets");
}

static void
cpsw_add_sysctls(struct cpsw_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *stats_node, *queue_node, *node;
	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
	    "Time since driver attach");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "uptime",
	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_uptime, "IU",
	    "Seconds since driver init");

	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "CPSW Statistics");
	stats_parent = SYSCTL_CHILDREN(stats_node);
	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		SYSCTL_ADD_PROC(ctx, stats_parent, i,
		    cpsw_stat_sysctls[i].oid,
		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0,
		    cpsw_stats_sysctl, "IU",
		    cpsw_stat_sysctls[i].oid);
	}

	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
	    CTLFLAG_RD, NULL, "CPSW Queue Statistics");
	queue_parent = SYSCTL_CHILDREN(queue_node);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "TX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->tx);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "RX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->rx);

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
	    CTLFLAG_RD, NULL, "Watchdog Statistics");
	cpsw_add_watchdog_sysctls(ctx, node, sc);
}
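
/*
 * The nodes above appear under the device's sysctl tree; assuming the
 * first (unit 0) instance, e.g.:
 *
 *	sysctl dev.cpsw.0.uptime
 *	sysctl dev.cpsw.0.queue.tx.activeBuffers
 *	sysctl dev.cpsw.0.watchdog.resets
 */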