/*-
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TI Common Platform Ethernet Switch (CPSW) Driver
 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
 *
 * This controller is documented in the AM335x Technical Reference
 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
 *
 * It is basically a single Ethernet port (port 0) wired internally to
 * a 3-port store-and-forward switch connected to two independent
 * "sliver" controllers (port 1 and port 2).  You can operate the
 * controller in a variety of different ways by suitably configuring
 * the slivers and the Address Lookup Engine (ALE) that routes packets
 * between the ports.
 *
 * This code was developed and tested on a BeagleBone with
 * an AM335x SoC.
 */
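
/*
 * A rough sketch of the datapath just described (a summary of the
 * comments above, not a figure from the TRM):
 *
 *            +---------------------------------+
 *   CPDMA    |                                 |
 *   (host) <-+-> port 0                        |
 *            |      3-port switch + ALE        |
 *            |   port 1 <-> sliver 1 <-> PHY 1 |
 *            |   port 2 <-> sliver 2 <-> PHY 2 |
 *            +---------------------------------+
 */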

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "if_cpswreg.h"
#include "if_cpswvar.h"

#include <arm/ti/ti_scm.h>

#include "miibus_if.h"

/* Device probe/attach/detach. */
static int cpsw_probe(device_t);
static void cpsw_init_slots(struct cpsw_softc *);
static int cpsw_attach(device_t);
static void cpsw_free_slot(struct cpsw_softc *, struct cpsw_slot *);
static int cpsw_detach(device_t);

/* Device Init/shutdown. */
static void cpsw_init(void *);
static void cpsw_init_locked(void *);
static int cpsw_shutdown(device_t);
static void cpsw_shutdown_locked(struct cpsw_softc *);

/* Device Suspend/Resume. */
static int cpsw_suspend(device_t);
static int cpsw_resume(device_t);

/* Ioctl. */
static int cpsw_ioctl(struct ifnet *, u_long command, caddr_t data);

static int cpsw_miibus_readreg(device_t, int phy, int reg);
static int cpsw_miibus_writereg(device_t, int phy, int reg, int value);

/* Send/Receive packets. */
static void cpsw_intr_rx(void *arg);
static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
static void cpsw_rx_enqueue(struct cpsw_softc *);
static void cpsw_start(struct ifnet *);
static void cpsw_tx_enqueue(struct cpsw_softc *);
static int cpsw_tx_dequeue(struct cpsw_softc *);

/* Misc interrupts and watchdog. */
static void cpsw_intr_rx_thresh(void *);
static void cpsw_intr_misc(void *);
static void cpsw_tick(void *);
static void cpsw_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int cpsw_ifmedia_upd(struct ifnet *);
static void cpsw_tx_watchdog(struct cpsw_softc *);

/* ALE support */
static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t portmap, uint8_t *mac);
static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
static void cpsw_ale_dump_table(struct cpsw_softc *);

/* Statistics and sysctls. */
static void cpsw_add_sysctls(struct cpsw_softc *);
static void cpsw_stats_collect(struct cpsw_softc *);
static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);

/*
 * Arbitrary limit on number of segments in an mbuf to be transmitted.
 * Packets with more segments than this will be defragmented before
 * they are queued.
 */
#define CPSW_TXFRAGS 8
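
/*
 * CPSW_TXFRAGS is also the nsegments limit passed to bus_dma_tag_create()
 * in cpsw_attach(), so bus_dmamap_load_mbuf_sg() fails with EFBIG for any
 * chain longer than this, and cpsw_tx_enqueue() responds by m_defrag()ing
 * the packet and requeueing it.
 */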

/*
 * TODO: The CPSW subsystem (CPSW_SS) can drive two independent PHYs
 * as separate Ethernet ports.  To properly support this, we should
 * break this into two separate devices: a CPSW_SS device that owns
 * the interrupts and actually talks to the CPSW hardware, and a
 * separate CPSW Ethernet child device for each Ethernet port.  The RX
 * interrupt, for example, would be part of CPSW_SS; it would receive
 * a packet, note the input port, and then dispatch it to the child
 * device's interface queue.  Similarly for transmit.
 *
 * It's not clear to me whether the device tree should be restructured
 * with a cpsw_ss node and two child nodes.  That would allow specifying
 * MAC addresses for each port, for example, but might be overkill.
 *
 * Unfortunately, I don't have hardware right now that supports two
 * Ethernet ports via CPSW.
 */

static device_method_t cpsw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpsw_probe),
	DEVMETHOD(device_attach,	cpsw_attach),
	DEVMETHOD(device_detach,	cpsw_detach),
	DEVMETHOD(device_shutdown,	cpsw_shutdown),
	DEVMETHOD(device_suspend,	cpsw_suspend),
	DEVMETHOD(device_resume,	cpsw_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	cpsw_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cpsw_miibus_writereg),
	{ 0, 0 }
};

static driver_t cpsw_driver = {
	"cpsw",
	cpsw_methods,
	sizeof(struct cpsw_softc),
};

static devclass_t cpsw_devclass;

DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
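
/*
 * The four IRQ resources map, in order, to the RX threshold, RX, TX, and
 * misc interrupt outputs; cpsw_attach() wires up handlers for all but the
 * TX interrupt (sc->res[3]), which this driver does not use.
 */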

/*
 * Number of entries here must match size of stats
 * array in struct cpsw_softc.
 */
static struct cpsw_stat {
	int	reg;
	char *oid;
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
	{0x00, "GoodRxFrames"},
	{0x04, "BroadcastRxFrames"},
	{0x08, "MulticastRxFrames"},
	{0x0C, "PauseRxFrames"},
	{0x10, "RxCrcErrors"},
	{0x14, "RxAlignErrors"},
	{0x18, "OversizeRxFrames"},
	{0x1c, "RxJabbers"},
	{0x20, "ShortRxFrames"},
	{0x24, "RxFragments"},
	{0x30, "RxOctets"},
	{0x34, "GoodTxFrames"},
	{0x38, "BroadcastTxFrames"},
	{0x3c, "MulticastTxFrames"},
	{0x40, "PauseTxFrames"},
	{0x44, "DeferredTxFrames"},
	{0x48, "CollisionsTxFrames"},
	{0x4c, "SingleCollisionTxFrames"},
	{0x50, "MultipleCollisionTxFrames"},
	{0x54, "ExcessiveCollisions"},
	{0x58, "LateCollisions"},
	{0x5c, "TxUnderrun"},
	{0x60, "CarrierSenseErrors"},
	{0x64, "TxOctets"},
	{0x68, "RxTx64OctetFrames"},
	{0x6c, "RxTx65to127OctetFrames"},
	{0x70, "RxTx128to255OctetFrames"},
	{0x74, "RxTx256to511OctetFrames"},
	{0x78, "RxTx512to1024OctetFrames"},
	{0x7c, "RxTx1024upOctetFrames"},
	{0x80, "NetOctets"},
	{0x84, "RxStartOfFrameOverruns"},
	{0x88, "RxMiddleOfFrameOverruns"},
	{0x8c, "RxDmaOverruns"}
};
/*
 * Basic debug support.
 */

#define IF_DEBUG(sc)  if (sc->cpsw_if_flags & IFF_DEBUG)

static void
cpsw_debugf_head(const char *funcname)
{
	int t = (int)(time_second % (24 * 60 * 60));

	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
}

#include <machine/stdarg.h>
static void
cpsw_debugf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

#define CPSW_DEBUGF(a) do {					\
	IF_DEBUG(sc) {						\
		cpsw_debugf_head(__func__);			\
		cpsw_debugf a;					\
	}							\
} while (0)
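
/*
 * Note the double parentheses at CPSW_DEBUGF call sites, e.g.
 * CPSW_DEBUGF(("stat=%x", stat)): the inner pair turns the whole
 * argument list into a single macro argument that is pasted after
 * cpsw_debugf, since this old-style macro cannot itself be variadic.
 */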

/*
 * Locking macros
 */
#define CPSW_TX_LOCK(sc) do {					\
		mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);		\
		mtx_lock(&(sc)->tx.lock);				\
} while (0)

#define CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
#define CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)

#define CPSW_RX_LOCK(sc) do {					\
		mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);		\
		mtx_lock(&(sc)->rx.lock);				\
} while (0)

#define CPSW_RX_UNLOCK(sc)		mtx_unlock(&(sc)->rx.lock)
#define CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)

#define CPSW_GLOBAL_LOCK(sc) do {					\
		if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) !=	\
		    (mtx_owned(&(sc)->rx.lock) ? 1 : 0)) {		\
			panic("cpsw deadlock possibility detection!");	\
		}							\
		mtx_lock(&(sc)->tx.lock);				\
		mtx_lock(&(sc)->rx.lock);				\
} while (0)

#define CPSW_GLOBAL_UNLOCK(sc) do {					\
		CPSW_RX_UNLOCK(sc);				\
		CPSW_TX_UNLOCK(sc);				\
} while (0)

#define CPSW_GLOBAL_LOCK_ASSERT(sc) do {				\
		CPSW_TX_LOCK_ASSERT(sc);				\
		CPSW_RX_LOCK_ASSERT(sc);				\
} while (0)
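
/*
 * The lock order is always TX before RX: CPSW_GLOBAL_LOCK takes tx.lock
 * first, and the MA_NOTOWNED assertions above catch any path that tries
 * to acquire one lock while already holding the other in the wrong order.
 */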

/*
 * Read/Write macros
 */
#define	cpsw_read_4(sc, reg)		bus_read_4(sc->res[0], reg)
#define	cpsw_write_4(sc, reg, val)	bus_write_4(sc->res[0], reg, val)

#define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))

#define	cpsw_cpdma_bd_paddr(sc, slot)				\
	BUS_SPACE_PHYSADDR(sc->res[0], slot->bd_offset)
#define	cpsw_cpdma_read_bd(sc, slot, val)				\
	bus_read_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd(sc, slot, val)				\
	bus_write_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)			\
	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
#define	cpsw_cpdma_read_bd_flags(sc, slot)		\
	bus_read_2(sc->res[0], slot->bd_offset + 14)
#define	cpsw_write_hdp_slot(sc, queue, slot)				\
	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
#define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
#define	cpsw_read_cp(sc, queue)				\
	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
#define	cpsw_write_cp(sc, queue, val)				\
	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
#define	cpsw_write_cp_slot(sc, queue, slot)		\
	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
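
/*
 * For reference, each CPPI buffer descriptor occupies 16 bytes of the
 * controller's CPPI RAM (hence the (i)*16 in cpsw_cpdma_bd_offset), and,
 * as mirrored by struct cpsw_cpdma_bd, is laid out as:
 *
 *	offset  0: next descriptor pointer (32 bits)
 *	offset  4: buffer pointer (32 bits)
 *	offset  8: buffer offset (16 bits)
 *	offset 10: buffer length (16 bits)
 *	offset 12: packet length (16 bits)
 *	offset 14: flags (16 bits, read by cpsw_cpdma_read_bd_flags above)
 */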

#if 0
/* XXX temporary function versions for debugging. */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t reg = queue->hdp_offset;
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
	cpsw_write_4(sc, reg, v);
}

static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
	cpsw_write_cp(sc, queue, v);
}
#endif

/*
 * Expanded dump routines for verbose debugging.
 */
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
	    "Port0"};
	struct cpsw_cpdma_bd bd;
	const char *sep;
	int i;

	cpsw_cpdma_read_bd(sc, slot, &bd);
	printf("BD Addr: 0x%08x   Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next);
	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
	printf("  Flags: ");
	sep = "";
	for (i = 0; i < 16; ++i) {
		if (bd.flags & (1 << (15 - i))) {
			printf("%s%s", sep, flags[i]);
			sep = ",";
		}
	}
	printf("\n");
	if (slot->mbuf) {
		printf("  Ether:  %14D\n",
		    (char *)(slot->mbuf->m_hdr.mh_data), " ");
		printf("  Packet: %16D\n",
		    (char *)(slot->mbuf->m_hdr.mh_data) + 14, " ");
	}
}
#define CPSW_DUMP_SLOT(sc, slot) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_slot(sc, slot);			\
	}							\
} while (0)

static void
cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
{
	struct cpsw_slot *slot;
	int i = 0;
	int others = 0;

	STAILQ_FOREACH(slot, q, next) {
		if (i > 4)
			++others;
		else
			cpsw_dump_slot(sc, slot);
		++i;
	}
	if (others)
		printf(" ... and %d more.\n", others);
	printf("\n");
}

#define CPSW_DUMP_QUEUE(sc, q) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_queue(sc, q);				\
	}							\
} while (0)

/*
 *
 * Device Probe, Attach, Detach.
 *
 */

static int
cpsw_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
		return (ENXIO);

	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
	return (BUS_PROBE_DEFAULT);
}

static void
cpsw_init_slots(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	int i;

	STAILQ_INIT(&sc->avail);

	/* Put the slot descriptors onto the global avail list. */
	for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); i++) {
		slot = &sc->_slots[i];
		slot->bd_offset = cpsw_cpdma_bd_offset(i);
		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
	}
}

/*
 * Bind an interrupt and add the relevant info to sc->interrupts.
 */
static int
cpsw_attach_interrupt(struct cpsw_softc *sc, struct resource *res, driver_intr_t *handler, const char *description)
{
	void **pcookie;
	int error;

	sc->interrupts[sc->interrupt_count].res = res;
	sc->interrupts[sc->interrupt_count].description = description;
	pcookie = &sc->interrupts[sc->interrupt_count].ih_cookie;

	error = bus_setup_intr(sc->dev, res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, *handler, sc, pcookie);
	if (error)
		device_printf(sc->dev,
		    "could not setup %s\n", description);
	else
		++sc->interrupt_count;
	return (error);
}

/*
 * Tear down everything in sc->interrupts.
 */
static void
cpsw_detach_interrupts(struct cpsw_softc *sc)
{
	int error;
	int i;

	for (i = 0; i < sizeof(sc->interrupts) / sizeof(sc->interrupts[0]); ++i) {
		if (!sc->interrupts[i].ih_cookie)
			continue;
		error = bus_teardown_intr(sc->dev,
		    sc->interrupts[i].res, sc->interrupts[i].ih_cookie);
		if (error)
			device_printf(sc->dev, "could not release %s\n",
			    sc->interrupts[i].description);
		sc->interrupts[i].ih_cookie = NULL;
	}
}

static int
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
{
	const int max_slots = sizeof(sc->_slots) / sizeof(sc->_slots[0]);
	struct cpsw_slot *slot;
	int i;

	if (requested < 0)
		requested = max_slots;

	for (i = 0; i < requested; ++i) {
		slot = STAILQ_FIRST(&sc->avail);
		if (slot == NULL)
			return (0);
		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
			if_printf(sc->ifp, "failed to create dmamap\n");
			return (ENOMEM);
		}
		STAILQ_REMOVE_HEAD(&sc->avail, next);
		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
		++queue->avail_queue_len;
		++queue->queue_slots;
	}
	return (0);
}

static int
cpsw_attach(device_t dev)
{
	bus_dma_segment_t segs[1];
	struct cpsw_softc *sc = device_get_softc(dev);
	struct mii_softc *miisc;
	struct ifnet *ifp;
	void *phy_sc;
	int error, phy, nsegs;
	uint32_t reg;

	CPSW_DEBUGF((""));

	getbinuptime(&sc->attach_uptime);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	/* Get the PHY address from the FDT. */
	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, &phy_sc) != 0) {
		device_printf(dev, "failed to get PHY address from FDT\n");
		return (ENXIO);
	}

	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
		reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);

	/* Allocate a busdma tag and DMA-safe memory for mbufs. */
	error = bus_dma_tag_create(
		bus_get_dma_tag(sc->dev),	/* parent */
		1, 0,				/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		MCLBYTES, CPSW_TXFRAGS,		/* maxsize, nsegments */
		MCLBYTES, 0,			/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&sc->mbuf_dtag);		/* dmatag */
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed\n");
		cpsw_detach(dev);
		return (error);
	}

	/* Allocate the network interface. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}

	/* Allocate the null mbuf and pre-sync it. */
	sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (sc->null_mbuf == NULL) {
		device_printf(dev, "m_getcl() failed\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	memset(sc->null_mbuf->m_hdr.mh_data, 0, sc->null_mbuf->m_ext.ext_size);
	bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap);
	bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    BUS_DMASYNC_PREWRITE);
	sc->null_mbuf_paddr = segs[0].ds_addr;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; /* FIXME: VLAN? */
	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_init = cpsw_init;
	ifp->if_start = cpsw_start;
	ifp->if_ioctl = cpsw_ioctl;

	cpsw_init_slots(sc);

	/* Allocate slots to TX and RX queues. */
	STAILQ_INIT(&sc->rx.avail);
	STAILQ_INIT(&sc->rx.active);
	STAILQ_INIT(&sc->tx.avail);
	STAILQ_INIT(&sc->tx.active);
	/* For now: 128 slots to TX, rest to RX. */
	/* XXX TODO: start with 32/64 and grow dynamically based on demand. */
	if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) {
		device_printf(dev, "failed to allocate dmamaps\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
	    sc->tx.queue_slots, sc->rx.queue_slots);

	ifp->if_snd.ifq_drv_maxlen = sc->tx.queue_slots;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);

	/* Get high part of MAC address from control module (mac_id0_hi) */
	/* TODO: Get MAC ID1 as well as MAC ID0. */
	ti_scm_reg_read_4(0x634, &reg);
	sc->mac_addr[0] = reg & 0xFF;
	sc->mac_addr[1] = (reg >>  8) & 0xFF;
	sc->mac_addr[2] = (reg >> 16) & 0xFF;
	sc->mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id0_lo) */
	ti_scm_reg_read_4(0x630, &reg);
	sc->mac_addr[4] = reg & 0xFF;
	sc->mac_addr[5] = (reg >>  8) & 0xFF;

	ether_ifattach(ifp, sc->mac_addr);
	callout_init(&sc->watchdog.callout, 0);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);

	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 30);

	/* Attach PHY(s) */
	error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd,
	    cpsw_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		cpsw_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc, MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F));

	/* Note: We don't use sc->res[3] (TX interrupt) */
	if (cpsw_attach_interrupt(sc, sc->res[1],
		cpsw_intr_rx_thresh, "CPSW RX threshold interrupt") ||
	    cpsw_attach_interrupt(sc, sc->res[2],
		cpsw_intr_rx, "CPSW RX interrupt") ||
	    cpsw_attach_interrupt(sc, sc->res[4],
		cpsw_intr_misc, "CPSW misc interrupt")) {
		cpsw_detach(dev);
		return (ENXIO);
	}

	return (0);
}

static void
cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	int error;

	if (slot->dmamap) {
		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
		KASSERT(error == 0, ("Mapping still active"));
		slot->dmamap = NULL;
	}
	if (slot->mbuf) {
		m_freem(slot->mbuf);
		slot->mbuf = NULL;
	}
}

static int
cpsw_detach(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	int error, i;

	CPSW_DEBUGF((""));

	/* Stop controller and free TX queue */
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		CPSW_GLOBAL_LOCK(sc);
		cpsw_shutdown_locked(sc);
		CPSW_GLOBAL_UNLOCK(sc);
		callout_drain(&sc->watchdog.callout);
	}

	bus_generic_detach(dev);
	device_delete_child(dev, sc->miibus);

	/* Stop and release all interrupts */
	cpsw_detach_interrupts(sc);

	/* Free dmamaps and mbufs */
	for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); ++i) {
		cpsw_free_slot(sc, &sc->_slots[i]);
	}

	/* Free DMA tag */
	error = bus_dma_tag_destroy(sc->mbuf_dtag);
	KASSERT(error == 0, ("Unable to destroy DMA tag"));

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->rx.lock);
	mtx_destroy(&sc->tx.lock);

	return (0);
}

/*
 *
 * Init/Shutdown.
 *
 */

static void
cpsw_reset(struct cpsw_softc *sc)
{
	int i;

	/* Reset RMII/RGMII wrapper. */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Reset CPSW subsystem. */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset DMA controller. */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}

static void
cpsw_init(void *arg)
{
	struct cpsw_softc *sc = arg;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_init_locked(arg);
	CPSW_GLOBAL_UNLOCK(sc);
}

static void
cpsw_init_locked(void *arg)
{
	struct ifnet *ifp;
	struct cpsw_softc *sc = arg;
	struct cpsw_slot *slot;
	uint32_t i;

	CPSW_DEBUGF((""));
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	getbinuptime(&sc->init_uptime);

	/* Reset the controller. */
	cpsw_reset(sc);

	/* Enable ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 31 | 1 << 4);

	/* Init Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Set Slave Mapping */
		cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
		cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i + 1), 0x33221100);
		cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
		/* Set MACCONTROL for ports 0,1: IFCTL_B(16), IFCTL_A(15),
		   GMII_EN(5), FULLDUPLEX(1) */
		/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
		/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
		cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), 1 << 15 | 1 << 5 | 1);
	}

	/* Set Host Port Mapping */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: all ports set to forwarding(3), initialize addrs */
	for (i = 0; i < 3; i++)
		cpsw_write_4(sc, CPSW_ALE_PORTCTL(i), 3);
	cpsw_ale_update_addresses(sc, 1);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Experiment:  Turn off flow control */
	/* This seems to fix the watchdog resets that have plagued
	   earlier versions of this driver; I'm not yet sure whether
	   there are negative effects. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Make the IP header 4-byte aligned. */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x3F);

	/* Enable host error interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);

	/* Select MII in GMII_SEL, Internal Delay mode */
	/* ti_scm_reg_write_4(0x650, 0); */

	/* Initialize active queues. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);

	/* Activate the network interface. */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static int
cpsw_shutdown(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_shutdown_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}

static void
cpsw_rx_teardown_locked(struct cpsw_softc *sc)
{
	struct mbuf *received, *next;
	int i = 0;

	CPSW_DEBUGF(("starting RX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	for (;;) {
		received = cpsw_rx_dequeue(sc);
		CPSW_GLOBAL_UNLOCK(sc);
		while (received != NULL) {
			next = received->m_nextpkt;
			received->m_nextpkt = NULL;
			(*sc->ifp->if_input)(sc->ifp, received);
			received = next;
		}
		CPSW_GLOBAL_LOCK(sc);
		if (!sc->rx.running) {
			CPSW_DEBUGF(("finished RX teardown (%d retries)", i));
			return;
		}
		if (++i > 10) {
			if_printf(sc->ifp, "Unable to cleanly shutdown receiver\n");
			return;
		}
		DELAY(10);
	}
}

static void
cpsw_tx_teardown_locked(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_DEBUGF(("starting TX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	cpsw_tx_dequeue(sc);
	while (sc->tx.running && ++i < 10) {
		DELAY(10);
		cpsw_tx_dequeue(sc);
	}
	if (sc->tx.running)
		if_printf(sc->ifp, "Unable to cleanly shutdown transmitter\n");
	CPSW_DEBUGF(("finished TX teardown (%d retries, %d idle buffers)",
	    i, sc->tx.active_queue_len));
}

static void
cpsw_shutdown_locked(struct cpsw_softc *sc)
{
	struct ifnet *ifp;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	/* Stop ticker */
	callout_stop(&sc->watchdog.callout);

	/* Tear down the RX/TX queues. */
	cpsw_rx_teardown_locked(sc);
	cpsw_tx_teardown_locked(sc);

	/* Capture stats before we reset controller. */
	cpsw_stats_collect(sc);

	cpsw_reset(sc);
}

/*
 *  Suspend/Resume.
 */

static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_shutdown_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}

static int
cpsw_resume(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF(("UNIMPLEMENTED"));
	return (0);
}

/*
 *
 *  IOCTL
 *
 */

static void
cpsw_set_promisc(struct cpsw_softc *sc, int set)
{
	/*
	 * Enabling promiscuous mode requires two bits of work: First,
	 * ALE_BYPASS needs to be enabled.  That disables the ALE
	 * forwarding logic and causes every packet to be sent to the
	 * host port.  That makes us promiscuous wrt received packets.
	 *
	 * With ALE forwarding disabled, the transmitter needs to set
	 * an explicit output port on every packet to route it to the
	 * correct egress.  This should be doable for systems such as
	 * BeagleBone where only one egress port is actually wired to
	 * a PHY.  If you have both egress ports wired up, life gets a
	 * lot more interesting.
	 *
	 * Hmmm.... NetBSD driver uses ALE_BYPASS always and doesn't
	 * seem to set explicit egress ports.  Does that mean they
	 * are always promiscuous?
	 */
	if (set) {
		printf("Promiscuous mode unimplemented\n");
	}
}

static void
cpsw_set_allmulti(struct cpsw_softc *sc, int set)
{
	if (set) {
		printf("All-multicast mode unimplemented\n");
	}
}

static int
cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;
	uint32_t changed;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		CPSW_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				changed = ifp->if_flags ^ sc->cpsw_if_flags;
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    ifp->if_flags & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    ifp->if_flags & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING; starting up"));
				cpsw_init_locked(sc);
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING; shutting down"));
			cpsw_shutdown_locked(sc);
		}

		sc->cpsw_if_flags = ifp->if_flags;
		CPSW_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		cpsw_ale_update_addresses(sc, 0);
		break;
	case SIOCDELMULTI:
		/*
		 * Ugh.  DELMULTI doesn't provide the specific address
		 * being removed, so the best we can do is remove
		 * everything and rebuild it all.
		 */
		cpsw_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

/*
 *
 * MIIBUS
 *
 */
static int
cpsw_miibus_ready(struct cpsw_softc *sc)
{
	uint32_t r, retries = CPSW_MIIBUS_RETRIES;

	while (--retries) {
		r = cpsw_read_4(sc, MDIOUSERACCESS0);
		if ((r & (1 << 31)) == 0)
			return (1);
		DELAY(CPSW_MIIBUS_DELAY);
	}
	return (0);
}

static int
cpsw_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	uint32_t cmd, r;

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO not ready to read\n");
		return (0);
	}

	/* Set GO, reg, phy */
	cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
	cpsw_write_4(sc, MDIOUSERACCESS0, cmd);

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO timed out during read\n");
		return (0);
	}

	r = cpsw_read_4(sc, MDIOUSERACCESS0);
	if ((r & (1 << 29)) == 0) {
		device_printf(dev, "Failed to read from PHY.\n");
		r = 0;
	}
	return (r & 0xFFFF);
}
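
/*
 * For reference, the MDIOUSERACCESS0 bit fields encoded above and in
 * cpsw_miibus_writereg() below: GO (bit 31, also the busy flag polled by
 * cpsw_miibus_ready), WRITE (bit 30), REGADR (bits 25:21), PHYADR (bits
 * 20:16), ACK (bit 29, set by hardware on success), and DATA (bits 15:0).
 */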

static int
cpsw_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	uint32_t cmd;

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO not ready to write\n");
		return (0);
	}

	/* Set GO, WRITE, reg, phy, and value */
	cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16
	    | (value & 0xFFFF);
	cpsw_write_4(sc, MDIOUSERACCESS0, cmd);

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO timed out during write\n");
		return (0);
	}

	if ((cpsw_read_4(sc, MDIOUSERACCESS0) & (1 << 29)) == 0)
		device_printf(dev, "Failed to write to PHY.\n");

	return (0);
}

/*
 *
 * Transmit/Receive Packets.
 *
 */

static void
cpsw_intr_rx(void *arg)
{
	struct cpsw_softc *sc = arg;
	struct mbuf *received, *next;

	CPSW_RX_LOCK(sc);
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		(*sc->ifp->if_input)(sc->ifp, received);
		received = next;
	}
}

static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *slot;
	struct ifnet *ifp;
	struct mbuf *mb_head, *mb_tail;
	int removed = 0;

	ifp = sc->ifp;
	mb_head = mb_tail = NULL;

	/* Pull completed packets off hardware RX queue. */
	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
		cpsw_cpdma_read_bd(sc, slot, &bd);
		if (bd.flags & CPDMA_BD_OWNER)
			break; /* Still in use by hardware */

		CPSW_DEBUGF(("Removing received packet from RX queue"));
		++removed;
		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("RX teardown in progress"));
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
			sc->rx.running = 0;
			break;
		}

		cpsw_write_cp_slot(sc, &sc->rx, slot);

		/* Set up mbuf */
		/* TODO: track SOP/EOP bits to assemble a full mbuf
		   out of received fragments. */
		slot->mbuf->m_hdr.mh_data += bd.bufoff;
		slot->mbuf->m_hdr.mh_len = bd.pktlen - 4;
		slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
		slot->mbuf->m_flags |= M_PKTHDR;
		slot->mbuf->m_pkthdr.rcvif = ifp;
		slot->mbuf->m_nextpkt = NULL;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				slot->mbuf->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Add mbuf to packet list to be returned. */
		if (mb_tail) {
			mb_tail->m_nextpkt = slot->mbuf;
		} else {
			mb_head = slot->mbuf;
		}
		mb_tail = slot->mbuf;
		slot->mbuf = NULL;
	}

	if (removed != 0) {
		sc->rx.queue_removes += removed;
		sc->rx.active_queue_len -= removed;
		sc->rx.avail_queue_len += removed;
		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
	}
	return (mb_head);
}

static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct ifnet *ifp = sc->ifp;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	int error, nsegs, added = 0;

	/* Register new mbufs with hardware. */
	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
		if (slot->mbuf == NULL) {
			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (slot->mbuf == NULL) {
				if_printf(sc->ifp, "Unable to fill RX queue\n");
				break;
			}
			slot->mbuf->m_len =
			    slot->mbuf->m_pkthdr.len =
			    slot->mbuf->m_ext.ext_size;
		}

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			if_printf(ifp,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);

		/* Create and submit new RX descriptor. */
		bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = MCLBYTES - 1;
		bd.pktlen = bd.buflen;
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		++added;

		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
		sc->rx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
	}

	if (added == 0)
		return;

	CPSW_DEBUGF(("Adding %d buffers to RX queue", added));

	/* Link new entries to hardware RX queue. */
	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->rx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
		}
	}
	sc->rx.queue_adds += added;
	sc->rx.active_queue_len += added;
	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) {
		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
	}
}

static void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	CPSW_TX_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->tx.running) {
		cpsw_tx_enqueue(sc);
		cpsw_tx_dequeue(sc);
	}
	CPSW_TX_UNLOCK(sc);
}

static void
cpsw_tx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t segs[CPSW_TXFRAGS];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	struct mbuf *m0;
	int error, nsegs, seg, added = 0, padlen;

	/* Pull pending packets from IF queue and prep them for DMA. */
	while ((slot = STAILQ_FIRST(&sc->tx.avail)) != NULL) {
		IF_DEQUEUE(&sc->ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		slot->mbuf = m0;
		padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len;
		if (padlen < 0)
			padlen = 0;

		/* Create mapping in DMA memory */
		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
		/* If the packet is too fragmented, try to simplify. */
		if (error == EFBIG ||
		    (error == 0 &&
			nsegs + (padlen > 0 ? 1 : 0) > sc->tx.avail_queue_len)) {
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			if (padlen > 0) /* May as well add padding. */
				m_append(slot->mbuf, padlen,
				    sc->null_mbuf->m_hdr.mh_data);
			m0 = m_defrag(slot->mbuf, M_NOWAIT);
			if (m0 == NULL) {
				if_printf(sc->ifp,
				    "Can't defragment packet; dropping\n");
				m_freem(slot->mbuf);
			} else {
				CPSW_DEBUGF(("Requeueing defragmented packet"));
				IF_PREPEND(&sc->ifp->if_snd, m0);
			}
			slot->mbuf = NULL;
			continue;
		}
		if (error != 0) {
			if_printf(sc->ifp,
			    "%s: Can't setup DMA (error=%d), dropping packet\n",
			    __func__, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
				BUS_DMASYNC_PREWRITE);

		CPSW_DEBUGF(("Queueing TX packet: %d segments + %d pad bytes",
			nsegs, padlen));

		/* If there is only one segment, the for() loop
		 * gets skipped and the single buffer gets set up
		 * as both SOP and EOP. */
		/* Start by setting up the first buffer */
		bd.next = 0;
		bd.bufptr = segs[0].ds_addr;
		bd.bufoff = 0;
		bd.buflen = segs[0].ds_len;
		bd.pktlen = m_length(slot->mbuf, NULL) + padlen;
		bd.flags =  CPDMA_BD_SOP | CPDMA_BD_OWNER;
		for (seg = 1; seg < nsegs; ++seg) {
			/* Save the previous buffer (which isn't EOP) */
			cpsw_cpdma_write_bd(sc, slot, &bd);
			if (prev_slot != NULL)
				cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
			prev_slot = slot;
			STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
			sc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;
			slot = STAILQ_FIRST(&sc->tx.avail);

			/* Setup next buffer (which isn't SOP) */
			bd.next = 0;
			bd.bufptr = segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_OWNER;
		}
		/* Save the final buffer. */
		if (padlen <= 0)
			bd.flags |= CPDMA_BD_EOP;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
		sc->tx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
		++added;

		if (padlen > 0) {
			slot = STAILQ_FIRST(&sc->tx.avail);
			STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
			sc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;

			/* Setup buffer of null pad bytes (definitely EOP) */
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
			prev_slot = slot;
			bd.next = 0;
			bd.bufptr = sc->null_mbuf_paddr;
			bd.bufoff = 0;
			bd.buflen = padlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER;
			cpsw_cpdma_write_bd(sc, slot, &bd);
			++nsegs;
		}

		if (nsegs > sc->tx.longest_chain)
			sc->tx.longest_chain = nsegs;

		/* TODO: Should we defer the BPF tap until
		   after all packets are queued? */
		BPF_MTAP(sc->ifp, m0);
	}

	/* Attach the list of new buffers to the hardware TX queue. */
	last_old_slot = STAILQ_LAST(&sc->tx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->tx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
		}
	}
	sc->tx.queue_adds += added;
	sc->tx.active_queue_len += added;
	if (sc->tx.active_queue_len > sc->tx.max_active_queue_len) {
		sc->tx.max_active_queue_len = sc->tx.active_queue_len;
	}
}

static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot, *last_removed_slot = NULL;
	uint32_t flags, removed = 0;

	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) {
		CPSW_DEBUGF(("TX teardown of an empty queue"));
		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
		sc->tx.running = 0;
		return (0);
	}

	/* Pull completed buffers off the hardware TX queue. */
	while (slot != NULL) {
		flags = cpsw_cpdma_read_bd_flags(sc, slot);
		if (flags & CPDMA_BD_OWNER)
			break; /* Hardware is still using this packet. */

		CPSW_DEBUGF(("TX removing completed packet"));
		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		m_freem(slot->mbuf);
		slot->mbuf = NULL;

		/* Dequeue any additional buffers used by this packet. */
		while (slot != NULL && slot->mbuf == NULL) {
			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
			++removed;
			last_removed_slot = slot;
			slot = STAILQ_FIRST(&sc->tx.active);
		}

		/* TearDown complete is only marked on the SOP for the packet. */
		if (flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("TX teardown in progress"));
			cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
			/* TODO: Increment a count of dropped TX packets. */
			sc->tx.running = 0;
			break;
		}
	}

	if (removed != 0) {
		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
		sc->tx.queue_removes += removed;
		sc->tx.active_queue_len -= removed;
		sc->tx.avail_queue_len += removed;
		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
	}
	return (removed);
}

/*
 *
 * Miscellaneous interrupts.
 *
 */

static void
cpsw_intr_rx_thresh(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0));

	CPSW_DEBUGF(("stat=%x", stat));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
}

static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
	uint32_t intstat;
	uint32_t dmastat;
	int txerr, rxerr, txchan, rxchan;

	printf("\n\n");
	device_printf(sc->dev,
	    "HOST ERROR:  PROGRAMMING ERROR DETECTED BY HARDWARE\n");
	printf("\n\n");
	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);

	txerr = (dmastat >> 20) & 15;
	txchan = (dmastat >> 16) & 7;
	rxerr = (dmastat >> 12) & 15;
	rxchan = (dmastat >> 8) & 7;

	switch (txerr) {
	case 0: break;
	case 1:	printf("SOP error on TX channel %d\n", txchan);
		break;
	case 2:	printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
		break;
	case 3:	printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
		break;
	case 4:	printf("Zero Buffer Pointer on TX channel %d\n", txchan);
		break;
	case 5:	printf("Zero Buffer Length on TX channel %d\n", txchan);
		break;
	case 6:	printf("Packet length error on TX channel %d\n", txchan);
		break;
	default: printf("Unknown error on TX channel %d\n", txchan);
		break;
	}

	if (txerr != 0) {
		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
		cpsw_dump_queue(sc, &sc->tx.active);
	}

	switch (rxerr) {
	case 0: break;
	case 2:	printf("Ownership bit not set on RX channel %d\n", rxchan);
		break;
	case 4:	printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
		break;
	case 5:	printf("Zero Buffer Length on RX channel %d\n", rxchan);
		break;
	case 6:	printf("Buffer offset too big on RX channel %d\n", rxchan);
		break;
	default: printf("Unknown RX error on RX channel %d\n", rxchan);
		break;
	}

	if (rxerr != 0) {
		printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(rxchan)));
		printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
		cpsw_dump_queue(sc, &sc->rx.active);
	}

	printf("\nALE Table\n");
	cpsw_ale_dump_table(sc);

	/* XXX do something useful here?? */
	panic("CPSW HOST ERROR INTERRUPT");

	/* Suppress this interrupt in the future. */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
	printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
	/*
	 * The watchdog will probably reset the controller
	 * in a little while.  It will probably fail again.
	 */
}

static void
cpsw_intr_misc(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));

	if (stat & 16)
		CPSW_DEBUGF(("Time sync event interrupt unimplemented"));
	if (stat & 8)
		cpsw_stats_collect(sc);
	if (stat & 4)
		cpsw_intr_misc_host_error(sc);
	if (stat & 2)
		CPSW_DEBUGF(("MDIO link change interrupt unimplemented"));
	if (stat & 1)
		CPSW_DEBUGF(("MDIO operation completed interrupt unimplemented"));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
}

/*
 *
 * Periodic Checks and Watchdog.
 *
 */

static void
cpsw_tick(void *msc)
{
	struct cpsw_softc *sc = msc;

	/* Check for TX timeout */
	cpsw_tx_watchdog(sc);

	/* Check for media type change */
	mii_tick(sc->mii);
	if (sc->cpsw_media_status != sc->mii->mii_media.ifm_media) {
		printf("%s: media type changed (ifm_media=%x)\n", __func__,
			sc->mii->mii_media.ifm_media);
		cpsw_ifmedia_upd(sc->ifp);
	}

	/* Schedule another timeout one second from now */
	callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
}

static void
cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	CPSW_DEBUGF((""));
	CPSW_TX_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	CPSW_TX_UNLOCK(sc);
}

static int
cpsw_ifmedia_upd(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	CPSW_DEBUGF((""));
	if (ifp->if_flags & IFF_UP) {
		CPSW_GLOBAL_LOCK(sc);
		sc->cpsw_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		cpsw_init_locked(sc);
		CPSW_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
{
	cpsw_debugf_head("CPSW watchdog");
	if_printf(sc->ifp, "watchdog timeout\n");
	cpsw_shutdown_locked(sc);
	cpsw_init_locked(sc);
}

static void
cpsw_tx_watchdog(struct cpsw_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	CPSW_GLOBAL_LOCK(sc);
	if (sc->tx.active_queue_len == 0 || (ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !sc->tx.running) {
		sc->watchdog.timer = 0; /* Nothing to do. */
	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
		sc->watchdog.timer = 0;  /* Stuff done while we weren't looking. */
	} else if (cpsw_tx_dequeue(sc) > 0) {
		sc->watchdog.timer = 0;  /* We just did something. */
	} else {
		/* There was something to do but it didn't get done. */
		++sc->watchdog.timer;
		if (sc->watchdog.timer > 2) {
			sc->watchdog.timer = 0;
			++ifp->if_oerrors;
			++sc->watchdog.resets;
			cpsw_tx_watchdog_full_reset(sc);
		}
	}
	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
	CPSW_GLOBAL_UNLOCK(sc);
}

/*
 *
 * ALE support routines.
 *
 */

static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
}

static void
cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
}

static int
cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	/* First two entries are link address and broadcast. */
	for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
		    ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */
			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
			cpsw_ale_write_entry(sc, i, ale_entry);
		}
	}
	return (CPSW_MAX_ALE_ENTRIES);
}

static int
cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac)
{
	int free_index = -1, matching_index = -1, i;
	uint32_t ale_entry[3];

	/* Find a matching entry or a free entry. */
	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);

		/* Entry Type[61:60] is 0 for free entry */
		if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
			free_index = i;
		}

		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
		    (((ale_entry[0] >> 24) & 0xFF) == mac[2]) &&
		    (((ale_entry[0] >> 16) & 0xFF) == mac[3]) &&
		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
			matching_index = i;
			break;
		}
	}

	if (matching_index < 0) {
		if (free_index < 0)
			return (ENOMEM);
		i = free_index;
	}

	/* Set MAC address */
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = mac[0] << 8 | mac[1];

	/* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3) */
	ale_entry[1] |= 0xd0 << 24;

	/* Set portmask [68:66] */
	ale_entry[2] = (portmap & 7) << 2;

	cpsw_ale_write_entry(sc, i, ale_entry);

	return (0);
}
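
/*
 * Worked example (illustrative only): for the all-hosts multicast
 * group 01:00:5e:00:00:01 with portmap 7, the entry written above is:
 *
 *	ale_entry[0] = 0x5e000001	(mac[2..5])
 *	ale_entry[1] = 0xd0000100	(fwd state 3, type 1, mac[0..1])
 *	ale_entry[2] = 0x0000001c	(port mask 7 at bits 4:2)
 */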

static void
cpsw_ale_dump_table(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (ale_entry[0] || ale_entry[1] || ale_entry[2]) {
			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[0],
				ale_entry[1], ale_entry[2]);
			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
				(ale_entry[1] >> 8) & 0xFF,
				(ale_entry[1] >> 0) & 0xFF,
				(ale_entry[0] >> 24) & 0xFF,
				(ale_entry[0] >> 16) & 0xFF,
				(ale_entry[0] >> 8) & 0xFF,
				(ale_entry[0] >> 0) & 0xFF);
			printf(((ale_entry[1] >> 8) & 1) ? "mcast " : "ucast ");
			printf("type: %u ", (ale_entry[1] >> 28) & 3);
			printf("port: %u ", (ale_entry[2] >> 2) & 7);
			printf("\n");
		}
	}
	printf("\n");
}

static int
cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
{
	uint8_t *mac;
	uint32_t ale_entry[3];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	int i;

	/* Route incoming packets for our MAC address to Port 0 (host). */
	/* For simplicity, keep this entry at table index 0 in the ALE. */
	if_addr_rlock(ifp);
	mac = LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr);
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = 0x10 << 24 | mac[0] << 8 | mac[1]; /* addr entry + mac */
	ale_entry[2] = 0; /* port = 0 */
	cpsw_ale_write_entry(sc, 0, ale_entry);

	/* Set outgoing MAC Address for Ports 1 and 2. */
	for (i = 1; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i),
		    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
		cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i),
		    mac[5] << 8 | mac[4]);
	}
	if_addr_runlock(ifp);

	/* Keep the broadcast address at table entry 1. */
	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
	/* Fwd state (3 << 30), addr entry (1 << 28), upper 16 bits of MAC. */
	ale_entry[1] = 0xd000ffff;
	ale_entry[2] = 0x0000001c; /* Forward to all ports */
	cpsw_ale_write_entry(sc, 1, ale_entry);

	/*
	 * SIOCDELMULTI doesn't specify the particular address
	 * being removed, so we have to remove all and rebuild.
	 */
	if (purge)
		cpsw_ale_remove_all_mc_entries(sc);

	/* Set other multicast addrs desired. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		cpsw_ale_mc_entry_set(sc, 7,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);

	return (0);
}

/*
 *
 * Statistics and Sysctls.
 *
 */

#if 0
static void
cpsw_stats_dump(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		CPSW_DEBUGF(("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
			     (intmax_t)sc->shadow_stats[i], r,
			     (intmax_t)sc->shadow_stats[i] + r));
	}
}
#endif

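/*
 * Shadow statistics scheme (summary; assuming the write-to-decrement
 * counter behavior described in the TRM): the controller's statistics
 * counters are 32 bits wide and a write subtracts the value written
 * from the counter.  cpsw_stats_collect() therefore reads each
 * counter, adds it to a 64-bit shadow copy, and writes the read value
 * back, zeroing exactly what was harvested.  Readers then report
 * shadow + current hardware value, a monotonic count since attach.
 */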
static void
cpsw_stats_collect(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	CPSW_DEBUGF(("Controller shadow statistics updated."));

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		sc->shadow_stats[i] += r;
		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg,
		    r);
	}
}

static int
cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct cpsw_stat *stat;
	uint64_t result;

	sc = (struct cpsw_softc *)arg1;
	stat = &cpsw_stat_sysctls[oidp->oid_number];
	result = sc->shadow_stats[oidp->oid_number];
	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
	return (sysctl_handle_64(oidp, &result, 0, req));
}

static int
cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	getbinuptime(&t);
	bintime_sub(&t, &sc->attach_uptime);
	result = t.sec;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

static int
cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		getbinuptime(&t);
		bintime_sub(&t, &sc->init_uptime);
		result = t.sec;
	} else
		result = 0;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_queue *queue)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
	    CTLFLAG_RD, &queue->queue_slots, 0,
	    "Total buffers currently assigned to this queue");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
	    CTLFLAG_RD, &queue->active_queue_len, 0,
	    "Buffers currently registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
	    "Max value of activeBuffers since last driver reset");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
	    CTLFLAG_RD, &queue->avail_queue_len, 0,
	    "Buffers allocated to this queue but not currently "
	    "registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
	    "Max value of availBuffers since last driver reset");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
	    CTLFLAG_RD, &queue->queue_adds, 0,
	    "Total buffers added to queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
	    CTLFLAG_RD, &queue->queue_removes, 0,
	    "Total buffers removed from queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
	    CTLFLAG_RD, &queue->longest_chain, 0,
	    "Max buffers used for a single packet");
}

static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx,
    struct sysctl_oid *node, struct cpsw_softc *sc)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
	    CTLFLAG_RD, &sc->watchdog.resets, 0,
	    "Total number of watchdog resets");
}

static void
cpsw_add_sysctls(struct cpsw_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *stats_node, *queue_node, *node;
	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
	    "Seconds since driver attach");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "uptime",
	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_uptime, "IU",
	    "Seconds since driver init");

	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "CPSW Statistics");
	stats_parent = SYSCTL_CHILDREN(stats_node);
	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		SYSCTL_ADD_PROC(ctx, stats_parent, i,
		    cpsw_stat_sysctls[i].oid,
		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0,
		    cpsw_stats_sysctl, "IU",
		    cpsw_stat_sysctls[i].oid);
	}

	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
	    CTLFLAG_RD, NULL, "CPSW Queue Statistics");
	queue_parent = SYSCTL_CHILDREN(queue_node);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "TX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->tx);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "RX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->rx);

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
	    CTLFLAG_RD, NULL, "Watchdog Statistics");
	cpsw_add_watchdog_sysctls(ctx, node, sc);
}
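
/*
 * Example (illustrative, assuming the device attaches as cpsw0): the
 * nodes added above appear under the device's sysctl tree, e.g.
 *
 *	# sysctl dev.cpsw.0.uptime
 *	# sysctl dev.cpsw.0.queue.tx.activeBuffers
 *	# sysctl dev.cpsw.0.watchdog.resets
 */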