1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
5 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30/*
31 * TI Common Platform Ethernet Switch (CPSW) Driver
32 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
33 *
34 * This controller is documented in the AM335x Technical Reference
35 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
36 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
37 *
38 * It is basically a single Ethernet port (port 0) wired internally to
39 * a 3-port store-and-forward switch connected to two independent
40 * "sliver" controllers (port 1 and port 2).  You can operate the
41 * controller in a variety of different ways by suitably configuring
42 * the slivers and the Address Lookup Engine (ALE) that routes packets
43 * between the ports.
44 *
45 * This code was developed and tested on a BeagleBone with
46 * an AM335x SoC.
47 */
48
49#include <sys/cdefs.h>
50__FBSDID("$FreeBSD$");
51
52#include "opt_cpsw.h"
53
54#include <sys/param.h>
55#include <sys/bus.h>
56#include <sys/kernel.h>
57#include <sys/lock.h>
58#include <sys/mbuf.h>
59#include <sys/module.h>
60#include <sys/mutex.h>
61#include <sys/rman.h>
62#include <sys/socket.h>
63#include <sys/sockio.h>
64#include <sys/sysctl.h>
65
66#include <machine/bus.h>
67#include <machine/resource.h>
68#include <machine/stdarg.h>
69
70#include <net/ethernet.h>
71#include <net/bpf.h>
72#include <net/if.h>
73#include <net/if_dl.h>
74#include <net/if_media.h>
75#include <net/if_types.h>
76
77#include <dev/extres/syscon/syscon.h>
78#include "syscon_if.h"
79#include <arm/ti/am335x/am335x_scm.h>
80
81#include <dev/mii/mii.h>
82#include <dev/mii/miivar.h>
83
84#include <dev/ofw/ofw_bus.h>
85#include <dev/ofw/ofw_bus_subr.h>
86
87#include <dev/fdt/fdt_common.h>
88
89#ifdef CPSW_ETHERSWITCH
90#include <dev/etherswitch/etherswitch.h>
91#include "etherswitch_if.h"
92#endif
93
94#include "if_cpswreg.h"
95#include "if_cpswvar.h"
96
97#include "miibus_if.h"
98
99/* Device probe/attach/detach. */
100static int cpsw_probe(device_t);
101static int cpsw_attach(device_t);
102static int cpsw_detach(device_t);
103static int cpswp_probe(device_t);
104static int cpswp_attach(device_t);
105static int cpswp_detach(device_t);
106
107static phandle_t cpsw_get_node(device_t, device_t);
108
109/* Device Init/shutdown. */
110static int cpsw_shutdown(device_t);
111static void cpswp_init(void *);
112static void cpswp_init_locked(void *);
113static void cpswp_stop_locked(struct cpswp_softc *);
114
115/* Device Suspend/Resume. */
116static int cpsw_suspend(device_t);
117static int cpsw_resume(device_t);
118
119/* Ioctl. */
120static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data);
121
122static int cpswp_miibus_readreg(device_t, int phy, int reg);
123static int cpswp_miibus_writereg(device_t, int phy, int reg, int value);
124static void cpswp_miibus_statchg(device_t);
125
126/* Send/Receive packets. */
127static void cpsw_intr_rx(void *arg);
128static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
129static void cpsw_rx_enqueue(struct cpsw_softc *);
130static void cpswp_start(struct ifnet *);
131static void cpsw_intr_tx(void *);
132static void cpswp_tx_enqueue(struct cpswp_softc *);
133static int cpsw_tx_dequeue(struct cpsw_softc *);
134
135/* Misc interrupts and watchdog. */
136static void cpsw_intr_rx_thresh(void *);
137static void cpsw_intr_misc(void *);
138static void cpswp_tick(void *);
139static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
140static int cpswp_ifmedia_upd(struct ifnet *);
141static void cpsw_tx_watchdog(void *);
142
143/* ALE support */
144static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *);
145static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *);
146static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *);
147static void cpsw_ale_dump_table(struct cpsw_softc *);
148static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int,
149	int);
150static int cpswp_ale_update_addresses(struct cpswp_softc *, int);
151
152/* Statistics and sysctls. */
153static void cpsw_add_sysctls(struct cpsw_softc *);
154static void cpsw_stats_collect(struct cpsw_softc *);
155static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);
156
157#ifdef CPSW_ETHERSWITCH
158static etherswitch_info_t *cpsw_getinfo(device_t);
159static int cpsw_getport(device_t, etherswitch_port_t *);
160static int cpsw_setport(device_t, etherswitch_port_t *);
161static int cpsw_getconf(device_t, etherswitch_conf_t *);
162static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *);
163static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *);
164static int cpsw_readreg(device_t, int);
165static int cpsw_writereg(device_t, int, int);
166static int cpsw_readphy(device_t, int, int);
167static int cpsw_writephy(device_t, int, int, int);
168#endif
169
170/*
171 * Arbitrary limit on number of segments in an mbuf to be transmitted.
172 * Packets with more segments than this will be defragmented before
173 * they are queued.
174 */
175#define	CPSW_TXFRAGS		16
176
177/* Shared resources. */
/* Shared resources. */
/*
 * Method table for the "cpswss" switch-subsystem driver.  This device
 * owns the shared hardware (memory window, IRQs, CPDMA, ALE) and acts
 * as the bus parent of the per-port "cpsw" children.
 */
static device_method_t cpsw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpsw_probe),
	DEVMETHOD(device_attach,	cpsw_attach),
	DEVMETHOD(device_detach,	cpsw_detach),
	DEVMETHOD(device_shutdown,	cpsw_shutdown),
	DEVMETHOD(device_suspend,	cpsw_suspend),
	DEVMETHOD(device_resume,	cpsw_resume),
	/* Bus interface */
	DEVMETHOD(bus_add_child,	device_add_child_ordered),
	/* OFW methods */
	DEVMETHOD(ofw_bus_get_node,	cpsw_get_node),
#ifdef CPSW_ETHERSWITCH
	/* etherswitch interface */
	DEVMETHOD(etherswitch_getinfo,	cpsw_getinfo),
	DEVMETHOD(etherswitch_readreg,	cpsw_readreg),
	DEVMETHOD(etherswitch_writereg,	cpsw_writereg),
	DEVMETHOD(etherswitch_readphyreg,	cpsw_readphy),
	DEVMETHOD(etherswitch_writephyreg,	cpsw_writephy),
	DEVMETHOD(etherswitch_getport,	cpsw_getport),
	DEVMETHOD(etherswitch_setport,	cpsw_setport),
	DEVMETHOD(etherswitch_getvgroup,	cpsw_getvgroup),
	DEVMETHOD(etherswitch_setvgroup,	cpsw_setvgroup),
	DEVMETHOD(etherswitch_getconf,	cpsw_getconf),
#endif
	DEVMETHOD_END
};

static driver_t cpsw_driver = {
	"cpswss",
	cpsw_methods,
	sizeof(struct cpsw_softc),
};

static devclass_t cpsw_devclass;

/* The switch subsystem attaches below simplebus, driven by the FDT. */
DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
215
216/* Port/Slave resources. */
/* Port/Slave resources. */
/*
 * Method table for the per-port "cpsw" driver.  Each port is a child
 * of the subsystem device ("cpswss") and provides the actual network
 * interface plus the MII accessors used by its attached PHY.
 */
static device_method_t cpswp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpswp_probe),
	DEVMETHOD(device_attach,	cpswp_attach),
	DEVMETHOD(device_detach,	cpswp_detach),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	cpswp_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cpswp_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cpswp_miibus_statchg),
	DEVMETHOD_END
};

static driver_t cpswp_driver = {
	"cpsw",
	cpswp_methods,
	sizeof(struct cpswp_softc),
};

static devclass_t cpswp_devclass;

#ifdef CPSW_ETHERSWITCH
DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, etherswitch_devclass, 0, 0);
MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1);
#endif

DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
246
#ifdef CPSW_ETHERSWITCH
/* Software VLAN group table backing the etherswitch vgroup methods. */
static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS];
#endif

/*
 * Physical addresses of the two "slave@..." MDIO child nodes in the
 * FDT, indexed by port; used to match DT children in cpsw_get_fdt_data().
 */
static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 };

/* The controller's four interrupt lines, allocated in cpsw_attach(). */
static struct resource_spec irq_res_spec[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

/*
 * Interrupt handlers, in the same order as the IRQ resources above;
 * cpsw_intr_attach() wires irq_res[i] to cpsw_intr_cb[i].cb.
 */
static struct {
	void (*cb)(void *);
} cpsw_intr_cb[] = {
	{ cpsw_intr_rx_thresh },
	{ cpsw_intr_rx },
	{ cpsw_intr_tx },
	{ cpsw_intr_misc },
};
269
/*
 * Hardware statistics registers exported via sysctl: each entry is an
 * offset into the CPSW statistics register block plus the sysctl leaf
 * name.  The number of entries here must match the size of the stats
 * array in struct cpswp_softc (CPSW_SYSCTL_COUNT).  Note the table
 * jumps from 0x24 to 0x30; offsets 0x28/0x2c are not listed.
 */
static struct cpsw_stat {
	int	reg;
	char *oid;
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
	{0x00, "GoodRxFrames"},
	{0x04, "BroadcastRxFrames"},
	{0x08, "MulticastRxFrames"},
	{0x0C, "PauseRxFrames"},
	{0x10, "RxCrcErrors"},
	{0x14, "RxAlignErrors"},
	{0x18, "OversizeRxFrames"},
	{0x1c, "RxJabbers"},
	{0x20, "ShortRxFrames"},
	{0x24, "RxFragments"},
	{0x30, "RxOctets"},
	{0x34, "GoodTxFrames"},
	{0x38, "BroadcastTxFrames"},
	{0x3c, "MulticastTxFrames"},
	{0x40, "PauseTxFrames"},
	{0x44, "DeferredTxFrames"},
	{0x48, "CollisionsTxFrames"},
	{0x4c, "SingleCollisionTxFrames"},
	{0x50, "MultipleCollisionTxFrames"},
	{0x54, "ExcessiveCollisions"},
	{0x58, "LateCollisions"},
	{0x5c, "TxUnderrun"},
	{0x60, "CarrierSenseErrors"},
	{0x64, "TxOctets"},
	{0x68, "RxTx64OctetFrames"},
	{0x6c, "RxTx65to127OctetFrames"},
	{0x70, "RxTx128to255OctetFrames"},
	{0x74, "RxTx256to511OctetFrames"},
	{0x78, "RxTx512to1024OctetFrames"},
	{0x7c, "RxTx1024upOctetFrames"},
	{0x80, "NetOctets"},
	{0x84, "RxStartOfFrameOverruns"},
	{0x88, "RxMiddleOfFrameOverruns"},
	{0x8c, "RxDmaOverruns"}
};
311
312/*
313 * Basic debug support.
314 */
315
316static void
317cpsw_debugf_head(const char *funcname)
318{
319	int t = (int)(time_second % (24 * 60 * 60));
320
321	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
322}
323
/*
 * Print the body of a debug message (printf-style) followed by a
 * newline.  Used via the CPSW_DEBUGF() macro below.
 */
static void
cpsw_debugf(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
	printf("\n");
}
335
/*
 * Emit a timestamped debug line, but only when the unit's debug flag
 * is set.  "a" is a parenthesized printf-style argument list, e.g.
 * CPSW_DEBUGF(sc, ("slot %d", i)).
 */
#define	CPSW_DEBUGF(_sc, a) do {					\
	if ((_sc)->debug) {						\
		cpsw_debugf_head(__func__);				\
		cpsw_debugf a;						\
	}								\
} while (0)
342
343/*
344 * Locking macros
345 */
346#define	CPSW_TX_LOCK(sc) do {						\
347		mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);		\
348		mtx_lock(&(sc)->tx.lock);				\
349} while (0)
350
351#define	CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
352#define	CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)
353
354#define	CPSW_RX_LOCK(sc) do {						\
355		mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);		\
356		mtx_lock(&(sc)->rx.lock);				\
357} while (0)
358
359#define	CPSW_RX_UNLOCK(sc)		mtx_unlock(&(sc)->rx.lock)
360#define	CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)
361
362#define CPSW_PORT_LOCK(_sc) do {					\
363		mtx_assert(&(_sc)->lock, MA_NOTOWNED);			\
364		mtx_lock(&(_sc)->lock);					\
365} while (0)
366
367#define	CPSW_PORT_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)
368#define	CPSW_PORT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->lock, MA_OWNED)
369
370/*
371 * Read/Write macros
372 */
373#define	cpsw_read_4(_sc, _reg)		bus_read_4((_sc)->mem_res, (_reg))
374#define	cpsw_write_4(_sc, _reg, _val)					\
375	bus_write_4((_sc)->mem_res, (_reg), (_val))
376
377#define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))
378
379#define	cpsw_cpdma_bd_paddr(sc, slot)					\
380	BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset)
381#define	cpsw_cpdma_read_bd(sc, slot, val)				\
382	bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
383#define	cpsw_cpdma_write_bd(sc, slot, val)				\
384	bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
385#define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)			\
386	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
387#define	cpsw_cpdma_write_bd_flags(sc, slot, val)			\
388	bus_write_2(sc->mem_res, slot->bd_offset + 14, val)
389#define	cpsw_cpdma_read_bd_flags(sc, slot)				\
390	bus_read_2(sc->mem_res, slot->bd_offset + 14)
391#define	cpsw_write_hdp_slot(sc, queue, slot)				\
392	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
393#define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
394#define	cpsw_read_cp(sc, queue)						\
395	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
396#define	cpsw_write_cp(sc, queue, val)					\
397	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
398#define	cpsw_write_cp_slot(sc, queue, slot)				\
399	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
400
#if 0
/* XXX temporary function versions for debugging. */
/*
 * NOTE(review): this disabled code appears to predate the current
 * two-argument CPSW_DEBUGF(sc, (...)) macro -- the calls below omit
 * the softc argument and would need updating before re-enabling.
 */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t reg = queue->hdp_offset;
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
	cpsw_write_4(sc, reg, v);
}

static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
	cpsw_write_cp(sc, queue, v);
}
#endif
420
421/*
422 * Expanded dump routines for verbose debugging.
423 */
/*
 * Pretty-print one CPDMA buffer descriptor: its physical address and
 * next pointer, buffer pointer/length/offset, packet length, and a
 * symbolic decode of the 16 flag bits (most-significant bit first).
 * If the slot has an mbuf attached, also hex-dump the 14-byte
 * Ethernet header and the start of the payload ("%D" is the FreeBSD
 * kernel printf hex-dump conversion).
 */
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	/* Flag names, ordered from bit 15 down to bit 0. */
	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
	    "Port0"};
	struct cpsw_cpdma_bd bd;
	const char *sep;
	int i;

	cpsw_cpdma_read_bd(sc, slot, &bd);
	printf("BD Addr : 0x%08x   Next  : 0x%08x\n",
	    cpsw_cpdma_bd_paddr(sc, slot), bd.next);
	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
	printf("  Flags: ");
	sep = "";
	for (i = 0; i < 16; ++i) {
		if (bd.flags & (1 << (15 - i))) {
			printf("%s%s", sep, flags[i]);
			sep = ",";
		}
	}
	printf("\n");
	if (slot->mbuf) {
		printf("  Ether:  %14D\n",
		    (char *)(slot->mbuf->m_data), " ");
		printf("  Packet: %16D\n",
		    (char *)(slot->mbuf->m_data) + 14, " ");
	}
}
456
/*
 * Dump a buffer descriptor slot when the unit's debug flag is set.
 * The macro parameter was previously named "cs" while the body used
 * "sc"; it only expanded correctly because every caller happened to
 * pass a variable literally named "sc".  The parameter is now named
 * "sc" so the macro is hygienic for any caller.
 */
#define	CPSW_DUMP_SLOT(sc, slot) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_slot(sc, slot);			\
	}							\
} while (0)
462
463static void
464cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
465{
466	struct cpsw_slot *slot;
467	int i = 0;
468	int others = 0;
469
470	STAILQ_FOREACH(slot, q, next) {
471		if (i > CPSW_TXFRAGS)
472			++others;
473		else
474			cpsw_dump_slot(sc, slot);
475		++i;
476	}
477	if (others)
478		printf(" ... and %d more.\n", others);
479	printf("\n");
480}
481
/* Dump a whole queue, but only when the unit's debug flag is set. */
#define CPSW_DUMP_QUEUE(sc, q) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_queue(sc, q);				\
	}							\
} while (0)
487
488static void
489cpsw_init_slots(struct cpsw_softc *sc)
490{
491	struct cpsw_slot *slot;
492	int i;
493
494	STAILQ_INIT(&sc->avail);
495
496	/* Put the slot descriptors onto the global avail list. */
497	for (i = 0; i < nitems(sc->_slots); i++) {
498		slot = &sc->_slots[i];
499		slot->bd_offset = cpsw_cpdma_bd_offset(i);
500		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
501	}
502}
503
504static int
505cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
506{
507	const int max_slots = nitems(sc->_slots);
508	struct cpsw_slot *slot;
509	int i;
510
511	if (requested < 0)
512		requested = max_slots;
513
514	for (i = 0; i < requested; ++i) {
515		slot = STAILQ_FIRST(&sc->avail);
516		if (slot == NULL)
517			return (0);
518		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
519			device_printf(sc->dev, "failed to create dmamap\n");
520			return (ENOMEM);
521		}
522		STAILQ_REMOVE_HEAD(&sc->avail, next);
523		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
524		++queue->avail_queue_len;
525		++queue->queue_slots;
526	}
527	return (0);
528}
529
530static void
531cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
532{
533	int error;
534
535	if (slot->dmamap) {
536		if (slot->mbuf)
537			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
538		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
539		KASSERT(error == 0, ("Mapping still active"));
540		slot->dmamap = NULL;
541	}
542	if (slot->mbuf) {
543		m_freem(slot->mbuf);
544		slot->mbuf = NULL;
545	}
546}
547
/*
 * Quiesce and soft-reset the whole CPSW complex: the RMII/RGMII
 * wrapper, the switch subsystem, both sliver (MAC) ports and the
 * CPDMA engine.  Leaves DMA disabled, all queue head/completion
 * pointers cleared and all CPDMA interrupts masked.  Each block is
 * reset by writing 1 to its SOFT_RESET register and busy-waiting
 * until the hardware clears the bit.
 */
static void
cpsw_reset(struct cpsw_softc *sc)
{
	int i;

	/* Stop the TX watchdog; the controller is about to go down. */
	callout_stop(&sc->watchdog.callout);

	/* Reset RMII/RGMII wrapper. */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Reset CPSW subsystem. */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset DMA controller. */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt Masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
602
/*
 * Program the controller for operation after a reset: configure the
 * ALE (VLAN-aware in dual-EMAC mode), host-port mappings, statistics,
 * DMA, interrupts and MDIO, then prime the TX/RX queues and start the
 * TX watchdog.  Must be called with the hardware already quiesced by
 * cpsw_reset().
 */
static void
cpsw_init(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	uint32_t reg;

	/* Disable the interrupt pacing. */
	reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
	reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg);

	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL);

	/* Enable ALE; VLAN-aware only when each port has its own VLAN. */
	reg = CPSW_ALE_CTL_ENABLE;
	if (sc->dualemac)
		reg |= CPSW_ALE_CTL_VLAN_AWARE;
	cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);

	/* Set Host Port Mapping. */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: set host port to forwarding(3). */
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0),
	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Turn off flow control. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Make IP hdr aligned with 4 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable Interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX and TX on Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET,
	    CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0));
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff);

	/* Select MII in GMII_SEL, Internal Delay mode */
	//ti_scm_reg_write_4(0x650, 0);

	/* Initialize active queues: point HDP at the first active slot. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len);
	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS);

	/* Activate network interface. */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_init(&sc->watchdog.callout, 0);
	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}
689
690/*
691 *
692 * Device Probe, Attach, Detach.
693 *
694 */
695
696static int
697cpsw_probe(device_t dev)
698{
699
700	if (!ofw_bus_status_okay(dev))
701		return (ENXIO);
702
703	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
704		return (ENXIO);
705
706	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
707	return (BUS_PROBE_DEFAULT);
708}
709
710static int
711cpsw_intr_attach(struct cpsw_softc *sc)
712{
713	int i;
714
715	for (i = 0; i < CPSW_INTR_COUNT; i++) {
716		if (bus_setup_intr(sc->dev, sc->irq_res[i],
717		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
718		    cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) {
719			return (-1);
720		}
721	}
722
723	return (0);
724}
725
726static void
727cpsw_intr_detach(struct cpsw_softc *sc)
728{
729	int i;
730
731	for (i = 0; i < CPSW_INTR_COUNT; i++) {
732		if (sc->ih_cookie[i]) {
733			bus_teardown_intr(sc->dev, sc->irq_res[i],
734			    sc->ih_cookie[i]);
735		}
736	}
737}
738
/*
 * Walk the controller's FDT children for the "slave@<addr>" node that
 * matches the given port's MDIO address, then extract that port's PHY
 * address (preferring phy-handle via fdt_get_phyaddr(), falling back
 * to the legacy "phy_id" property) and its dual-EMAC reserved VLAN
 * ("dual_emac_res_vlan", left at -1 when absent).  Results are stored
 * in sc->port[port].  Returns ENXIO when no PHY address was found.
 */
static int
cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
{
	char *name;
	int len, phy, vlan;
	pcell_t phy_id[3], vlan_id;
	phandle_t child;
	unsigned long mdio_child_addr;

	/* Find any slave with phy-handle/phy_id */
	phy = -1;
	vlan = -1;
	for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
		if (OF_getprop_alloc(child, "name", (void **)&name) < 0)
			continue;
		if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) {
			OF_prop_free(name);
			continue;
		}
		OF_prop_free(name);

		/* Match either the full address or its low 12 bits. */
		if (mdio_child_addr != slave_mdio_addr[port] &&
		    mdio_child_addr != (slave_mdio_addr[port] & 0xFFF))
			continue;

		if (fdt_get_phyaddr(child, NULL, &phy, NULL) != 0){
			/* Users with old DTB will have phy_id instead */
			phy = -1;
			len = OF_getproplen(child, "phy_id");
			if (len / sizeof(pcell_t) == 2) {
				/* Get phy address from fdt */
				if (OF_getencprop(child, "phy_id", phy_id, len) > 0)
					phy = phy_id[1];
			}
		}

		len = OF_getproplen(child, "dual_emac_res_vlan");
		if (len / sizeof(pcell_t) == 1) {
			/* Get the reserved VLAN id from fdt */
			if (OF_getencprop(child, "dual_emac_res_vlan",
			    &vlan_id, len) > 0) {
				vlan = vlan_id;
			}
		}

		break;
	}
	if (phy == -1)
		return (ENXIO);
	sc->port[port].phy = phy;
	sc->port[port].vlan = vlan;

	return (0);
}
793
/*
 * Main attach for the switch subsystem: parse the FDT (active slave,
 * dual-EMAC mode, per-port PHY/VLAN data), allocate IRQ/memory/DMA
 * resources, carve the CPPI descriptor RAM into slots, reset and
 * program the hardware, and finally create one "cpsw" child per
 * active port.  On any failure, partially initialized state is torn
 * down via cpsw_detach() before returning an error.
 */
static int
cpsw_attach(device_t dev)
{
	int error, i;
	struct cpsw_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);
	getbinuptime(&sc->attach_uptime);

	/* Default to slave 0 when the DT does not specify one. */
	if (OF_getencprop(sc->node, "active_slave", &sc->active_slave,
	    sizeof(sc->active_slave)) <= 0) {
		sc->active_slave = 0;
	}
	if (sc->active_slave > 1)
		sc->active_slave = 1;

	if (OF_hasprop(sc->node, "dual_emac"))
		sc->dualemac = 1;

	/* Pull PHY/VLAN data for each port that will be used. */
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		if (cpsw_get_fdt_data(sc, i) != 0) {
			device_printf(dev,
			    "failed to get PHY address from FDT\n");
			return (ENXIO);
		}
	}

	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IRQ resources */
	error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res);
	if (error) {
		device_printf(dev, "could not allocate IRQ resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->mem_rid, RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(sc->dev, "failed to allocate memory resource\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
		reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for mbufs. */
	error = bus_dma_tag_create(
		bus_get_dma_tag(sc->dev),	/* parent */
		1, 0,				/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filtfunc, filtfuncarg */
		MCLBYTES, CPSW_TXFRAGS,		/* maxsize, nsegments */
		MCLBYTES, 0,			/* maxsegsz, flags */
		NULL, NULL,			/* lockfunc, lockfuncarg */
		&sc->mbuf_dtag);		/* dmatag */
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed\n");
		cpsw_detach(dev);
		return (error);
	}

	/* Allocate a NULL buffer for padding short frames. */
	sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO);

	cpsw_init_slots(sc);

	/* Allocate slots to TX and RX queues. */
	STAILQ_INIT(&sc->rx.avail);
	STAILQ_INIT(&sc->rx.active);
	STAILQ_INIT(&sc->tx.avail);
	STAILQ_INIT(&sc->tx.active);
	// For now:  128 slots to TX, rest to RX.
	// XXX TODO: start with 32/64 and grow dynamically based on demand.
	if (cpsw_add_slots(sc, &sc->tx, 128) ||
	    cpsw_add_slots(sc, &sc->rx, -1)) {
		device_printf(dev, "failed to allocate dmamaps\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
	    sc->tx.queue_slots, sc->rx.queue_slots);

	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);

	if (cpsw_intr_attach(sc) == -1) {
		device_printf(dev, "failed to setup interrupts\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

#ifdef CPSW_ETHERSWITCH
	for (i = 0; i < CPSW_VLANS; i++)
		cpsw_vgroups[i].vid = -1;
#endif

	/* Reset the controller. */
	cpsw_reset(sc);
	cpsw_init(sc);

	/* Create one "cpsw" port child per active slave. */
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		sc->port[i].dev = device_add_child(dev, "cpsw", i);
		if (sc->port[i].dev == NULL) {
			cpsw_detach(dev);
			return (ENXIO);
		}
	}
	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}
925
/*
 * Tear down everything cpsw_attach() built.  Must be safe to call on
 * a partially attached device, so every resource is checked before it
 * is released.
 */
static int
cpsw_detach(device_t dev)
{
	struct cpsw_softc *sc;
	int error, i;

	bus_generic_detach(dev);
 	sc = device_get_softc(dev);

	/* Delete the per-port children created during attach. */
	for (i = 0; i < CPSW_PORTS; i++) {
		if (sc->port[i].dev)
			device_delete_child(dev, sc->port[i].dev);
	}

	if (device_is_attached(dev)) {
		callout_stop(&sc->watchdog.callout);
		callout_drain(&sc->watchdog.callout);
	}

	/* Stop and release all interrupts */
	cpsw_intr_detach(sc);

	/* Free dmamaps and mbufs */
	for (i = 0; i < nitems(sc->_slots); ++i)
		cpsw_free_slot(sc, &sc->_slots[i]);

	/* Free null padding buffer. */
	if (sc->nullpad)
		free(sc->nullpad, M_DEVBUF);

	/* Free DMA tag */
	if (sc->mbuf_dtag) {
		error = bus_dma_tag_destroy(sc->mbuf_dtag);
		KASSERT(error == 0, ("Unable to destroy DMA tag"));
	}

	/* Free IO memory handler */
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res);
	bus_release_resources(dev, irq_res_spec, sc->irq_res);

	/* Destroy mutexes */
	mtx_destroy(&sc->rx.lock);
	mtx_destroy(&sc->tx.lock);

	/* Detach the switch device, if present. */
	/*
	 * NOTE(review): bus_generic_detach() already ran once at the top
	 * of this function; confirm the second call is intentional.
	 */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	return (device_delete_children(dev));
}
978
979static phandle_t
980cpsw_get_node(device_t bus, device_t dev)
981{
982
983	/* Share controller node with port device. */
984	return (ofw_bus_get_node(bus));
985}
986
987static int
988cpswp_probe(device_t dev)
989{
990
991	if (device_get_unit(dev) > 1) {
992		device_printf(dev, "Only two ports are supported.\n");
993		return (ENXIO);
994	}
995	device_set_desc(dev, "Ethernet Switch Port");
996
997	return (BUS_PROBE_DEFAULT);
998}
999
/*
 * Attach one switch port: resolve its PHY/VLAN from the parent's FDT
 * data, create the ifnet, read the factory MAC address out of the
 * control module via syscon, attach the MII bus for the port's PHY
 * and finally register the Ethernet interface.  Failures unwind
 * through cpswp_detach().
 */
static int
cpswp_attach(device_t dev)
{
	int error;
	struct ifnet *ifp;
	struct cpswp_softc *sc;
	uint32_t reg;
	uint8_t mac_addr[ETHER_ADDR_LEN];
	phandle_t opp_table;
	struct syscon *syscon;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->pdev = device_get_parent(dev);
	sc->swsc = device_get_softc(sc->pdev);
	sc->unit = device_get_unit(dev);
	sc->phy = sc->swsc->port[sc->unit].phy;
	sc->vlan = sc->swsc->port[sc->unit].vlan;
	/* In dual-EMAC mode without a DT-supplied VLAN, use unit + 1. */
	if (sc->swsc->dualemac && sc->vlan == -1)
		sc->vlan = sc->unit + 1;

	/* Each port has its own MDIO PHY-select/access register pair. */
	if (sc->unit == 0) {
		sc->physel = MDIOUSERPHYSEL0;
		sc->phyaccess = MDIOUSERACCESS0;
	} else {
		sc->physel = MDIOUSERPHYSEL1;
		sc->phyaccess = MDIOUSERACCESS1;
	}

	mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock",
	    MTX_DEF);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		cpswp_detach(dev);
		return (ENXIO);
	}

	if_initname(ifp, device_get_name(sc->dev), sc->unit);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_init = cpswp_init;
	ifp->if_start = cpswp_start;
	ifp->if_ioctl = cpswp_ioctl;

	/* Size the send queue to match the TX descriptor ring. */
	ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/* FIXME: For now; Go and kidnap syscon from opp-table */
	/* ti,cpsw actually have an optional syscon reference but only for am33xx?? */
	opp_table = OF_finddevice("/opp-table");
	if (opp_table == -1) {
		device_printf(dev, "Cant find /opp-table\n");
		cpswp_detach(dev);
		return (ENXIO);
	}
	if (!OF_hasprop(opp_table, "syscon")) {
		device_printf(dev, "/opp-table doesnt have required syscon property\n");
		cpswp_detach(dev);
		return (ENXIO);
	}
	if (syscon_get_by_ofw_property(dev, opp_table, "syscon", &syscon) != 0) {
		device_printf(dev, "Failed to get syscon\n");
		cpswp_detach(dev);
		return (ENXIO);
	}

	/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
	reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_HI + sc->unit * 8);
	mac_addr[0] = reg & 0xFF;
	mac_addr[1] = (reg >>  8) & 0xFF;
	mac_addr[2] = (reg >> 16) & 0xFF;
	mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
	reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_LO + sc->unit * 8);
	mac_addr[4] = reg & 0xFF;
	mac_addr[5] = (reg >>  8) & 0xFF;

	error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd,
	    cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		cpswp_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc->swsc, sc->physel,
	    MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));

	ether_ifattach(sc->ifp, mac_addr);
	callout_init(&sc->mii_callout, 0);

	return (0);
}
1102
1103static int
1104cpswp_detach(device_t dev)
1105{
1106	struct cpswp_softc *sc;
1107
1108	sc = device_get_softc(dev);
1109	CPSW_DEBUGF(sc->swsc, (""));
1110	if (device_is_attached(dev)) {
1111		ether_ifdetach(sc->ifp);
1112		CPSW_PORT_LOCK(sc);
1113		cpswp_stop_locked(sc);
1114		CPSW_PORT_UNLOCK(sc);
1115		callout_drain(&sc->mii_callout);
1116	}
1117
1118	bus_generic_detach(dev);
1119
1120	if_free(sc->ifp);
1121	mtx_destroy(&sc->lock);
1122
1123	return (0);
1124}
1125
1126/*
1127 *
1128 * Init/Shutdown.
1129 *
1130 */
1131
1132static int
1133cpsw_ports_down(struct cpsw_softc *sc)
1134{
1135	struct cpswp_softc *psc;
1136	struct ifnet *ifp1, *ifp2;
1137
1138	if (!sc->dualemac)
1139		return (1);
1140	psc = device_get_softc(sc->port[0].dev);
1141	ifp1 = psc->ifp;
1142	psc = device_get_softc(sc->port[1].dev);
1143	ifp2 = psc->ifp;
1144	if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0)
1145		return (1);
1146
1147	return (0);
1148}
1149
1150static void
1151cpswp_init(void *arg)
1152{
1153	struct cpswp_softc *sc = arg;
1154
1155	CPSW_DEBUGF(sc->swsc, (""));
1156	CPSW_PORT_LOCK(sc);
1157	cpswp_init_locked(arg);
1158	CPSW_PORT_UNLOCK(sc);
1159}
1160
/*
 * Bring the port up: reset the controller if no DMA is running,
 * program per-slave priority/VLAN/ALE state, enable the MAC and start
 * the MII tick.  Caller must hold the port lock.
 */
static void
cpswp_init_locked(void *arg)
{
#ifdef CPSW_ETHERSWITCH
	int i;
#endif
	struct cpswp_softc *sc = arg;
	struct ifnet *ifp;
	uint32_t reg;

	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	/* Nothing to do if the interface is already running. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	getbinuptime(&sc->init_uptime);

	/* Only reset the shared controller when neither DMA side runs. */
	if (!sc->swsc->rx.running && !sc->swsc->tx.running) {
		/* Reset the controller. */
		cpsw_reset(sc->swsc);
		cpsw_init(sc->swsc);
	}

	/* Set Slave Mapping. */
	cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210);
	cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1),
	    0x33221100);
	/* 0x5f2 = 1522 bytes: max VLAN-tagged frame length. */
	cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2);
	/* Enable MAC RX/TX modules. */
	/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
	/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
	reg |= CPSW_SL_MACTL_GMII_ENABLE;
	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);

	/* Initialize ALE: set port to forwarding, initialize addrs */
	cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1),
	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);
	cpswp_ale_update_addresses(sc, 1);

	if (sc->swsc->dualemac) {
		/* Set Port VID. */
		cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1),
		    sc->vlan & 0xfff);
		/* Membership always includes this slave and host port 0. */
		cpsw_ale_update_vlan_table(sc->swsc, sc->vlan,
		    (1 << (sc->unit + 1)) | (1 << 0), /* Member list */
		    (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */
		    (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */
#ifdef CPSW_ETHERSWITCH
		/* Claim the first free vgroup slot for this port's VLAN. */
		for (i = 0; i < CPSW_VLANS; i++) {
			if (cpsw_vgroups[i].vid != -1)
				continue;
			cpsw_vgroups[i].vid = sc->vlan;
			break;
		}
#endif
	}

	mii_mediachg(sc->mii);
	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
1225
1226static int
1227cpsw_shutdown(device_t dev)
1228{
1229	struct cpsw_softc *sc;
1230	struct cpswp_softc *psc;
1231	int i;
1232
1233 	sc = device_get_softc(dev);
1234	CPSW_DEBUGF(sc, (""));
1235	for (i = 0; i < CPSW_PORTS; i++) {
1236		if (!sc->dualemac && i != sc->active_slave)
1237			continue;
1238		psc = device_get_softc(sc->port[i].dev);
1239		CPSW_PORT_LOCK(psc);
1240		cpswp_stop_locked(psc);
1241		CPSW_PORT_UNLOCK(psc);
1242	}
1243
1244	return (0);
1245}
1246
1247static void
1248cpsw_rx_teardown(struct cpsw_softc *sc)
1249{
1250	int i = 0;
1251
1252	CPSW_RX_LOCK(sc);
1253	CPSW_DEBUGF(sc, ("starting RX teardown"));
1254	sc->rx.teardown = 1;
1255	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
1256	CPSW_RX_UNLOCK(sc);
1257	while (sc->rx.running) {
1258		if (++i > 10) {
1259			device_printf(sc->dev,
1260			    "Unable to cleanly shutdown receiver\n");
1261			return;
1262		}
1263		DELAY(200);
1264	}
1265	if (!sc->rx.running)
1266		CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i));
1267}
1268
/*
 * Tear down the TX DMA channel, draining completed buffers while
 * waiting (up to ~2ms) for the hardware acknowledgement.  Called with
 * the TX lock not held; taken here.
 */
static void
cpsw_tx_teardown(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_TX_LOCK(sc);
	CPSW_DEBUGF(sc, ("starting TX teardown"));
	/* Start the TX queue teardown if queue is not empty. */
	if (STAILQ_FIRST(&sc->tx.active) != NULL)
		cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	else
		sc->tx.teardown = 1;
	cpsw_tx_dequeue(sc);
	/* Poll for completion; cpsw_tx_dequeue() clears tx.running. */
	while (sc->tx.running && ++i < 10) {
		DELAY(200);
		cpsw_tx_dequeue(sc);
	}
	if (sc->tx.running) {
		device_printf(sc->dev,
		    "Unable to cleanly shutdown transmitter\n");
	}
	CPSW_DEBUGF(sc,
	    ("finished TX teardown (%d retries, %d idle buffers)", i,
	     sc->tx.active_queue_len));
	CPSW_TX_UNLOCK(sc);
}
1295
/*
 * Stop one port: mark it down, cancel the MII tick, and — once no port
 * is up any more — tear down the shared DMA queues and reset the
 * controller.  Caller must hold the port lock.
 */
static void
cpswp_stop_locked(struct cpswp_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	ifp = sc->ifp;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	/* Stop ticker */
	callout_stop(&sc->mii_callout);

	/* Tear down the RX/TX queues. */
	/* Only when both ports are down; the queues are shared. */
	if (cpsw_ports_down(sc->swsc)) {
		cpsw_rx_teardown(sc->swsc);
		cpsw_tx_teardown(sc->swsc);
	}

	/* Stop MAC RX/TX modules. */
	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
	reg &= ~CPSW_SL_MACTL_GMII_ENABLE;
	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);

	if (cpsw_ports_down(sc->swsc)) {
		/* Capture stats before we reset controller. */
		cpsw_stats_collect(sc->swsc);

		cpsw_reset(sc->swsc);
		cpsw_init(sc->swsc);
	}
}
1335
1336/*
1337 *  Suspend/Resume.
1338 */
1339
1340static int
1341cpsw_suspend(device_t dev)
1342{
1343	struct cpsw_softc *sc;
1344	struct cpswp_softc *psc;
1345	int i;
1346
1347	sc = device_get_softc(dev);
1348	CPSW_DEBUGF(sc, (""));
1349	for (i = 0; i < CPSW_PORTS; i++) {
1350		if (!sc->dualemac && i != sc->active_slave)
1351			continue;
1352		psc = device_get_softc(sc->port[i].dev);
1353		CPSW_PORT_LOCK(psc);
1354		cpswp_stop_locked(psc);
1355		CPSW_PORT_UNLOCK(psc);
1356	}
1357
1358	return (0);
1359}
1360
1361static int
1362cpsw_resume(device_t dev)
1363{
1364	struct cpsw_softc *sc;
1365
1366	sc  = device_get_softc(dev);
1367	CPSW_DEBUGF(sc, ("UNIMPLEMENTED"));
1368
1369	return (0);
1370}
1371
1372/*
1373 *
1374 *  IOCTL
1375 *
1376 */
1377
1378static void
1379cpsw_set_promisc(struct cpswp_softc *sc, int set)
1380{
1381	uint32_t reg;
1382
1383	/*
1384	 * Enabling promiscuous mode requires ALE_BYPASS to be enabled.
1385	 * That disables the ALE forwarding logic and causes every
1386	 * packet to be sent only to the host port.  In bypass mode,
1387	 * the ALE processes host port transmit packets the same as in
1388	 * normal mode.
1389	 */
1390	reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL);
1391	reg &= ~CPSW_ALE_CTL_BYPASS;
1392	if (set)
1393		reg |= CPSW_ALE_CTL_BYPASS;
1394	cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg);
1395}
1396
/*
 * Toggle all-multicast mode.  Not implemented; just complain when
 * someone tries to enable it.
 */
static void
cpsw_set_allmulti(struct cpswp_softc *sc, int set)
{

	if (!set)
		return;
	printf("All-multicast mode unimplemented\n");
}
1404
/*
 * Per-port ioctl handler.  Only SIOCSIFFLAGS takes the port lock; the
 * multicast updates and media ioctls rely on the callee's own locking.
 */
static int
cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cpswp_softc *sc;
	struct ifreq *ifr;
	int error;
	uint32_t changed;

	error = 0;
	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFCAP:
		/* Only hardware checksum offload can be toggled. */
		changed = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (changed & IFCAP_HWCSUM) {
			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM)
				ifp->if_capenable |= IFCAP_HWCSUM;
			else
				ifp->if_capenable &= ~IFCAP_HWCSUM;
		}
		error = 0;
		break;
	case SIOCSIFFLAGS:
		CPSW_PORT_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Already running: apply flag deltas only. */
				changed = ifp->if_flags ^ sc->if_flags;
				CPSW_DEBUGF(sc->swsc,
				    ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)",
				    changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    ifp->if_flags & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    ifp->if_flags & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(sc->swsc,
				    ("SIOCSIFFLAGS: starting up"));
				cpswp_init_locked(sc);
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down"));
			cpswp_stop_locked(sc);
		}

		/* Remember the flags so future deltas can be computed. */
		sc->if_flags = ifp->if_flags;
		CPSW_PORT_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		cpswp_ale_update_addresses(sc, 0);
		break;
	case SIOCDELMULTI:
		/* Ugh.  DELMULTI doesn't provide the specific address
		   being removed, so the best we can do is remove
		   everything and rebuild it all. */
		cpswp_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}
1473
1474/*
1475 *
1476 * MIIBUS
1477 *
1478 */
1479static int
1480cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg)
1481{
1482	uint32_t r, retries = CPSW_MIIBUS_RETRIES;
1483
1484	while (--retries) {
1485		r = cpsw_read_4(sc, reg);
1486		if ((r & MDIO_PHYACCESS_GO) == 0)
1487			return (1);
1488		DELAY(CPSW_MIIBUS_DELAY);
1489	}
1490
1491	return (0);
1492}
1493
1494static int
1495cpswp_miibus_readreg(device_t dev, int phy, int reg)
1496{
1497	struct cpswp_softc *sc;
1498	uint32_t cmd, r;
1499
1500	sc = device_get_softc(dev);
1501	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1502		device_printf(dev, "MDIO not ready to read\n");
1503		return (0);
1504	}
1505
1506	/* Set GO, reg, phy */
1507	cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
1508	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);
1509
1510	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1511		device_printf(dev, "MDIO timed out during read\n");
1512		return (0);
1513	}
1514
1515	r = cpsw_read_4(sc->swsc, sc->phyaccess);
1516	if ((r & MDIO_PHYACCESS_ACK) == 0) {
1517		device_printf(dev, "Failed to read from PHY.\n");
1518		r = 0;
1519	}
1520	return (r & 0xFFFF);
1521}
1522
1523static int
1524cpswp_miibus_writereg(device_t dev, int phy, int reg, int value)
1525{
1526	struct cpswp_softc *sc;
1527	uint32_t cmd;
1528
1529	sc = device_get_softc(dev);
1530	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1531		device_printf(dev, "MDIO not ready to write\n");
1532		return (0);
1533	}
1534
1535	/* Set GO, WRITE, reg, phy, and value */
1536	cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE |
1537	    (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF);
1538	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);
1539
1540	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
1541		device_printf(dev, "MDIO timed out during write\n");
1542		return (0);
1543	}
1544
1545	return (0);
1546}
1547
1548static void
1549cpswp_miibus_statchg(device_t dev)
1550{
1551	struct cpswp_softc *sc;
1552	uint32_t mac_control, reg;
1553
1554	sc = device_get_softc(dev);
1555	CPSW_DEBUGF(sc->swsc, (""));
1556
1557	reg = CPSW_SL_MACCONTROL(sc->unit);
1558	mac_control = cpsw_read_4(sc->swsc, reg);
1559	mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
1560	    CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);
1561
1562	switch(IFM_SUBTYPE(sc->mii->mii_media_active)) {
1563	case IFM_1000_SX:
1564	case IFM_1000_LX:
1565	case IFM_1000_CX:
1566	case IFM_1000_T:
1567		mac_control |= CPSW_SL_MACTL_GIG;
1568		break;
1569
1570	case IFM_100_TX:
1571		mac_control |= CPSW_SL_MACTL_IFCTL_A;
1572		break;
1573	}
1574	if (sc->mii->mii_media_active & IFM_FDX)
1575		mac_control |= CPSW_SL_MACTL_FULLDUPLEX;
1576
1577	cpsw_write_4(sc->swsc, reg, mac_control);
1578}
1579
1580/*
1581 *
1582 * Transmit/Receive Packets.
1583 *
1584 */
1585static void
1586cpsw_intr_rx(void *arg)
1587{
1588	struct cpsw_softc *sc;
1589	struct ifnet *ifp;
1590	struct mbuf *received, *next;
1591
1592	sc = (struct cpsw_softc *)arg;
1593	CPSW_RX_LOCK(sc);
1594	if (sc->rx.teardown) {
1595		sc->rx.running = 0;
1596		sc->rx.teardown = 0;
1597		cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
1598	}
1599	received = cpsw_rx_dequeue(sc);
1600	cpsw_rx_enqueue(sc);
1601	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
1602	CPSW_RX_UNLOCK(sc);
1603
1604	while (received != NULL) {
1605		next = received->m_nextpkt;
1606		received->m_nextpkt = NULL;
1607		ifp = received->m_pkthdr.rcvif;
1608		(*ifp->if_input)(ifp, received);
1609		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1610		received = next;
1611	}
1612}
1613
1614static struct mbuf *
1615cpsw_rx_dequeue(struct cpsw_softc *sc)
1616{
1617	int nsegs, port, removed;
1618	struct cpsw_cpdma_bd bd;
1619	struct cpsw_slot *last, *slot;
1620	struct cpswp_softc *psc;
1621	struct mbuf *m, *m0, *mb_head, *mb_tail;
1622	uint16_t m0_flags;
1623
1624	nsegs = 0;
1625	m0 = NULL;
1626	last = NULL;
1627	mb_head = NULL;
1628	mb_tail = NULL;
1629	removed = 0;
1630
1631	/* Pull completed packets off hardware RX queue. */
1632	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
1633		cpsw_cpdma_read_bd(sc, slot, &bd);
1634
1635		/*
1636		 * Stop on packets still in use by hardware, but do not stop
1637		 * on packets with the teardown complete flag, they will be
1638		 * discarded later.
1639		 */
1640		if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) ==
1641		    CPDMA_BD_OWNER)
1642			break;
1643
1644		last = slot;
1645		++removed;
1646		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
1647		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);
1648
1649		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
1650		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1651
1652		m = slot->mbuf;
1653		slot->mbuf = NULL;
1654
1655		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
1656			CPSW_DEBUGF(sc, ("RX teardown is complete"));
1657			m_freem(m);
1658			sc->rx.running = 0;
1659			sc->rx.teardown = 0;
1660			break;
1661		}
1662
1663		port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
1664		KASSERT(port >= 0 && port <= 1,
1665		    ("patcket received with invalid port: %d", port));
1666		psc = device_get_softc(sc->port[port].dev);
1667
1668		/* Set up mbuf */
1669		m->m_data += bd.bufoff;
1670		m->m_len = bd.buflen;
1671		if (bd.flags & CPDMA_BD_SOP) {
1672			m->m_pkthdr.len = bd.pktlen;
1673			m->m_pkthdr.rcvif = psc->ifp;
1674			m->m_flags |= M_PKTHDR;
1675			m0_flags = bd.flags;
1676			m0 = m;
1677		}
1678		nsegs++;
1679		m->m_next = NULL;
1680		m->m_nextpkt = NULL;
1681		if (bd.flags & CPDMA_BD_EOP && m0 != NULL) {
1682			if (m0_flags & CPDMA_BD_PASS_CRC)
1683				m_adj(m0, -ETHER_CRC_LEN);
1684			m0_flags = 0;
1685			m0 = NULL;
1686			if (nsegs > sc->rx.longest_chain)
1687				sc->rx.longest_chain = nsegs;
1688			nsegs = 0;
1689		}
1690
1691		if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1692			/* check for valid CRC by looking into pkt_err[5:4] */
1693			if ((bd.flags &
1694			    (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) ==
1695			    CPDMA_BD_SOP) {
1696				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1697				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1698				m->m_pkthdr.csum_data = 0xffff;
1699			}
1700		}
1701
1702		if (STAILQ_FIRST(&sc->rx.active) != NULL &&
1703		    (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
1704		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
1705			cpsw_write_hdp_slot(sc, &sc->rx,
1706			    STAILQ_FIRST(&sc->rx.active));
1707			sc->rx.queue_restart++;
1708		}
1709
1710		/* Add mbuf to packet list to be returned. */
1711		if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) {
1712			mb_tail->m_nextpkt = m;
1713		} else if (mb_tail != NULL) {
1714			mb_tail->m_next = m;
1715		} else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) {
1716			if (bootverbose)
1717				printf(
1718				    "%s: %s: discanding fragment packet w/o header\n",
1719				    __func__, psc->ifp->if_xname);
1720			m_freem(m);
1721			continue;
1722		} else {
1723			mb_head = m;
1724		}
1725		mb_tail = m;
1726	}
1727
1728	if (removed != 0) {
1729		cpsw_write_cp_slot(sc, &sc->rx, last);
1730		sc->rx.queue_removes += removed;
1731		sc->rx.avail_queue_len += removed;
1732		sc->rx.active_queue_len -= removed;
1733		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
1734			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
1735		CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed));
1736	}
1737
1738	return (mb_head);
1739}
1740
/*
 * Refill the hardware RX queue from the avail list, allocating mbuf
 * clusters as needed and linking the new descriptors onto the tail of
 * the active ring.  Caller must hold the RX lock.
 */
static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot;
	int error, nsegs, added = 0;

	/* Register new mbufs with hardware. */
	first_new_slot = NULL;
	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
		if (first_new_slot == NULL)
			first_new_slot = slot;
		/* Slots recycled from a completed packet keep no mbuf. */
		if (slot->mbuf == NULL) {
			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (slot->mbuf == NULL) {
				device_printf(sc->dev,
				    "Unable to fill RX queue\n");
				break;
			}
			slot->mbuf->m_len =
			    slot->mbuf->m_pkthdr.len =
			    slot->mbuf->m_ext.ext_size;
		}

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		/* A fresh cluster always maps to a single segment. */
		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			device_printf(sc->dev,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);

		/* Create and submit new rx descriptor. */
		if ((next = STAILQ_NEXT(slot, next)) != NULL)
			bd.next = cpsw_cpdma_bd_paddr(sc, next);
		else
			bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = MCLBYTES - 1;
		bd.pktlen = bd.buflen;
		/* Hand the descriptor to the hardware. */
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		++added;

		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
		STAILQ_INSERT_TAIL(&sc->rx.active, slot, next);
	}

	if (added == 0 || first_new_slot == NULL)
		return;

	CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));

	/* Link new entries to hardware RX queue. */
	if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
	}
	sc->rx.queue_adds += added;
	sc->rx.avail_queue_len -= added;
	sc->rx.active_queue_len += added;
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added);
	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len)
		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
}
1821
1822static void
1823cpswp_start(struct ifnet *ifp)
1824{
1825	struct cpswp_softc *sc;
1826
1827	sc = ifp->if_softc;
1828	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1829	    sc->swsc->tx.running == 0) {
1830		return;
1831	}
1832	CPSW_TX_LOCK(sc->swsc);
1833	cpswp_tx_enqueue(sc);
1834	cpsw_tx_dequeue(sc->swsc);
1835	CPSW_TX_UNLOCK(sc->swsc);
1836}
1837
1838static void
1839cpsw_intr_tx(void *arg)
1840{
1841	struct cpsw_softc *sc;
1842
1843	sc = (struct cpsw_softc *)arg;
1844	CPSW_TX_LOCK(sc);
1845	if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc)
1846		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
1847	cpsw_tx_dequeue(sc);
1848	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
1849	CPSW_TX_UNLOCK(sc);
1850}
1851
/*
 * Move packets from the port's software send queue onto the hardware
 * TX descriptor ring.  Each packet becomes a chain of descriptors
 * (SOP..EOP); short frames are padded to the ethernet minimum.
 * Caller must hold the TX lock.
 */
static void
cpswp_tx_enqueue(struct cpswp_softc *sc)
{
	bus_dma_segment_t segs[CPSW_TXFRAGS];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot;
	struct mbuf *m0;
	int error, nsegs, seg, added = 0, padlen;

	/* Pull pending packets from IF queue and prep them for DMA. */
	last = NULL;
	first_new_slot = NULL;
	last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next);
	while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) {
		IF_DEQUEUE(&sc->ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		slot->mbuf = m0;
		/* Pad runt frames up to the minimum ethernet payload. */
		padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len;
		if (padlen < 0)
			padlen = 0;
		else if (padlen > 0)
			m_append(slot->mbuf, padlen, sc->swsc->nullpad);

		/* Create mapping in DMA memory */
		error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag,
		    slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
		/* If the packet is too fragmented, try to simplify. */
		if (error == EFBIG ||
		    (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) {
			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
			m0 = m_defrag(slot->mbuf, M_NOWAIT);
			if (m0 == NULL) {
				device_printf(sc->dev,
				    "Can't defragment packet; dropping\n");
				m_freem(slot->mbuf);
			} else {
				CPSW_DEBUGF(sc->swsc,
				    ("Requeueing defragmented packet"));
				/* Retry the defragmented mbuf next pass. */
				IF_PREPEND(&sc->ifp->if_snd, m0);
			}
			slot->mbuf = NULL;
			continue;
		}
		if (error != 0) {
			device_printf(sc->dev,
			    "%s: Can't setup DMA (error=%d), dropping packet\n",
			    __func__, error);
			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap,
				BUS_DMASYNC_PREWRITE);

		CPSW_DEBUGF(sc->swsc,
		    ("Queueing TX packet: %d segments + %d pad bytes",
		    nsegs, padlen));

		if (first_new_slot == NULL)
			first_new_slot = slot;

		/* Link from the previous descriptor. */
		if (last != NULL)
			cpsw_cpdma_write_bd_next(sc->swsc, last, slot);

		slot->ifp = sc->ifp;

		/* If there is only one segment, the for() loop
		 * gets skipped and the single buffer gets set up
		 * as both SOP and EOP. */
		if (nsegs > 1) {
			next = STAILQ_NEXT(slot, next);
			bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
		} else
			bd.next = 0;
		/* Start by setting up the first buffer. */
		bd.bufptr = segs[0].ds_addr;
		bd.bufoff = 0;
		bd.buflen = segs[0].ds_len;
		bd.pktlen = m_length(slot->mbuf, NULL);
		bd.flags =  CPDMA_BD_SOP | CPDMA_BD_OWNER;
		if (sc->swsc->dualemac) {
			/* Direct the frame out this port only. */
			bd.flags |= CPDMA_BD_TO_PORT;
			bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
		}
		for (seg = 1; seg < nsegs; ++seg) {
			/* Save the previous buffer (which isn't EOP) */
			cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
			STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
			STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);
			slot = STAILQ_FIRST(&sc->swsc->tx.avail);

			/* Setup next buffer (which isn't SOP) */
			if (nsegs > seg + 1) {
				next = STAILQ_NEXT(slot, next);
				bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
			} else
				bd.next = 0;
			bd.bufptr = segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_OWNER;
		}

		/* Save the final buffer. */
		bd.flags |= CPDMA_BD_EOP;
		cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
		STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
		STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);

		last = slot;
		added += nsegs;
		if (nsegs > sc->swsc->tx.longest_chain)
			sc->swsc->tx.longest_chain = nsegs;

		BPF_MTAP(sc->ifp, m0);
	}

	if (first_new_slot == NULL)
		return;

	/* Attach the list of new buffers to the hardware TX queue. */
	if (last_old_slot != NULL &&
	    (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) &
	     CPDMA_BD_EOQ) == 0) {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot,
		    first_new_slot);
	} else {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot);
	}
	sc->swsc->tx.queue_adds += added;
	sc->swsc->tx.avail_queue_len -= added;
	sc->swsc->tx.active_queue_len += added;
	if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) {
		sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len;
	}
	CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added));
}
1997
/*
 * Reap completed (or torn-down) buffers from the hardware TX queue,
 * free their mbufs and move the slots back to the avail list.  Also
 * restarts the TX queue if the hardware stopped at end-of-queue with
 * more descriptors pending.  Returns the number of buffers removed.
 * Caller must hold the TX lock.
 */
static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot, *last_removed_slot = NULL;
	struct cpsw_cpdma_bd bd;
	uint32_t flags, removed = 0;

	/* Pull completed buffers off the hardware TX queue. */
	slot = STAILQ_FIRST(&sc->tx.active);
	while (slot != NULL) {
		flags = cpsw_cpdma_read_bd_flags(sc, slot);

		/* TearDown complete is only marked on the SOP for the packet. */
		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) ==
		    (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) {
			sc->tx.teardown = 1;
		}

		/* During teardown we drain everything, OWNER or not. */
		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) ==
		    (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0)
			break; /* Hardware is still using this packet. */

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		m_freem(slot->mbuf);
		slot->mbuf = NULL;

		if (slot->ifp) {
			if (sc->tx.teardown == 0)
				if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1);
			else
				if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1);
		}

		/* Dequeue any additional buffers used by this packet. */
		/* Only the SOP slot carried the mbuf; trailing slots are bare. */
		while (slot != NULL && slot->mbuf == NULL) {
			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
			++removed;
			last_removed_slot = slot;
			slot = STAILQ_FIRST(&sc->tx.active);
		}

		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);

		/* Restart the TX queue if necessary. */
		cpsw_cpdma_read_bd(sc, last_removed_slot, &bd);
		if (slot != NULL && bd.next != 0 && (bd.flags &
		    (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) ==
		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
			cpsw_write_hdp_slot(sc, &sc->tx, slot);
			sc->tx.queue_restart++;
			break;
		}
	}

	if (removed != 0) {
		sc->tx.queue_removes += removed;
		sc->tx.active_queue_len -= removed;
		sc->tx.avail_queue_len += removed;
		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
		CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed));
	}

	/* An empty active queue during teardown means we're done. */
	if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) {
		CPSW_DEBUGF(sc, ("TX teardown is complete"));
		sc->tx.teardown = 0;
		sc->tx.running = 0;
	}

	return (removed);
}
2071
2072/*
2073 *
2074 * Miscellaneous interrupts.
2075 *
2076 */
2077
2078static void
2079cpsw_intr_rx_thresh(void *arg)
2080{
2081	struct cpsw_softc *sc;
2082	struct ifnet *ifp;
2083	struct mbuf *received, *next;
2084
2085	sc = (struct cpsw_softc *)arg;
2086	CPSW_RX_LOCK(sc);
2087	received = cpsw_rx_dequeue(sc);
2088	cpsw_rx_enqueue(sc);
2089	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
2090	CPSW_RX_UNLOCK(sc);
2091
2092	while (received != NULL) {
2093		next = received->m_nextpkt;
2094		received->m_nextpkt = NULL;
2095		ifp = received->m_pkthdr.rcvif;
2096		(*ifp->if_input)(ifp, received);
2097		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2098		received = next;
2099	}
2100}
2101
/*
 * Decode and report a CPDMA host error interrupt (a programming error
 * detected by the hardware), then panic.  The DMASTATUS register
 * encodes the error code and channel for both directions.
 */
static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
	uint32_t intstat;
	uint32_t dmastat;
	int txerr, rxerr, txchan, rxchan;

	printf("\n\n");
	device_printf(sc->dev,
	    "HOST ERROR:  PROGRAMMING ERROR DETECTED BY HARDWARE\n");
	printf("\n\n");
	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);

	/* DMASTATUS: error code in bits [23:20]/[15:12], channel in [18:16]/[10:8]. */
	txerr = (dmastat >> 20) & 15;
	txchan = (dmastat >> 16) & 7;
	rxerr = (dmastat >> 12) & 15;
	rxchan = (dmastat >> 8) & 7;

	switch (txerr) {
	case 0: break;
	case 1:	printf("SOP error on TX channel %d\n", txchan);
		break;
	case 2:	printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
		break;
	case 3:	printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
		break;
	case 4:	printf("Zero Buffer Pointer on TX channel %d\n", txchan);
		break;
	case 5:	printf("Zero Buffer Length on TX channel %d\n", txchan);
		break;
	case 6:	printf("Packet length error on TX channel %d\n", txchan);
		break;
	default: printf("Unknown error on TX channel %d\n", txchan);
		break;
	}

	/* On a TX error, dump the TX ring state for post-mortem. */
	if (txerr != 0) {
		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
		cpsw_dump_queue(sc, &sc->tx.active);
	}

	switch (rxerr) {
	case 0: break;
	case 2:	printf("Ownership bit not set on RX channel %d\n", rxchan);
		break;
	case 4:	printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
		break;
	case 5:	printf("Zero Buffer Length on RX channel %d\n", rxchan);
		break;
	case 6:	printf("Buffer offset too big on RX channel %d\n", rxchan);
		break;
	default: printf("Unknown RX error on RX channel %d\n", rxchan);
		break;
	}

	/* On an RX error, dump the RX ring state for post-mortem. */
	if (rxerr != 0) {
		printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
		    rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan)));
		printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
		cpsw_dump_queue(sc, &sc->rx.active);
	}

	printf("\nALE Table\n");
	cpsw_ale_dump_table(sc);

	// XXX do something useful here??
	panic("CPSW HOST ERROR INTERRUPT");

	// NOTE(review): the code below is unreachable while the panic()
	// above is in place; it documents the intended non-fatal path.
	// Suppress this interrupt in the future.
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
	printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
	// The watchdog will probably reset the controller
	// in a little while.  It will probably fail again.
}
2183
/*
 * Miscellaneous interrupt handler.  Reads the core-0 misc interrupt
 * status register and dispatches each pending cause, then signals
 * end-of-interrupt with EOI vector 3.
 */
static void
cpsw_intr_misc(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));

	/* Time-sync (CPTS) events: not handled yet, just logged. */
	if (stat & CPSW_WR_C_MISC_EVNT_PEND)
		CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented"));
	/* Statistics interrupt: fold hardware counters into the shadow copy. */
	if (stat & CPSW_WR_C_MISC_STAT_PEND)
		cpsw_stats_collect(sc);
	/* CPDMA host (programming) error: dump state; does not return. */
	if (stat & CPSW_WR_C_MISC_HOST_PEND)
		cpsw_intr_misc_host_error(sc);
	if (stat & CPSW_WR_C_MISC_MDIOLINK) {
		/* Acknowledge the MDIO link event by writing the bits back. */
		cpsw_write_4(sc, MDIOLINKINTMASKED,
		    cpsw_read_4(sc, MDIOLINKINTMASKED));
	}
	if (stat & CPSW_WR_C_MISC_MDIOUSER) {
		CPSW_DEBUGF(sc,
		    ("MDIO operation completed interrupt unimplemented"));
	}
	/* EOI for the misc interrupt line. */
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
}
2206
2207/*
2208 *
2209 * Periodic Checks and Watchdog.
2210 *
2211 */
2212
/*
 * Per-port periodic (1 Hz) callout: drive the MII state machine via
 * mii_tick() and re-apply media settings when the configured media
 * word has changed since the last pass.
 */
static void
cpswp_tick(void *msc)
{
	struct cpswp_softc *sc = msc;

	/* Check for media type change */
	mii_tick(sc->mii);
	if (sc->media_status != sc->mii->mii_media.ifm_media) {
		printf("%s: media type changed (ifm_media=%x)\n", __func__,
			sc->mii->mii_media.ifm_media);
		/* cpswp_ifmedia_upd() also updates sc->media_status. */
		cpswp_ifmedia_upd(sc->ifp);
	}

	/* Schedule another timeout one second from now */
	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
}
2229
2230static void
2231cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2232{
2233	struct cpswp_softc *sc;
2234	struct mii_data *mii;
2235
2236	sc = ifp->if_softc;
2237	CPSW_DEBUGF(sc->swsc, (""));
2238	CPSW_PORT_LOCK(sc);
2239
2240	mii = sc->mii;
2241	mii_pollstat(mii);
2242
2243	ifmr->ifm_active = mii->mii_media_active;
2244	ifmr->ifm_status = mii->mii_media_status;
2245	CPSW_PORT_UNLOCK(sc);
2246}
2247
/*
 * ifmedia "change" callback: program the PHY with the selected media
 * via mii_mediachg() and record the new media word so cpswp_tick()
 * can detect subsequent changes.  Always returns 0.
 */
static int
cpswp_ifmedia_upd(struct ifnet *ifp)
{
	struct cpswp_softc *sc;

	sc = ifp->if_softc;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK(sc);
	mii_mediachg(sc->mii);
	sc->media_status = sc->mii->mii_media.ifm_media;
	CPSW_PORT_UNLOCK(sc);

	return (0);
}
2262
/*
 * TX watchdog recovery: dump the TX channel 0 DMA head/completion
 * pointers and the active TX queue for debugging, then stop every
 * attached port (only the active slave unless in dual-EMAC mode).
 * Stopping the ports is expected to trigger reinitialization.
 */
static void
cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
{
	struct cpswp_softc *psc;
	int i;

	cpsw_debugf_head("CPSW watchdog");
	device_printf(sc->dev, "watchdog timeout\n");
	printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0,
	    cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)));
	printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0,
	    cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)));
	cpsw_dump_queue(sc, &sc->tx.active);
	for (i = 0; i < CPSW_PORTS; i++) {
		/* Skip inactive ports in single-EMAC mode. */
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[i].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}
}
2285
/*
 * TX watchdog callout, run once per second.  The timer only advances
 * on ticks where packets were queued but no TX descriptor completed
 * (not even after an explicit dequeue attempt); after more than five
 * such consecutive ticks the controller is declared wedged and a full
 * reset is forced.
 */
static void
cpsw_tx_watchdog(void *msc)
{
	struct cpsw_softc *sc;

	sc = msc;
	CPSW_TX_LOCK(sc);
	if (sc->tx.active_queue_len == 0 || !sc->tx.running) {
		sc->watchdog.timer = 0; /* Nothing to do. */
	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
		sc->watchdog.timer = 0;  /* Stuff done while we weren't looking. */
	} else if (cpsw_tx_dequeue(sc) > 0) {
		sc->watchdog.timer = 0;  /* We just did something. */
	} else {
		/* There was something to do but it didn't get done. */
		++sc->watchdog.timer;
		if (sc->watchdog.timer > 5) {
			sc->watchdog.timer = 0;
			++sc->watchdog.resets;
			cpsw_tx_watchdog_full_reset(sc);
		}
	}
	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
	CPSW_TX_UNLOCK(sc);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}
2314
2315/*
2316 *
2317 * ALE support routines.
2318 *
2319 */
2320
/*
 * Read one ALE table entry (three 32-bit words) at the given index.
 * Writing the index (low 10 bits, so up to 1024 entries) to TBLCTL
 * with the write bit clear selects the entry, which is then read out
 * through the TBLW0..TBLW2 window registers.
 */
static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
}
2329
2330static void
2331cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
2332{
2333	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
2334	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
2335	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
2336	cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
2337}
2338
2339static void
2340cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
2341{
2342	int i;
2343	uint32_t ale_entry[3];
2344
2345	/* First four entries are link address and broadcast. */
2346	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
2347		cpsw_ale_read_entry(sc, i, ale_entry);
2348		if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR ||
2349		    ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) &&
2350		    ALE_MCAST(ale_entry)  == 1) { /* MCast link addr */
2351			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
2352			cpsw_ale_write_entry(sc, i, ale_entry);
2353		}
2354	}
2355}
2356
2357static int
2358cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan,
2359	uint8_t *mac)
2360{
2361	int free_index = -1, matching_index = -1, i;
2362	uint32_t ale_entry[3], ale_type;
2363
2364	/* Find a matching entry or a free entry. */
2365	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
2366		cpsw_ale_read_entry(sc, i, ale_entry);
2367
2368		/* Entry Type[61:60] is 0 for free entry */
2369		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
2370			free_index = i;
2371
2372		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
2373		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
2374		    (((ale_entry[0] >>24) & 0xFF) == mac[2]) &&
2375		    (((ale_entry[0] >>16) & 0xFF) == mac[3]) &&
2376		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
2377		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
2378			matching_index = i;
2379			break;
2380		}
2381	}
2382
2383	if (matching_index < 0) {
2384		if (free_index < 0)
2385			return (ENOMEM);
2386		i = free_index;
2387	}
2388
2389	if (vlan != -1)
2390		ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16;
2391	else
2392		ale_type = ALE_TYPE_ADDR << 28;
2393
2394	/* Set MAC address */
2395	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
2396	ale_entry[1] = mac[0] << 8 | mac[1];
2397
2398	/* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */
2399	ale_entry[1] |= ALE_MCAST_FWD | ale_type;
2400
2401	/* Set portmask [68:66] */
2402	ale_entry[2] = (portmap & 7) << 2;
2403
2404	cpsw_ale_write_entry(sc, i, ale_entry);
2405
2406	return 0;
2407}
2408
2409static void
2410cpsw_ale_dump_table(struct cpsw_softc *sc) {
2411	int i;
2412	uint32_t ale_entry[3];
2413	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
2414		cpsw_ale_read_entry(sc, i, ale_entry);
2415		switch (ALE_TYPE(ale_entry)) {
2416		case ALE_TYPE_VLAN:
2417			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
2418				ale_entry[1], ale_entry[0]);
2419			printf("type: %u ", ALE_TYPE(ale_entry));
2420			printf("vlan: %u ", ALE_VLAN(ale_entry));
2421			printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry));
2422			printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry));
2423			printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry));
2424			printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry));
2425			printf("\n");
2426			break;
2427		case ALE_TYPE_ADDR:
2428		case ALE_TYPE_VLAN_ADDR:
2429			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
2430				ale_entry[1], ale_entry[0]);
2431			printf("type: %u ", ALE_TYPE(ale_entry));
2432			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
2433				(ale_entry[1] >> 8) & 0xFF,
2434				(ale_entry[1] >> 0) & 0xFF,
2435				(ale_entry[0] >>24) & 0xFF,
2436				(ale_entry[0] >>16) & 0xFF,
2437				(ale_entry[0] >> 8) & 0xFF,
2438				(ale_entry[0] >> 0) & 0xFF);
2439			printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast ");
2440			if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR)
2441				printf("vlan: %u ", ALE_VLAN(ale_entry));
2442			printf("port: %u ", ALE_PORTS(ale_entry));
2443			printf("\n");
2444			break;
2445		}
2446	}
2447	printf("\n");
2448}
2449
2450static u_int
2451cpswp_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2452{
2453	struct cpswp_softc *sc = arg;
2454	uint32_t portmask;
2455
2456	if (sc->swsc->dualemac)
2457		portmask = 1 << (sc->unit + 1) | 1 << 0;
2458	else
2459		portmask = 7;
2460
2461	cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, LLADDR(sdl));
2462
2463	return (1);
2464}
2465
/*
 * Program the ALE and port registers with this port's unicast address,
 * the broadcast entry and all configured link-level multicast
 * addresses.  When 'purge' is set, all existing multicast entries are
 * removed first and the table is rebuilt from the interface's current
 * address list.  Always returns 0.
 */
static int
cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge)
{
	uint8_t *mac;
	uint32_t ale_entry[3], ale_type, portmask;

	if (sc->swsc->dualemac) {
		/* VLAN-qualified entries, forward to host + this port only. */
		ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16;
		portmask = 1 << (sc->unit + 1) | 1 << 0;
	} else {
		ale_type = ALE_TYPE_ADDR << 28;
		portmask = 7;
	}

	/*
	 * Route incoming packets for our MAC address to Port 0 (host).
	 * For simplicity, keep this entry at table index 0 for port 1 and
	 * at index 2 for port 2 in the ALE.
	 */
	mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr);
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */
	ale_entry[2] = 0; /* port = 0 */
	cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry);

	/* Set outgoing MAC Address for slave port. */
	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1),
	    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1),
	    mac[5] << 8 | mac[4]);

	/* Keep the broadcast address at table entry 1 (or 3). */
	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
	/* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */
	ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff;
	ale_entry[2] = portmask << 2;
	cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry);

	/*
	 * SIOCDELMULTI doesn't specify the particular address being
	 * removed, so we have to remove all and rebuild.
	 */
	if (purge)
		cpsw_ale_remove_all_mc_entries(sc->swsc);

	/* Set other multicast addrs desired. */
	if_foreach_llmaddr(sc->ifp, cpswp_set_maddr, sc);

	return (0);
}
2514
2515static int
2516cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports,
2517	int untag, int mcregflood, int mcunregflood)
2518{
2519	int free_index, i, matching_index;
2520	uint32_t ale_entry[3];
2521
2522	free_index = matching_index = -1;
2523	/* Find a matching entry or a free entry. */
2524	for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) {
2525		cpsw_ale_read_entry(sc, i, ale_entry);
2526
2527		/* Entry Type[61:60] is 0 for free entry */
2528		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
2529			free_index = i;
2530
2531		if (ALE_VLAN(ale_entry) == vlan) {
2532			matching_index = i;
2533			break;
2534		}
2535	}
2536
2537	if (matching_index < 0) {
2538		if (free_index < 0)
2539			return (-1);
2540		i = free_index;
2541	}
2542
2543	ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 |
2544	    (mcunregflood & 7) << 8 | (ports & 7);
2545	ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16;
2546	ale_entry[2] = 0;
2547	cpsw_ale_write_entry(sc, i, ale_entry);
2548
2549	return (0);
2550}
2551
2552/*
2553 *
2554 * Statistics and Sysctls.
2555 *
2556 */
2557
#if 0
/*
 * Debug helper (currently compiled out): print each hardware
 * statistics register alongside its accumulated shadow value and
 * their sum.
 */
static void
cpsw_stats_dump(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
		    (intmax_t)sc->shadow_stats[i], r,
		    (intmax_t)sc->shadow_stats[i] + r));
	}
}
#endif
2574
2575static void
2576cpsw_stats_collect(struct cpsw_softc *sc)
2577{
2578	int i;
2579	uint32_t r;
2580
2581	CPSW_DEBUGF(sc, ("Controller shadow statistics updated."));
2582
2583	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2584		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2585		    cpsw_stat_sysctls[i].reg);
2586		sc->shadow_stats[i] += r;
2587		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg,
2588		    r);
2589	}
2590}
2591
2592static int
2593cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
2594{
2595	struct cpsw_softc *sc;
2596	struct cpsw_stat *stat;
2597	uint64_t result;
2598
2599	sc = (struct cpsw_softc *)arg1;
2600	stat = &cpsw_stat_sysctls[oidp->oid_number];
2601	result = sc->shadow_stats[oidp->oid_number];
2602	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
2603	return (sysctl_handle_64(oidp, &result, 0, req));
2604}
2605
/*
 * Sysctl handler: report the number of whole seconds elapsed since
 * the driver attached (attach_uptime is recorded at attach time).
 */
static int
cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	getbinuptime(&t);
	bintime_sub(&t, &sc->attach_uptime);
	result = t.sec;
	return (sysctl_handle_int(oidp, &result, 0, req));
}
2619
/*
 * Sysctl handler for the "intr_coalesce_us" knob: program the WR
 * interrupt pacing hardware so RX/TX interrupts arrive no closer than
 * sc->coal_us microseconds apart.  Writing 0 disables pacing.
 */
static int
cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct cpsw_softc *sc;
	uint32_t ctrl, intr_per_ms;

	sc = (struct cpsw_softc *)arg1;
	error = sysctl_handle_int(oidp, &sc->coal_us, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
	ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
	if (sc->coal_us == 0) {
		/* Disable the interrupt pace hardware. */
		cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
		cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0);
		cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0);
		return (0);
	}

	/* Clamp the requested interval to what the hardware supports. */
	if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX)
		sc->coal_us = CPSW_WR_C_IMAX_US_MAX;
	if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN)
		sc->coal_us = CPSW_WR_C_IMAX_US_MIN;
	/* The IMAX registers count interrupts per millisecond. */
	intr_per_ms = 1000 / sc->coal_us;
	/* Just to make sure... */
	if (intr_per_ms > CPSW_WR_C_IMAX_MAX)
		intr_per_ms = CPSW_WR_C_IMAX_MAX;
	if (intr_per_ms < CPSW_WR_C_IMAX_MIN)
		intr_per_ms = CPSW_WR_C_IMAX_MIN;

	/* Set the prescale to produce 4us pulses from the 125 Mhz clock. */
	ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK;

	/* Enable the interrupt pace hardware. */
	cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms);
	cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms);
	ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE;
	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);

	return (0);
}
2664
2665static int
2666cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
2667{
2668	struct cpsw_softc *swsc;
2669	struct cpswp_softc *sc;
2670	struct bintime t;
2671	unsigned result;
2672
2673	swsc = arg1;
2674	sc = device_get_softc(swsc->port[arg2].dev);
2675	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
2676		getbinuptime(&t);
2677		bintime_sub(&t, &sc->init_uptime);
2678		result = t.sec;
2679	} else
2680		result = 0;
2681	return (sysctl_handle_int(oidp, &result, 0, req));
2682}
2683
/*
 * Attach the read-only per-queue (TX or RX) statistics sysctls under
 * 'node'; each exports one field of the given cpsw_queue.
 */
static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
	struct cpsw_queue *queue)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
	    CTLFLAG_RD, &queue->queue_slots, 0,
	    "Total buffers currently assigned to this queue");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
	    CTLFLAG_RD, &queue->active_queue_len, 0,
	    "Buffers currently registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
	    "Max value of activeBuffers since last driver reset");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
	    CTLFLAG_RD, &queue->avail_queue_len, 0,
	    "Buffers allocated to this queue but not currently "
	    "registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
	    "Max value of availBuffers since last driver reset");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
	    CTLFLAG_RD, &queue->queue_adds, 0,
	    "Total buffers added to queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
	    CTLFLAG_RD, &queue->queue_removes, 0,
	    "Total buffers removed from queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart",
	    CTLFLAG_RD, &queue->queue_restart, 0,
	    "Total times the queue has been restarted");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
	    CTLFLAG_RD, &queue->longest_chain, 0,
	    "Max buffers used for a single packet");
}
2720
/*
 * Attach the watchdog statistics sysctl (count of forced full
 * resets) under 'node'.
 */
static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
	struct cpsw_softc *sc)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
	    CTLFLAG_RD, &sc->watchdog.resets, 0,
	    "Total number of watchdog resets");
}
2732
/*
 * Build the driver's sysctl tree: debug knob, attach uptime,
 * interrupt coalescing control, per-port uptime nodes, the hardware
 * statistics counters and the TX/RX queue and watchdog statistics.
 */
static void
cpsw_add_sysctls(struct cpsw_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *stats_node, *queue_node, *node;
	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
	struct sysctl_oid_list *ports_parent, *port_parent;
	char port[16];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    sc, 0, cpsw_stat_attached, "IU",
	    "Time since driver attach");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, cpsw_intr_coalesce, "IU",
	    "minimum time between interrupts");

	/* One sub-node per active port, named by its (single-digit)
	 * index; assumes fewer than 10 ports -- TODO confirm. */
	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Ports Statistics");
	ports_parent = SYSCTL_CHILDREN(node);
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		port[0] = '0' + i;
		port[1] = '\0';
		node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO,
		    port, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "CPSW Port Statistics");
		port_parent = SYSCTL_CHILDREN(node);
		SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i,
		    cpsw_stat_uptime, "IU", "Seconds since driver init");
	}

	/* One PROC entry per hardware statistics counter. */
	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Statistics");
	stats_parent = SYSCTL_CHILDREN(stats_node);
	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		SYSCTL_ADD_PROC(ctx, stats_parent, i,
				cpsw_stat_sysctls[i].oid,
				CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
				sc, 0, cpsw_stats_sysctl, "IU",
				cpsw_stat_sysctls[i].oid);
	}

	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Queue Statistics");
	queue_parent = SYSCTL_CHILDREN(queue_node);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->tx);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->rx);

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Watchdog Statistics");
	cpsw_add_watchdog_sysctls(ctx, node, sc);
}
2803
#ifdef CPSW_ETHERSWITCH
/* Capabilities advertised to the etherswitch(4) framework: the slave
 * ports plus the CPU port, with 802.1Q VLAN support. */
static etherswitch_info_t etherswitch_info = {
	.es_nports =		CPSW_PORTS + 1,
	.es_nvlangroups =	CPSW_VLANS,
	.es_name =		"TI Common Platform Ethernet Switch (CPSW)",
	.es_vlan_caps =		ETHERSWITCH_VLAN_DOT1Q,
};
2811
/* etherswitch getinfo: return the static capability description. */
static etherswitch_info_t *
cpsw_getinfo(device_t dev)
{
	return (&etherswitch_info);
}
2817
2818static int
2819cpsw_getport(device_t dev, etherswitch_port_t *p)
2820{
2821	int err;
2822	struct cpsw_softc *sc;
2823	struct cpswp_softc *psc;
2824	struct ifmediareq *ifmr;
2825	uint32_t reg;
2826
2827	if (p->es_port < 0 || p->es_port > CPSW_PORTS)
2828		return (ENXIO);
2829
2830	err = 0;
2831	sc = device_get_softc(dev);
2832	if (p->es_port == CPSW_CPU_PORT) {
2833		p->es_flags |= ETHERSWITCH_PORT_CPU;
2834 		ifmr = &p->es_ifmr;
2835		ifmr->ifm_current = ifmr->ifm_active =
2836		    IFM_ETHER | IFM_1000_T | IFM_FDX;
2837		ifmr->ifm_mask = 0;
2838		ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
2839		ifmr->ifm_count = 0;
2840	} else {
2841		psc = device_get_softc(sc->port[p->es_port - 1].dev);
2842		err = ifmedia_ioctl(psc->ifp, &p->es_ifr,
2843		    &psc->mii->mii_media, SIOCGIFMEDIA);
2844	}
2845	reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port));
2846	p->es_pvid = reg & ETHERSWITCH_VID_MASK;
2847
2848	reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
2849	if (reg & ALE_PORTCTL_DROP_UNTAGGED)
2850		p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;
2851	if (reg & ALE_PORTCTL_INGRESS)
2852		p->es_flags |= ETHERSWITCH_PORT_INGRESS;
2853
2854	return (err);
2855}
2856
2857static int
2858cpsw_setport(device_t dev, etherswitch_port_t *p)
2859{
2860	struct cpsw_softc *sc;
2861	struct cpswp_softc *psc;
2862	struct ifmedia *ifm;
2863	uint32_t reg;
2864
2865	if (p->es_port < 0 || p->es_port > CPSW_PORTS)
2866		return (ENXIO);
2867
2868	sc = device_get_softc(dev);
2869	if (p->es_pvid != 0) {
2870		cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port),
2871		    p->es_pvid & ETHERSWITCH_VID_MASK);
2872	}
2873
2874	reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
2875	if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
2876		reg |= ALE_PORTCTL_DROP_UNTAGGED;
2877	else
2878		reg &= ~ALE_PORTCTL_DROP_UNTAGGED;
2879	if (p->es_flags & ETHERSWITCH_PORT_INGRESS)
2880		reg |= ALE_PORTCTL_INGRESS;
2881	else
2882		reg &= ~ALE_PORTCTL_INGRESS;
2883	cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg);
2884
2885	/* CPU port does not allow media settings. */
2886	if (p->es_port == CPSW_CPU_PORT)
2887		return (0);
2888
2889	psc = device_get_softc(sc->port[p->es_port - 1].dev);
2890	ifm = &psc->mii->mii_media;
2891
2892	return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
2893}
2894
/*
 * etherswitch getconf: report the VLAN configuration mode.  CPSW is
 * always operated in 802.1Q mode here.
 */
static int
cpsw_getconf(device_t dev, etherswitch_conf_t *conf)
{

	/* Return the VLAN mode. */
	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;

	return (0);
}
2905
2906static int
2907cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
2908{
2909	int i, vid;
2910	uint32_t ale_entry[3];
2911	struct cpsw_softc *sc;
2912
2913	sc = device_get_softc(dev);
2914
2915	if (vg->es_vlangroup >= CPSW_VLANS)
2916		return (EINVAL);
2917
2918	vg->es_vid = 0;
2919	vid = cpsw_vgroups[vg->es_vlangroup].vid;
2920	if (vid == -1)
2921		return (0);
2922
2923	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
2924		cpsw_ale_read_entry(sc, i, ale_entry);
2925		if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
2926			continue;
2927		if (vid != ALE_VLAN(ale_entry))
2928			continue;
2929
2930		vg->es_fid = 0;
2931		vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID;
2932		vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry);
2933		vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry);
2934	}
2935
2936	return (0);
2937}
2938
2939static void
2940cpsw_remove_vlan(struct cpsw_softc *sc, int vlan)
2941{
2942	int i;
2943	uint32_t ale_entry[3];
2944
2945	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
2946		cpsw_ale_read_entry(sc, i, ale_entry);
2947		if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
2948			continue;
2949		if (vlan != ALE_VLAN(ale_entry))
2950			continue;
2951		ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
2952		cpsw_ale_write_entry(sc, i, ale_entry);
2953		break;
2954	}
2955}
2956
/*
 * etherswitch setvgroup: assign a VLAN id to a vlangroup and program
 * the ALE accordingly.  Setting es_vid to 0 releases the group and
 * removes its VLAN entry from the table.  Returns EINVAL when the id
 * is already used by another group or the group already holds a
 * different id.
 */
static int
cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	int i;
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);

	for (i = 0; i < CPSW_VLANS; i++) {
		/* Is this Vlan ID in use by another vlangroup ? */
		if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid)
			return (EINVAL);
	}

	/* A vid of zero frees the group. */
	if (vg->es_vid == 0) {
		if (cpsw_vgroups[vg->es_vlangroup].vid == -1)
			return (0);
		cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid);
		cpsw_vgroups[vg->es_vlangroup].vid = -1;
		vg->es_untagged_ports = 0;
		vg->es_member_ports = 0;
		vg->es_vid = 0;
		return (0);
	}

	/* Restrict the request to valid vid and port ranges. */
	vg->es_vid &= ETHERSWITCH_VID_MASK;
	vg->es_member_ports &= CPSW_PORTS_MASK;
	vg->es_untagged_ports &= CPSW_PORTS_MASK;

	if (cpsw_vgroups[vg->es_vlangroup].vid != -1 &&
	    cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid)
		return (EINVAL);

	cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid;
	cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports,
	    vg->es_untagged_ports, vg->es_member_ports, 0);

	return (0);
}
2996
/* etherswitch readreg: raw register access is not supported. */
static int
cpsw_readreg(device_t dev, int addr)
{

	/* Not supported. */
	return (0);
}
3004
/* etherswitch writereg: raw register access is not supported. */
static int
cpsw_writereg(device_t dev, int addr, int value)
{

	/* Not supported. */
	return (0);
}
3012
/* etherswitch readphy: PHY access via this interface is not supported. */
static int
cpsw_readphy(device_t dev, int phy, int reg)
{

	/* Not supported. */
	return (0);
}
3020
/* etherswitch writephy: PHY access via this interface is not supported. */
static int
cpsw_writephy(device_t dev, int phy, int reg, int data)
{

	/* Not supported. */
	return (0);
}
3028#endif
3029