/* if_cpsw.c — FreeBSD stable/11, revision 312761 */
1/*- 2 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org> 3 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate) 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28/* 29 * TI Common Platform Ethernet Switch (CPSW) Driver 30 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs. 31 * 32 * This controller is documented in the AM335x Technical Reference 33 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM 34 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM. 35 * 36 * It is basically a single Ethernet port (port 0) wired internally to 37 * a 3-port store-and-forward switch connected to two independent 38 * "sliver" controllers (port 1 and port 2). 
You can operate the 39 * controller in a variety of different ways by suitably configuring 40 * the slivers and the Address Lookup Engine (ALE) that routes packets 41 * between the ports. 42 * 43 * This code was developed and tested on a BeagleBone with 44 * an AM335x SoC. 45 */ 46 47#include <sys/cdefs.h> 48__FBSDID("$FreeBSD: stable/11/sys/arm/ti/cpsw/if_cpsw.c 312761 2017-01-25 16:10:35Z loos $"); 49 50#include "opt_cpsw.h" 51 52#include <sys/param.h> 53#include <sys/bus.h> 54#include <sys/kernel.h> 55#include <sys/lock.h> 56#include <sys/mbuf.h> 57#include <sys/module.h> 58#include <sys/mutex.h> 59#include <sys/rman.h> 60#include <sys/socket.h> 61#include <sys/sockio.h> 62#include <sys/sysctl.h> 63 64#include <machine/bus.h> 65#include <machine/resource.h> 66#include <machine/stdarg.h> 67 68#include <net/ethernet.h> 69#include <net/bpf.h> 70#include <net/if.h> 71#include <net/if_dl.h> 72#include <net/if_media.h> 73#include <net/if_types.h> 74 75#include <arm/ti/ti_scm.h> 76#include <arm/ti/am335x/am335x_scm.h> 77 78#include <dev/mii/mii.h> 79#include <dev/mii/miivar.h> 80 81#include <dev/fdt/fdt_common.h> 82#include <dev/ofw/ofw_bus.h> 83#include <dev/ofw/ofw_bus_subr.h> 84 85#ifdef CPSW_ETHERSWITCH 86#include <dev/etherswitch/etherswitch.h> 87#include "etherswitch_if.h" 88#endif 89 90#include "if_cpswreg.h" 91#include "if_cpswvar.h" 92 93#include "miibus_if.h" 94 95/* Device probe/attach/detach. */ 96static int cpsw_probe(device_t); 97static int cpsw_attach(device_t); 98static int cpsw_detach(device_t); 99static int cpswp_probe(device_t); 100static int cpswp_attach(device_t); 101static int cpswp_detach(device_t); 102 103static phandle_t cpsw_get_node(device_t, device_t); 104 105/* Device Init/shutdown. */ 106static int cpsw_shutdown(device_t); 107static void cpswp_init(void *); 108static void cpswp_init_locked(void *); 109static void cpswp_stop_locked(struct cpswp_softc *); 110 111/* Device Suspend/Resume. 
*/ 112static int cpsw_suspend(device_t); 113static int cpsw_resume(device_t); 114 115/* Ioctl. */ 116static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data); 117 118static int cpswp_miibus_readreg(device_t, int phy, int reg); 119static int cpswp_miibus_writereg(device_t, int phy, int reg, int value); 120static void cpswp_miibus_statchg(device_t); 121 122/* Send/Receive packets. */ 123static void cpsw_intr_rx(void *arg); 124static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); 125static void cpsw_rx_enqueue(struct cpsw_softc *); 126static void cpswp_start(struct ifnet *); 127static void cpsw_intr_tx(void *); 128static void cpswp_tx_enqueue(struct cpswp_softc *); 129static int cpsw_tx_dequeue(struct cpsw_softc *); 130 131/* Misc interrupts and watchdog. */ 132static void cpsw_intr_rx_thresh(void *); 133static void cpsw_intr_misc(void *); 134static void cpswp_tick(void *); 135static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *); 136static int cpswp_ifmedia_upd(struct ifnet *); 137static void cpsw_tx_watchdog(void *); 138 139/* ALE support */ 140static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *); 141static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *); 142static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *); 143static void cpsw_ale_dump_table(struct cpsw_softc *); 144static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int, 145 int); 146static int cpswp_ale_update_addresses(struct cpswp_softc *, int); 147 148/* Statistics and sysctls. 
*/ 149static void cpsw_add_sysctls(struct cpsw_softc *); 150static void cpsw_stats_collect(struct cpsw_softc *); 151static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); 152 153#ifdef CPSW_ETHERSWITCH 154static etherswitch_info_t *cpsw_getinfo(device_t); 155static int cpsw_getport(device_t, etherswitch_port_t *); 156static int cpsw_setport(device_t, etherswitch_port_t *); 157static int cpsw_getconf(device_t, etherswitch_conf_t *); 158static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *); 159static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *); 160static int cpsw_readreg(device_t, int); 161static int cpsw_writereg(device_t, int, int); 162static int cpsw_readphy(device_t, int, int); 163static int cpsw_writephy(device_t, int, int, int); 164#endif 165 166/* 167 * Arbitrary limit on number of segments in an mbuf to be transmitted. 168 * Packets with more segments than this will be defragmented before 169 * they are queued. 170 */ 171#define CPSW_TXFRAGS 16 172 173/* Shared resources. 
*/ 174static device_method_t cpsw_methods[] = { 175 /* Device interface */ 176 DEVMETHOD(device_probe, cpsw_probe), 177 DEVMETHOD(device_attach, cpsw_attach), 178 DEVMETHOD(device_detach, cpsw_detach), 179 DEVMETHOD(device_shutdown, cpsw_shutdown), 180 DEVMETHOD(device_suspend, cpsw_suspend), 181 DEVMETHOD(device_resume, cpsw_resume), 182 /* Bus interface */ 183 DEVMETHOD(bus_add_child, device_add_child_ordered), 184 /* OFW methods */ 185 DEVMETHOD(ofw_bus_get_node, cpsw_get_node), 186#ifdef CPSW_ETHERSWITCH 187 /* etherswitch interface */ 188 DEVMETHOD(etherswitch_getinfo, cpsw_getinfo), 189 DEVMETHOD(etherswitch_readreg, cpsw_readreg), 190 DEVMETHOD(etherswitch_writereg, cpsw_writereg), 191 DEVMETHOD(etherswitch_readphyreg, cpsw_readphy), 192 DEVMETHOD(etherswitch_writephyreg, cpsw_writephy), 193 DEVMETHOD(etherswitch_getport, cpsw_getport), 194 DEVMETHOD(etherswitch_setport, cpsw_setport), 195 DEVMETHOD(etherswitch_getvgroup, cpsw_getvgroup), 196 DEVMETHOD(etherswitch_setvgroup, cpsw_setvgroup), 197 DEVMETHOD(etherswitch_getconf, cpsw_getconf), 198#endif 199 DEVMETHOD_END 200}; 201 202static driver_t cpsw_driver = { 203 "cpswss", 204 cpsw_methods, 205 sizeof(struct cpsw_softc), 206}; 207 208static devclass_t cpsw_devclass; 209 210DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0); 211 212/* Port/Slave resources. 
*/ 213static device_method_t cpswp_methods[] = { 214 /* Device interface */ 215 DEVMETHOD(device_probe, cpswp_probe), 216 DEVMETHOD(device_attach, cpswp_attach), 217 DEVMETHOD(device_detach, cpswp_detach), 218 /* MII interface */ 219 DEVMETHOD(miibus_readreg, cpswp_miibus_readreg), 220 DEVMETHOD(miibus_writereg, cpswp_miibus_writereg), 221 DEVMETHOD(miibus_statchg, cpswp_miibus_statchg), 222 DEVMETHOD_END 223}; 224 225static driver_t cpswp_driver = { 226 "cpsw", 227 cpswp_methods, 228 sizeof(struct cpswp_softc), 229}; 230 231static devclass_t cpswp_devclass; 232 233#ifdef CPSW_ETHERSWITCH 234DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, etherswitch_devclass, 0, 0); 235MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1); 236#endif 237 238DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0); 239DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0); 240MODULE_DEPEND(cpsw, ether, 1, 1, 1); 241MODULE_DEPEND(cpsw, miibus, 1, 1, 1); 242 243#ifdef CPSW_ETHERSWITCH 244static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS]; 245#endif 246 247static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 }; 248 249static struct resource_spec irq_res_spec[] = { 250 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 251 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, 252 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, 253 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, 254 { -1, 0 } 255}; 256 257static struct { 258 void (*cb)(void *); 259} cpsw_intr_cb[] = { 260 { cpsw_intr_rx_thresh }, 261 { cpsw_intr_rx }, 262 { cpsw_intr_tx }, 263 { cpsw_intr_misc }, 264}; 265 266/* Number of entries here must match size of stats 267 * array in struct cpswp_softc. 
*/ 268static struct cpsw_stat { 269 int reg; 270 char *oid; 271} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { 272 {0x00, "GoodRxFrames"}, 273 {0x04, "BroadcastRxFrames"}, 274 {0x08, "MulticastRxFrames"}, 275 {0x0C, "PauseRxFrames"}, 276 {0x10, "RxCrcErrors"}, 277 {0x14, "RxAlignErrors"}, 278 {0x18, "OversizeRxFrames"}, 279 {0x1c, "RxJabbers"}, 280 {0x20, "ShortRxFrames"}, 281 {0x24, "RxFragments"}, 282 {0x30, "RxOctets"}, 283 {0x34, "GoodTxFrames"}, 284 {0x38, "BroadcastTxFrames"}, 285 {0x3c, "MulticastTxFrames"}, 286 {0x40, "PauseTxFrames"}, 287 {0x44, "DeferredTxFrames"}, 288 {0x48, "CollisionsTxFrames"}, 289 {0x4c, "SingleCollisionTxFrames"}, 290 {0x50, "MultipleCollisionTxFrames"}, 291 {0x54, "ExcessiveCollisions"}, 292 {0x58, "LateCollisions"}, 293 {0x5c, "TxUnderrun"}, 294 {0x60, "CarrierSenseErrors"}, 295 {0x64, "TxOctets"}, 296 {0x68, "RxTx64OctetFrames"}, 297 {0x6c, "RxTx65to127OctetFrames"}, 298 {0x70, "RxTx128to255OctetFrames"}, 299 {0x74, "RxTx256to511OctetFrames"}, 300 {0x78, "RxTx512to1024OctetFrames"}, 301 {0x7c, "RxTx1024upOctetFrames"}, 302 {0x80, "NetOctets"}, 303 {0x84, "RxStartOfFrameOverruns"}, 304 {0x88, "RxMiddleOfFrameOverruns"}, 305 {0x8c, "RxDmaOverruns"} 306}; 307 308/* 309 * Basic debug support. 310 */ 311 312static void 313cpsw_debugf_head(const char *funcname) 314{ 315 int t = (int)(time_second % (24 * 60 * 60)); 316 317 printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); 318} 319 320static void 321cpsw_debugf(const char *fmt, ...) 
322{ 323 va_list ap; 324 325 va_start(ap, fmt); 326 vprintf(fmt, ap); 327 va_end(ap); 328 printf("\n"); 329 330} 331 332#define CPSW_DEBUGF(_sc, a) do { \ 333 if ((_sc)->debug) { \ 334 cpsw_debugf_head(__func__); \ 335 cpsw_debugf a; \ 336 } \ 337} while (0) 338 339/* 340 * Locking macros 341 */ 342#define CPSW_TX_LOCK(sc) do { \ 343 mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ 344 mtx_lock(&(sc)->tx.lock); \ 345} while (0) 346 347#define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) 348#define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) 349 350#define CPSW_RX_LOCK(sc) do { \ 351 mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ 352 mtx_lock(&(sc)->rx.lock); \ 353} while (0) 354 355#define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) 356#define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) 357 358#define CPSW_PORT_LOCK(_sc) do { \ 359 mtx_assert(&(_sc)->lock, MA_NOTOWNED); \ 360 mtx_lock(&(_sc)->lock); \ 361} while (0) 362 363#define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 364#define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED) 365 366/* 367 * Read/Write macros 368 */ 369#define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg)) 370#define cpsw_write_4(_sc, _reg, _val) \ 371 bus_write_4((_sc)->mem_res, (_reg), (_val)) 372 373#define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) 374 375#define cpsw_cpdma_bd_paddr(sc, slot) \ 376 BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) 377#define cpsw_cpdma_read_bd(sc, slot, val) \ 378 bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 379#define cpsw_cpdma_write_bd(sc, slot, val) \ 380 bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 381#define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ 382 cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) 383#define cpsw_cpdma_write_bd_flags(sc, slot, val) \ 384 bus_write_2(sc->mem_res, slot->bd_offset + 14, val) 385#define cpsw_cpdma_read_bd_flags(sc, slot) \ 
386 bus_read_2(sc->mem_res, slot->bd_offset + 14) 387#define cpsw_write_hdp_slot(sc, queue, slot) \ 388 cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) 389#define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) 390#define cpsw_read_cp(sc, queue) \ 391 cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) 392#define cpsw_write_cp(sc, queue, val) \ 393 cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) 394#define cpsw_write_cp_slot(sc, queue, slot) \ 395 cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot)) 396 397#if 0 398/* XXX temporary function versions for debugging. */ 399static void 400cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 401{ 402 uint32_t reg = queue->hdp_offset; 403 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 404 CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); 405 cpsw_write_4(sc, reg, v); 406} 407 408static void 409cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 410{ 411 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 412 CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); 413 cpsw_write_cp(sc, queue, v); 414} 415#endif 416 417/* 418 * Expanded dump routines for verbose debugging. 
419 */ 420static void 421cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 422{ 423 static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", 424 "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", 425 "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", 426 "Port0"}; 427 struct cpsw_cpdma_bd bd; 428 const char *sep; 429 int i; 430 431 cpsw_cpdma_read_bd(sc, slot, &bd); 432 printf("BD Addr : 0x%08x Next : 0x%08x\n", 433 cpsw_cpdma_bd_paddr(sc, slot), bd.next); 434 printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); 435 printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); 436 printf(" Flags: "); 437 sep = ""; 438 for (i = 0; i < 16; ++i) { 439 if (bd.flags & (1 << (15 - i))) { 440 printf("%s%s", sep, flags[i]); 441 sep = ","; 442 } 443 } 444 printf("\n"); 445 if (slot->mbuf) { 446 printf(" Ether: %14D\n", 447 (char *)(slot->mbuf->m_data), " "); 448 printf(" Packet: %16D\n", 449 (char *)(slot->mbuf->m_data) + 14, " "); 450 } 451} 452 453#define CPSW_DUMP_SLOT(cs, slot) do { \ 454 IF_DEBUG(sc) { \ 455 cpsw_dump_slot(sc, slot); \ 456 } \ 457} while (0) 458 459static void 460cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) 461{ 462 struct cpsw_slot *slot; 463 int i = 0; 464 int others = 0; 465 466 STAILQ_FOREACH(slot, q, next) { 467 if (i > CPSW_TXFRAGS) 468 ++others; 469 else 470 cpsw_dump_slot(sc, slot); 471 ++i; 472 } 473 if (others) 474 printf(" ... and %d more.\n", others); 475 printf("\n"); 476} 477 478#define CPSW_DUMP_QUEUE(sc, q) do { \ 479 IF_DEBUG(sc) { \ 480 cpsw_dump_queue(sc, q); \ 481 } \ 482} while (0) 483 484static void 485cpsw_init_slots(struct cpsw_softc *sc) 486{ 487 struct cpsw_slot *slot; 488 int i; 489 490 STAILQ_INIT(&sc->avail); 491 492 /* Put the slot descriptors onto the global avail list. 
*/ 493 for (i = 0; i < nitems(sc->_slots); i++) { 494 slot = &sc->_slots[i]; 495 slot->bd_offset = cpsw_cpdma_bd_offset(i); 496 STAILQ_INSERT_TAIL(&sc->avail, slot, next); 497 } 498} 499 500static int 501cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) 502{ 503 const int max_slots = nitems(sc->_slots); 504 struct cpsw_slot *slot; 505 int i; 506 507 if (requested < 0) 508 requested = max_slots; 509 510 for (i = 0; i < requested; ++i) { 511 slot = STAILQ_FIRST(&sc->avail); 512 if (slot == NULL) 513 return (0); 514 if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { 515 device_printf(sc->dev, "failed to create dmamap\n"); 516 return (ENOMEM); 517 } 518 STAILQ_REMOVE_HEAD(&sc->avail, next); 519 STAILQ_INSERT_TAIL(&queue->avail, slot, next); 520 ++queue->avail_queue_len; 521 ++queue->queue_slots; 522 } 523 return (0); 524} 525 526static void 527cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 528{ 529 int error; 530 531 if (slot->dmamap) { 532 if (slot->mbuf) 533 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 534 error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); 535 KASSERT(error == 0, ("Mapping still active")); 536 slot->dmamap = NULL; 537 } 538 if (slot->mbuf) { 539 m_freem(slot->mbuf); 540 slot->mbuf = NULL; 541 } 542} 543 544static void 545cpsw_reset(struct cpsw_softc *sc) 546{ 547 int i; 548 549 callout_stop(&sc->watchdog.callout); 550 551 /* Reset RMII/RGMII wrapper. */ 552 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); 553 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) 554 ; 555 556 /* Disable TX and RX interrupts for all cores. */ 557 for (i = 0; i < 3; ++i) { 558 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); 559 cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); 560 cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); 561 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); 562 } 563 564 /* Reset CPSW subsystem. 
*/ 565 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); 566 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) 567 ; 568 569 /* Reset Sliver port 1 and 2 */ 570 for (i = 0; i < 2; i++) { 571 /* Reset */ 572 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); 573 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) 574 ; 575 } 576 577 /* Reset DMA controller. */ 578 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); 579 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) 580 ; 581 582 /* Disable TX & RX DMA */ 583 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); 584 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); 585 586 /* Clear all queues. */ 587 for (i = 0; i < 8; i++) { 588 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); 589 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); 590 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); 591 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); 592 } 593 594 /* Clear all interrupt Masks */ 595 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); 596 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); 597} 598 599static void 600cpsw_init(struct cpsw_softc *sc) 601{ 602 struct cpsw_slot *slot; 603 uint32_t reg; 604 605 /* Disable the interrupt pacing. */ 606 reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); 607 reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); 608 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg); 609 610 /* Clear ALE */ 611 cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL); 612 613 /* Enable ALE */ 614 reg = CPSW_ALE_CTL_ENABLE; 615 if (sc->dualemac) 616 reg |= CPSW_ALE_CTL_VLAN_AWARE; 617 cpsw_write_4(sc, CPSW_ALE_CONTROL, reg); 618 619 /* Set Host Port Mapping. */ 620 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); 621 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); 622 623 /* Initialize ALE: set host port to forwarding(3). 
*/ 624 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 625 ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); 626 627 cpsw_write_4(sc, CPSW_SS_PTYPE, 0); 628 629 /* Enable statistics for ports 0, 1 and 2 */ 630 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); 631 632 /* Turn off flow control. */ 633 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); 634 635 /* Make IP hdr aligned with 4 */ 636 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); 637 638 /* Initialize RX Buffer Descriptors */ 639 cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0); 640 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); 641 642 /* Enable TX & RX DMA */ 643 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); 644 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); 645 646 /* Enable Interrupts for core 0 */ 647 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); 648 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); 649 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF); 650 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F); 651 652 /* Enable host Error Interrupt */ 653 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); 654 655 /* Enable interrupts for RX and TX on Channel 0 */ 656 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 657 CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0)); 658 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1); 659 660 /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ 661 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ 662 cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff); 663 664 /* Select MII in GMII_SEL, Internal Delay mode */ 665 //ti_scm_reg_write_4(0x650, 0); 666 667 /* Initialize active queues. 
*/ 668 slot = STAILQ_FIRST(&sc->tx.active); 669 if (slot != NULL) 670 cpsw_write_hdp_slot(sc, &sc->tx, slot); 671 slot = STAILQ_FIRST(&sc->rx.active); 672 if (slot != NULL) 673 cpsw_write_hdp_slot(sc, &sc->rx, slot); 674 cpsw_rx_enqueue(sc); 675 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len); 676 cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS); 677 678 /* Activate network interface. */ 679 sc->rx.running = 1; 680 sc->tx.running = 1; 681 sc->watchdog.timer = 0; 682 callout_init(&sc->watchdog.callout, 0); 683 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 684} 685 686/* 687 * 688 * Device Probe, Attach, Detach. 689 * 690 */ 691 692static int 693cpsw_probe(device_t dev) 694{ 695 696 if (!ofw_bus_status_okay(dev)) 697 return (ENXIO); 698 699 if (!ofw_bus_is_compatible(dev, "ti,cpsw")) 700 return (ENXIO); 701 702 device_set_desc(dev, "3-port Switch Ethernet Subsystem"); 703 return (BUS_PROBE_DEFAULT); 704} 705 706static int 707cpsw_intr_attach(struct cpsw_softc *sc) 708{ 709 int i; 710 711 for (i = 0; i < CPSW_INTR_COUNT; i++) { 712 if (bus_setup_intr(sc->dev, sc->irq_res[i], 713 INTR_TYPE_NET | INTR_MPSAFE, NULL, 714 cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) { 715 return (-1); 716 } 717 } 718 719 return (0); 720} 721 722static void 723cpsw_intr_detach(struct cpsw_softc *sc) 724{ 725 int i; 726 727 for (i = 0; i < CPSW_INTR_COUNT; i++) { 728 if (sc->ih_cookie[i]) { 729 bus_teardown_intr(sc->dev, sc->irq_res[i], 730 sc->ih_cookie[i]); 731 } 732 } 733} 734 735static int 736cpsw_get_fdt_data(struct cpsw_softc *sc, int port) 737{ 738 char *name; 739 int len, phy, vlan; 740 pcell_t phy_id[3], vlan_id; 741 phandle_t child; 742 unsigned long mdio_child_addr; 743 744 /* Find any slave with phy_id */ 745 phy = -1; 746 vlan = -1; 747 for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { 748 if (OF_getprop_alloc(child, "name", 1, (void **)&name) < 0) 749 continue; 750 if (sscanf(name, "slave@%x", 
&mdio_child_addr) != 1) { 751 OF_prop_free(name); 752 continue; 753 } 754 OF_prop_free(name); 755 if (mdio_child_addr != slave_mdio_addr[port]) 756 continue; 757 758 len = OF_getproplen(child, "phy_id"); 759 if (len / sizeof(pcell_t) == 2) { 760 /* Get phy address from fdt */ 761 if (OF_getencprop(child, "phy_id", phy_id, len) > 0) 762 phy = phy_id[1]; 763 } 764 765 len = OF_getproplen(child, "dual_emac_res_vlan"); 766 if (len / sizeof(pcell_t) == 1) { 767 /* Get phy address from fdt */ 768 if (OF_getencprop(child, "dual_emac_res_vlan", 769 &vlan_id, len) > 0) { 770 vlan = vlan_id; 771 } 772 } 773 774 break; 775 } 776 if (phy == -1) 777 return (ENXIO); 778 sc->port[port].phy = phy; 779 sc->port[port].vlan = vlan; 780 781 return (0); 782} 783 784static int 785cpsw_attach(device_t dev) 786{ 787 bus_dma_segment_t segs[1]; 788 int error, i, nsegs; 789 struct cpsw_softc *sc; 790 uint32_t reg; 791 792 sc = device_get_softc(dev); 793 sc->dev = dev; 794 sc->node = ofw_bus_get_node(dev); 795 getbinuptime(&sc->attach_uptime); 796 797 if (OF_getencprop(sc->node, "active_slave", &sc->active_slave, 798 sizeof(sc->active_slave)) <= 0) { 799 sc->active_slave = 0; 800 } 801 if (sc->active_slave > 1) 802 sc->active_slave = 1; 803 804 if (OF_hasprop(sc->node, "dual_emac")) 805 sc->dualemac = 1; 806 807 for (i = 0; i < CPSW_PORTS; i++) { 808 if (!sc->dualemac && i != sc->active_slave) 809 continue; 810 if (cpsw_get_fdt_data(sc, i) != 0) { 811 device_printf(dev, 812 "failed to get PHY address from FDT\n"); 813 return (ENXIO); 814 } 815 } 816 817 /* Initialize mutexes */ 818 mtx_init(&sc->tx.lock, device_get_nameunit(dev), 819 "cpsw TX lock", MTX_DEF); 820 mtx_init(&sc->rx.lock, device_get_nameunit(dev), 821 "cpsw RX lock", MTX_DEF); 822 823 /* Allocate IRQ resources */ 824 error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res); 825 if (error) { 826 device_printf(dev, "could not allocate IRQ resources\n"); 827 cpsw_detach(dev); 828 return (ENXIO); 829 } 830 831 sc->mem_rid = 0; 
832 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 833 &sc->mem_rid, RF_ACTIVE); 834 if (sc->mem_res == NULL) { 835 device_printf(sc->dev, "failed to allocate memory resource\n"); 836 cpsw_detach(dev); 837 return (ENXIO); 838 } 839 840 reg = cpsw_read_4(sc, CPSW_SS_IDVER); 841 device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), 842 reg & 0xFF, (reg >> 11) & 0x1F); 843 844 cpsw_add_sysctls(sc); 845 846 /* Allocate a busdma tag and DMA safe memory for mbufs. */ 847 error = bus_dma_tag_create( 848 bus_get_dma_tag(sc->dev), /* parent */ 849 1, 0, /* alignment, boundary */ 850 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 851 BUS_SPACE_MAXADDR, /* highaddr */ 852 NULL, NULL, /* filtfunc, filtfuncarg */ 853 MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ 854 MCLBYTES, 0, /* maxsegsz, flags */ 855 NULL, NULL, /* lockfunc, lockfuncarg */ 856 &sc->mbuf_dtag); /* dmatag */ 857 if (error) { 858 device_printf(dev, "bus_dma_tag_create failed\n"); 859 cpsw_detach(dev); 860 return (error); 861 } 862 863 /* Allocate the null mbuf and pre-sync it. */ 864 sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 865 memset(sc->null_mbuf->m_data, 0, sc->null_mbuf->m_ext.ext_size); 866 bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap); 867 bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap, 868 sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 869 bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap, 870 BUS_DMASYNC_PREWRITE); 871 sc->null_mbuf_paddr = segs[0].ds_addr; 872 873 cpsw_init_slots(sc); 874 875 /* Allocate slots to TX and RX queues. */ 876 STAILQ_INIT(&sc->rx.avail); 877 STAILQ_INIT(&sc->rx.active); 878 STAILQ_INIT(&sc->tx.avail); 879 STAILQ_INIT(&sc->tx.active); 880 // For now: 128 slots to TX, rest to RX. 881 // XXX TODO: start with 32/64 and grow dynamically based on demand. 
882 if (cpsw_add_slots(sc, &sc->tx, 128) || 883 cpsw_add_slots(sc, &sc->rx, -1)) { 884 device_printf(dev, "failed to allocate dmamaps\n"); 885 cpsw_detach(dev); 886 return (ENOMEM); 887 } 888 device_printf(dev, "Initial queue size TX=%d RX=%d\n", 889 sc->tx.queue_slots, sc->rx.queue_slots); 890 891 sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); 892 sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); 893 894 if (cpsw_intr_attach(sc) == -1) { 895 device_printf(dev, "failed to setup interrupts\n"); 896 cpsw_detach(dev); 897 return (ENXIO); 898 } 899 900#ifdef CPSW_ETHERSWITCH 901 for (i = 0; i < CPSW_VLANS; i++) 902 cpsw_vgroups[i].vid = -1; 903#endif 904 905 /* Reset the controller. */ 906 cpsw_reset(sc); 907 cpsw_init(sc); 908 909 for (i = 0; i < CPSW_PORTS; i++) { 910 if (!sc->dualemac && i != sc->active_slave) 911 continue; 912 sc->port[i].dev = device_add_child(dev, "cpsw", i); 913 if (sc->port[i].dev == NULL) { 914 cpsw_detach(dev); 915 return (ENXIO); 916 } 917 } 918 bus_generic_probe(dev); 919 bus_generic_attach(dev); 920 921 return (0); 922} 923 924static int 925cpsw_detach(device_t dev) 926{ 927 struct cpsw_softc *sc; 928 int error, i; 929 930 bus_generic_detach(dev); 931 sc = device_get_softc(dev); 932 933 for (i = 0; i < CPSW_PORTS; i++) { 934 if (sc->port[i].dev) 935 device_delete_child(dev, sc->port[i].dev); 936 } 937 938 if (device_is_attached(dev)) { 939 callout_stop(&sc->watchdog.callout); 940 callout_drain(&sc->watchdog.callout); 941 } 942 943 /* Stop and release all interrupts */ 944 cpsw_intr_detach(sc); 945 946 /* Free dmamaps and mbufs */ 947 for (i = 0; i < nitems(sc->_slots); ++i) 948 cpsw_free_slot(sc, &sc->_slots[i]); 949 950 /* Free null mbuf. 
*/ 951 if (sc->null_mbuf_dmamap) { 952 bus_dmamap_unload(sc->mbuf_dtag, sc->null_mbuf_dmamap); 953 error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap); 954 KASSERT(error == 0, ("Mapping still active")); 955 m_freem(sc->null_mbuf); 956 } 957 958 /* Free DMA tag */ 959 if (sc->mbuf_dtag) { 960 error = bus_dma_tag_destroy(sc->mbuf_dtag); 961 KASSERT(error == 0, ("Unable to destroy DMA tag")); 962 } 963 964 /* Free IO memory handler */ 965 if (sc->mem_res != NULL) 966 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); 967 bus_release_resources(dev, irq_res_spec, sc->irq_res); 968 969 /* Destroy mutexes */ 970 mtx_destroy(&sc->rx.lock); 971 mtx_destroy(&sc->tx.lock); 972 973 /* Detach the switch device, if present. */ 974 error = bus_generic_detach(dev); 975 if (error != 0) 976 return (error); 977 978 return (device_delete_children(dev)); 979} 980 981static phandle_t 982cpsw_get_node(device_t bus, device_t dev) 983{ 984 985 /* Share controller node with port device. 
*/ 986 return (ofw_bus_get_node(bus)); 987} 988 989static int 990cpswp_probe(device_t dev) 991{ 992 993 if (device_get_unit(dev) > 1) { 994 device_printf(dev, "Only two ports are supported.\n"); 995 return (ENXIO); 996 } 997 device_set_desc(dev, "Ethernet Switch Port"); 998 999 return (BUS_PROBE_DEFAULT); 1000} 1001 1002static int 1003cpswp_attach(device_t dev) 1004{ 1005 int error; 1006 struct ifnet *ifp; 1007 struct cpswp_softc *sc; 1008 uint32_t reg; 1009 uint8_t mac_addr[ETHER_ADDR_LEN]; 1010 1011 sc = device_get_softc(dev); 1012 sc->dev = dev; 1013 sc->pdev = device_get_parent(dev); 1014 sc->swsc = device_get_softc(sc->pdev); 1015 sc->unit = device_get_unit(dev); 1016 sc->phy = sc->swsc->port[sc->unit].phy; 1017 sc->vlan = sc->swsc->port[sc->unit].vlan; 1018 if (sc->swsc->dualemac && sc->vlan == -1) 1019 sc->vlan = sc->unit + 1; 1020 1021 if (sc->unit == 0) { 1022 sc->physel = MDIOUSERPHYSEL0; 1023 sc->phyaccess = MDIOUSERACCESS0; 1024 } else { 1025 sc->physel = MDIOUSERPHYSEL1; 1026 sc->phyaccess = MDIOUSERACCESS1; 1027 } 1028 1029 mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock", 1030 MTX_DEF); 1031 1032 /* Allocate network interface */ 1033 ifp = sc->ifp = if_alloc(IFT_ETHER); 1034 if (ifp == NULL) { 1035 cpswp_detach(dev); 1036 return (ENXIO); 1037 } 1038 1039 if_initname(ifp, device_get_name(sc->dev), sc->unit); 1040 ifp->if_softc = sc; 1041 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; 1042 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN? 
1043 ifp->if_capenable = ifp->if_capabilities; 1044 1045 ifp->if_init = cpswp_init; 1046 ifp->if_start = cpswp_start; 1047 ifp->if_ioctl = cpswp_ioctl; 1048 1049 ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots; 1050 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 1051 IFQ_SET_READY(&ifp->if_snd); 1052 1053 /* Get high part of MAC address from control module (mac_id[0|1]_hi) */ 1054 ti_scm_reg_read_4(SCM_MAC_ID0_HI + sc->unit * 8, ®); 1055 mac_addr[0] = reg & 0xFF; 1056 mac_addr[1] = (reg >> 8) & 0xFF; 1057 mac_addr[2] = (reg >> 16) & 0xFF; 1058 mac_addr[3] = (reg >> 24) & 0xFF; 1059 1060 /* Get low part of MAC address from control module (mac_id[0|1]_lo) */ 1061 ti_scm_reg_read_4(SCM_MAC_ID0_LO + sc->unit * 8, ®); 1062 mac_addr[4] = reg & 0xFF; 1063 mac_addr[5] = (reg >> 8) & 0xFF; 1064 1065 error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd, 1066 cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0); 1067 if (error) { 1068 device_printf(dev, "attaching PHYs failed\n"); 1069 cpswp_detach(dev); 1070 return (error); 1071 } 1072 sc->mii = device_get_softc(sc->miibus); 1073 1074 /* Select PHY and enable interrupts */ 1075 cpsw_write_4(sc->swsc, sc->physel, 1076 MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F)); 1077 1078 ether_ifattach(sc->ifp, mac_addr); 1079 callout_init(&sc->mii_callout, 0); 1080 1081 return (0); 1082} 1083 1084static int 1085cpswp_detach(device_t dev) 1086{ 1087 struct cpswp_softc *sc; 1088 1089 sc = device_get_softc(dev); 1090 CPSW_DEBUGF(sc->swsc, ("")); 1091 if (device_is_attached(dev)) { 1092 ether_ifdetach(sc->ifp); 1093 CPSW_PORT_LOCK(sc); 1094 cpswp_stop_locked(sc); 1095 CPSW_PORT_UNLOCK(sc); 1096 callout_drain(&sc->mii_callout); 1097 } 1098 1099 bus_generic_detach(dev); 1100 1101 if_free(sc->ifp); 1102 mtx_destroy(&sc->lock); 1103 1104 return (0); 1105} 1106 1107/* 1108 * 1109 * Init/Shutdown. 
1110 * 1111 */ 1112 1113static int 1114cpsw_ports_down(struct cpsw_softc *sc) 1115{ 1116 struct cpswp_softc *psc; 1117 struct ifnet *ifp1, *ifp2; 1118 1119 if (!sc->dualemac) 1120 return (1); 1121 psc = device_get_softc(sc->port[0].dev); 1122 ifp1 = psc->ifp; 1123 psc = device_get_softc(sc->port[1].dev); 1124 ifp2 = psc->ifp; 1125 if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0) 1126 return (1); 1127 1128 return (0); 1129} 1130 1131static void 1132cpswp_init(void *arg) 1133{ 1134 struct cpswp_softc *sc = arg; 1135 1136 CPSW_DEBUGF(sc->swsc, ("")); 1137 CPSW_PORT_LOCK(sc); 1138 cpswp_init_locked(arg); 1139 CPSW_PORT_UNLOCK(sc); 1140} 1141 1142static void 1143cpswp_init_locked(void *arg) 1144{ 1145#ifdef CPSW_ETHERSWITCH 1146 int i; 1147#endif 1148 struct cpswp_softc *sc = arg; 1149 struct ifnet *ifp; 1150 uint32_t reg; 1151 1152 CPSW_DEBUGF(sc->swsc, ("")); 1153 CPSW_PORT_LOCK_ASSERT(sc); 1154 ifp = sc->ifp; 1155 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1156 return; 1157 1158 getbinuptime(&sc->init_uptime); 1159 1160 if (!sc->swsc->rx.running && !sc->swsc->tx.running) { 1161 /* Reset the controller. */ 1162 cpsw_reset(sc->swsc); 1163 cpsw_init(sc->swsc); 1164 } 1165 1166 /* Set Slave Mapping. */ 1167 cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210); 1168 cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1), 1169 0x33221100); 1170 cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); 1171 /* Enable MAC RX/TX modules. */ 1172 /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ 1173 /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. 
*/ 1174 reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); 1175 reg |= CPSW_SL_MACTL_GMII_ENABLE; 1176 cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); 1177 1178 /* Initialize ALE: set port to forwarding, initialize addrs */ 1179 cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), 1180 ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD); 1181 cpswp_ale_update_addresses(sc, 1); 1182 1183 if (sc->swsc->dualemac) { 1184 /* Set Port VID. */ 1185 cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1), 1186 sc->vlan & 0xfff); 1187 cpsw_ale_update_vlan_table(sc->swsc, sc->vlan, 1188 (1 << (sc->unit + 1)) | (1 << 0), /* Member list */ 1189 (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */ 1190 (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */ 1191#ifdef CPSW_ETHERSWITCH 1192 for (i = 0; i < CPSW_VLANS; i++) { 1193 if (cpsw_vgroups[i].vid != -1) 1194 continue; 1195 cpsw_vgroups[i].vid = sc->vlan; 1196 break; 1197 } 1198#endif 1199 } 1200 1201 mii_mediachg(sc->mii); 1202 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 1203 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1204 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1205} 1206 1207static int 1208cpsw_shutdown(device_t dev) 1209{ 1210 struct cpsw_softc *sc; 1211 struct cpswp_softc *psc; 1212 int i; 1213 1214 sc = device_get_softc(dev); 1215 CPSW_DEBUGF(sc, ("")); 1216 for (i = 0; i < CPSW_PORTS; i++) { 1217 if (!sc->dualemac && i != sc->active_slave) 1218 continue; 1219 psc = device_get_softc(sc->port[i].dev); 1220 CPSW_PORT_LOCK(psc); 1221 cpswp_stop_locked(psc); 1222 CPSW_PORT_UNLOCK(psc); 1223 } 1224 1225 return (0); 1226} 1227 1228static void 1229cpsw_rx_teardown(struct cpsw_softc *sc) 1230{ 1231 int i = 0; 1232 1233 CPSW_RX_LOCK(sc); 1234 CPSW_DEBUGF(sc, ("starting RX teardown")); 1235 sc->rx.teardown = 1; 1236 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); 1237 CPSW_RX_UNLOCK(sc); 1238 while (sc->rx.running) { 1239 if (++i > 10) { 1240 device_printf(sc->dev, 1241 "Unable to cleanly shutdown 
receiver\n"); 1242 return; 1243 } 1244 DELAY(200); 1245 } 1246 if (!sc->rx.running) 1247 CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i)); 1248} 1249 1250static void 1251cpsw_tx_teardown(struct cpsw_softc *sc) 1252{ 1253 int i = 0; 1254 1255 CPSW_TX_LOCK(sc); 1256 CPSW_DEBUGF(sc, ("starting TX teardown")); 1257 /* Start the TX queue teardown if queue is not empty. */ 1258 if (STAILQ_FIRST(&sc->tx.active) != NULL) 1259 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); 1260 else 1261 sc->tx.teardown = 1; 1262 cpsw_tx_dequeue(sc); 1263 while (sc->tx.running && ++i < 10) { 1264 DELAY(200); 1265 cpsw_tx_dequeue(sc); 1266 } 1267 if (sc->tx.running) { 1268 device_printf(sc->dev, 1269 "Unable to cleanly shutdown transmitter\n"); 1270 } 1271 CPSW_DEBUGF(sc, 1272 ("finished TX teardown (%d retries, %d idle buffers)", i, 1273 sc->tx.active_queue_len)); 1274 CPSW_TX_UNLOCK(sc); 1275} 1276 1277static void 1278cpswp_stop_locked(struct cpswp_softc *sc) 1279{ 1280 struct ifnet *ifp; 1281 uint32_t reg; 1282 1283 ifp = sc->ifp; 1284 CPSW_DEBUGF(sc->swsc, ("")); 1285 CPSW_PORT_LOCK_ASSERT(sc); 1286 1287 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1288 return; 1289 1290 /* Disable interface */ 1291 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1292 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1293 1294 /* Stop ticker */ 1295 callout_stop(&sc->mii_callout); 1296 1297 /* Tear down the RX/TX queues. */ 1298 if (cpsw_ports_down(sc->swsc)) { 1299 cpsw_rx_teardown(sc->swsc); 1300 cpsw_tx_teardown(sc->swsc); 1301 } 1302 1303 /* Stop MAC RX/TX modules. */ 1304 reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); 1305 reg &= ~CPSW_SL_MACTL_GMII_ENABLE; 1306 cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); 1307 1308 if (cpsw_ports_down(sc->swsc)) { 1309 /* Capture stats before we reset controller. */ 1310 cpsw_stats_collect(sc->swsc); 1311 1312 cpsw_reset(sc->swsc); 1313 cpsw_init(sc->swsc); 1314 } 1315} 1316 1317/* 1318 * Suspend/Resume. 
 */

/* Suspend: stop every active port, same walk as cpsw_shutdown(). */
static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	int i;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, (""));
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[i].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}

	return (0);
}

/* Resume: placeholder only — nothing is restored yet. */
static int
cpsw_resume(device_t dev)
{
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, ("UNIMPLEMENTED"));

	return (0);
}

/*
 *
 * IOCTL
 *
 */

/* Toggle promiscuous mode by flipping the ALE bypass bit (see below). */
static void
cpsw_set_promisc(struct cpswp_softc *sc, int set)
{
	uint32_t reg;

	/*
	 * Enabling promiscuous mode requires ALE_BYPASS to be enabled.
	 * That disables the ALE forwarding logic and causes every
	 * packet to be sent only to the host port.  In bypass mode,
	 * the ALE processes host port transmit packets the same as in
	 * normal mode.
	 */
	reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL);
	reg &= ~CPSW_ALE_CTL_BYPASS;
	if (set)
		reg |= CPSW_ALE_CTL_BYPASS;
	cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg);
}

/* All-multicast is not supported; just warn when someone asks for it. */
static void
cpsw_set_allmulti(struct cpswp_softc *sc, int set)
{
	if (set) {
		printf("All-multicast mode unimplemented\n");
	}
}

/*
 * Per-port ioctl handler.  Handles capability toggles, up/down and
 * promisc/allmulti flag changes, multicast list updates (via ALE
 * rewrites), and media ioctls; everything else goes to ether_ioctl().
 */
static int
cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cpswp_softc *sc;
	struct ifreq *ifr;
	int error;
	uint32_t changed;

	error = 0;
	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFCAP:
		changed = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (changed & IFCAP_HWCSUM) {
			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM)
				ifp->if_capenable |= IFCAP_HWCSUM;
			else
				ifp->if_capenable &= ~IFCAP_HWCSUM;
		}
		error = 0;
		break;
	case SIOCSIFFLAGS:
		CPSW_PORT_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Already running: only apply flag deltas. */
				changed = ifp->if_flags ^ sc->if_flags;
				CPSW_DEBUGF(sc->swsc,
				    ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)",
				    changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    ifp->if_flags & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    ifp->if_flags & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(sc->swsc,
				    ("SIOCSIFFLAGS: starting up"));
				cpswp_init_locked(sc);
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down"));
			cpswp_stop_locked(sc);
		}

		/* Remember the flags so the next delta can be computed. */
		sc->if_flags = ifp->if_flags;
		CPSW_PORT_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		cpswp_ale_update_addresses(sc, 0);
		break;
	case SIOCDELMULTI:
		/* Ugh.  DELMULTI doesn't provide the specific address
		   being removed, so the best we can do is remove
		   everything and rebuild it all. */
		cpswp_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

/*
 *
 * MIIBUS
 *
 */

/*
 * Poll the given MDIO access register until its GO bit clears.
 * Returns 1 when the bus is idle, 0 after CPSW_MIIBUS_RETRIES attempts.
 */
static int
cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg)
{
	uint32_t r, retries = CPSW_MIIBUS_RETRIES;

	while (--retries) {
		r = cpsw_read_4(sc, reg);
		if ((r & MDIO_PHYACCESS_GO) == 0)
			return (1);
		DELAY(CPSW_MIIBUS_DELAY);
	}

	return (0);
}

/*
 * miibus read: issue a GO read command for (phy, reg) and return the low
 * 16 bits of the result.  Returns 0 on bus-busy timeout or missing ACK
 * (miibus convention for a failed read).
 */
static int
cpswp_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cpswp_softc *sc;
	uint32_t cmd, r;

	sc = device_get_softc(dev);
	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO not ready to read\n");
		return (0);
	}

	/* Set GO, reg, phy */
	cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);

	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO timed out during read\n");
		return (0);
	}

	r = cpsw_read_4(sc->swsc, sc->phyaccess);
	if ((r & MDIO_PHYACCESS_ACK) == 0) {
		device_printf(dev, "Failed to read from PHY.\n");
		r = 0;
	}
	return (r & 0xFFFF);
}

/*
 * miibus write: issue a GO|WRITE command carrying (phy, reg, value).
 * Always returns 0; errors are only reported on the console.
 */
static int
cpswp_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct cpswp_softc *sc;
	uint32_t cmd;

	sc = device_get_softc(dev);
	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO not ready to write\n");
		return (0);
	}

	/* Set GO, WRITE, reg, phy, and value */
	cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE |
	    (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF);
	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);

	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO timed out during write\n");
		return (0);
	}

	return (0);
}

/*
 * miibus status-change callback: reprogram the sliver MACCONTROL
 * speed/duplex bits from the current MII media word.
 */
static void
cpswp_miibus_statchg(device_t dev)
{
	struct cpswp_softc *sc;
	uint32_t mac_control, reg;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc->swsc, (""));

	reg = CPSW_SL_MACCONTROL(sc->unit);
	mac_control = cpsw_read_4(sc->swsc, reg);
	mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
	    CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);

	switch(IFM_SUBTYPE(sc->mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		mac_control |= CPSW_SL_MACTL_GIG;
		break;

	case IFM_100_TX:
		mac_control |= CPSW_SL_MACTL_IFCTL_A;
		break;
	}
	if (sc->mii->mii_media_active & IFM_FDX)
		mac_control |= CPSW_SL_MACTL_FULLDUPLEX;

	cpsw_write_4(sc->swsc, reg, mac_control);
}

/*
 *
 * Transmit/Receive Packets.
1564 * 1565 */ 1566static void 1567cpsw_intr_rx(void *arg) 1568{ 1569 struct cpsw_softc *sc; 1570 struct ifnet *ifp; 1571 struct mbuf *received, *next; 1572 1573 sc = (struct cpsw_softc *)arg; 1574 CPSW_RX_LOCK(sc); 1575 if (sc->rx.teardown) { 1576 sc->rx.running = 0; 1577 sc->rx.teardown = 0; 1578 cpsw_write_cp(sc, &sc->rx, 0xfffffffc); 1579 } 1580 received = cpsw_rx_dequeue(sc); 1581 cpsw_rx_enqueue(sc); 1582 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); 1583 CPSW_RX_UNLOCK(sc); 1584 1585 while (received != NULL) { 1586 next = received->m_nextpkt; 1587 received->m_nextpkt = NULL; 1588 ifp = received->m_pkthdr.rcvif; 1589 (*ifp->if_input)(ifp, received); 1590 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1591 received = next; 1592 } 1593} 1594 1595static struct mbuf * 1596cpsw_rx_dequeue(struct cpsw_softc *sc) 1597{ 1598 struct cpsw_cpdma_bd bd; 1599 struct cpsw_slot *last, *slot; 1600 struct cpswp_softc *psc; 1601 struct mbuf *mb_head, *mb_tail; 1602 int port, removed = 0; 1603 1604 last = NULL; 1605 mb_head = mb_tail = NULL; 1606 1607 /* Pull completed packets off hardware RX queue. */ 1608 while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) { 1609 cpsw_cpdma_read_bd(sc, slot, &bd); 1610 1611 /* 1612 * Stop on packets still in use by hardware, but do not stop 1613 * on packets with the teardown complete flag, they will be 1614 * discarded later. 
1615 */ 1616 if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) == 1617 CPDMA_BD_OWNER) 1618 break; 1619 1620 last = slot; 1621 ++removed; 1622 STAILQ_REMOVE_HEAD(&sc->rx.active, next); 1623 STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next); 1624 1625 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); 1626 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1627 1628 if (bd.flags & CPDMA_BD_TDOWNCMPLT) { 1629 CPSW_DEBUGF(sc, ("RX teardown is complete")); 1630 m_freem(slot->mbuf); 1631 slot->mbuf = NULL; 1632 sc->rx.running = 0; 1633 sc->rx.teardown = 0; 1634 break; 1635 } 1636 1637 port = (bd.flags & CPDMA_BD_PORT_MASK) - 1; 1638 KASSERT(port >= 0 && port <= 1, 1639 ("patcket received with invalid port: %d", port)); 1640 psc = device_get_softc(sc->port[port].dev); 1641 1642 /* Set up mbuf */ 1643 /* TODO: track SOP/EOP bits to assemble a full mbuf 1644 out of received fragments. */ 1645 slot->mbuf->m_data += bd.bufoff; 1646 slot->mbuf->m_len = bd.buflen; 1647 if (bd.flags & CPDMA_BD_SOP) { 1648 slot->mbuf->m_pkthdr.len = bd.pktlen; 1649 slot->mbuf->m_pkthdr.rcvif = psc->ifp; 1650 slot->mbuf->m_flags |= M_PKTHDR; 1651 } 1652 slot->mbuf->m_next = NULL; 1653 slot->mbuf->m_nextpkt = NULL; 1654 if (bd.flags & CPDMA_BD_PASS_CRC) 1655 m_adj(slot->mbuf, -ETHER_CRC_LEN); 1656 1657 if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1658 /* check for valid CRC by looking into pkt_err[5:4] */ 1659 if ((bd.flags & 1660 (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) == 1661 CPDMA_BD_SOP) { 1662 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1663 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1664 slot->mbuf->m_pkthdr.csum_data = 0xffff; 1665 } 1666 } 1667 1668 /* Add mbuf to packet list to be returned. 
*/ 1669 if (mb_tail) { 1670 mb_tail->m_nextpkt = slot->mbuf; 1671 } else { 1672 mb_head = slot->mbuf; 1673 } 1674 mb_tail = slot->mbuf; 1675 slot->mbuf = NULL; 1676 if (sc->rx_batch > 0 && sc->rx_batch == removed) 1677 break; 1678 } 1679 1680 if (removed != 0) { 1681 cpsw_write_cp_slot(sc, &sc->rx, last); 1682 sc->rx.queue_removes += removed; 1683 sc->rx.avail_queue_len += removed; 1684 sc->rx.active_queue_len -= removed; 1685 if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len) 1686 sc->rx.max_avail_queue_len = sc->rx.avail_queue_len; 1687 CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed)); 1688 } 1689 1690 return (mb_head); 1691} 1692 1693static void 1694cpsw_rx_enqueue(struct cpsw_softc *sc) 1695{ 1696 bus_dma_segment_t seg[1]; 1697 struct cpsw_cpdma_bd bd; 1698 struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot; 1699 int error, nsegs, added = 0; 1700 uint32_t flags; 1701 1702 /* Register new mbufs with hardware. */ 1703 first_new_slot = NULL; 1704 last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); 1705 while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { 1706 if (first_new_slot == NULL) 1707 first_new_slot = slot; 1708 if (slot->mbuf == NULL) { 1709 slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1710 if (slot->mbuf == NULL) { 1711 device_printf(sc->dev, 1712 "Unable to fill RX queue\n"); 1713 break; 1714 } 1715 slot->mbuf->m_len = 1716 slot->mbuf->m_pkthdr.len = 1717 slot->mbuf->m_ext.ext_size; 1718 } 1719 1720 error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, 1721 slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); 1722 1723 KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); 1724 KASSERT(error == 0, ("DMA error (error=%d)", error)); 1725 if (error != 0 || nsegs != 1) { 1726 device_printf(sc->dev, 1727 "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", 1728 __func__, nsegs, error); 1729 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1730 m_freem(slot->mbuf); 1731 slot->mbuf = NULL; 
1732 break; 1733 } 1734 1735 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD); 1736 1737 /* Create and submit new rx descriptor. */ 1738 if ((next = STAILQ_NEXT(slot, next)) != NULL) 1739 bd.next = cpsw_cpdma_bd_paddr(sc, next); 1740 else 1741 bd.next = 0; 1742 bd.bufptr = seg->ds_addr; 1743 bd.bufoff = 0; 1744 bd.buflen = MCLBYTES - 1; 1745 bd.pktlen = bd.buflen; 1746 bd.flags = CPDMA_BD_OWNER; 1747 cpsw_cpdma_write_bd(sc, slot, &bd); 1748 ++added; 1749 1750 STAILQ_REMOVE_HEAD(&sc->rx.avail, next); 1751 STAILQ_INSERT_TAIL(&sc->rx.active, slot, next); 1752 } 1753 1754 if (added == 0 || first_new_slot == NULL) 1755 return; 1756 1757 CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added)); 1758 1759 /* Link new entries to hardware RX queue. */ 1760 if (last_old_slot == NULL) { 1761 /* Start a fresh queue. */ 1762 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1763 } else { 1764 /* Add buffers to end of current queue. */ 1765 cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); 1766 /* If underrun, restart queue. 
*/ 1767 if ((flags = cpsw_cpdma_read_bd_flags(sc, last_old_slot)) & 1768 CPDMA_BD_EOQ) { 1769 flags &= ~CPDMA_BD_EOQ; 1770 cpsw_cpdma_write_bd_flags(sc, last_old_slot, flags); 1771 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1772 sc->rx.queue_restart++; 1773 } 1774 } 1775 sc->rx.queue_adds += added; 1776 sc->rx.avail_queue_len -= added; 1777 sc->rx.active_queue_len += added; 1778 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added); 1779 if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) { 1780 sc->rx.max_active_queue_len = sc->rx.active_queue_len; 1781 } 1782} 1783 1784static void 1785cpswp_start(struct ifnet *ifp) 1786{ 1787 struct cpswp_softc *sc; 1788 1789 sc = ifp->if_softc; 1790 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 1791 sc->swsc->tx.running == 0) { 1792 return; 1793 } 1794 CPSW_TX_LOCK(sc->swsc); 1795 cpswp_tx_enqueue(sc); 1796 cpsw_tx_dequeue(sc->swsc); 1797 CPSW_TX_UNLOCK(sc->swsc); 1798} 1799 1800static void 1801cpsw_intr_tx(void *arg) 1802{ 1803 struct cpsw_softc *sc; 1804 1805 sc = (struct cpsw_softc *)arg; 1806 CPSW_TX_LOCK(sc); 1807 if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc) 1808 cpsw_write_cp(sc, &sc->tx, 0xfffffffc); 1809 cpsw_tx_dequeue(sc); 1810 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2); 1811 CPSW_TX_UNLOCK(sc); 1812} 1813 1814static void 1815cpswp_tx_enqueue(struct cpswp_softc *sc) 1816{ 1817 bus_dma_segment_t segs[CPSW_TXFRAGS]; 1818 struct cpsw_cpdma_bd bd; 1819 struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot; 1820 struct mbuf *m0; 1821 int error, nsegs, seg, added = 0, padlen; 1822 1823 /* Pull pending packets from IF queue and prep them for DMA. 
*/ 1824 last = NULL; 1825 first_new_slot = NULL; 1826 last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next); 1827 while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) { 1828 IF_DEQUEUE(&sc->ifp->if_snd, m0); 1829 if (m0 == NULL) 1830 break; 1831 1832 slot->mbuf = m0; 1833 padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len; 1834 if (padlen < 0) 1835 padlen = 0; 1836 1837 /* Create mapping in DMA memory */ 1838 error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag, 1839 slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 1840 /* If the packet is too fragmented, try to simplify. */ 1841 if (error == EFBIG || 1842 (error == 0 && 1843 nsegs + (padlen > 0 ? 1 : 0) > sc->swsc->tx.avail_queue_len)) { 1844 bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); 1845 if (padlen > 0) /* May as well add padding. */ 1846 m_append(slot->mbuf, padlen, 1847 sc->swsc->null_mbuf->m_data); 1848 m0 = m_defrag(slot->mbuf, M_NOWAIT); 1849 if (m0 == NULL) { 1850 device_printf(sc->dev, 1851 "Can't defragment packet; dropping\n"); 1852 m_freem(slot->mbuf); 1853 } else { 1854 CPSW_DEBUGF(sc->swsc, 1855 ("Requeueing defragmented packet")); 1856 IF_PREPEND(&sc->ifp->if_snd, m0); 1857 } 1858 slot->mbuf = NULL; 1859 continue; 1860 } 1861 if (error != 0) { 1862 device_printf(sc->dev, 1863 "%s: Can't setup DMA (error=%d), dropping packet\n", 1864 __func__, error); 1865 bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); 1866 m_freem(slot->mbuf); 1867 slot->mbuf = NULL; 1868 break; 1869 } 1870 1871 bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap, 1872 BUS_DMASYNC_PREWRITE); 1873 1874 CPSW_DEBUGF(sc->swsc, 1875 ("Queueing TX packet: %d segments + %d pad bytes", 1876 nsegs, padlen)); 1877 1878 if (first_new_slot == NULL) 1879 first_new_slot = slot; 1880 1881 /* Link from the previous descriptor. 
*/ 1882 if (last != NULL) 1883 cpsw_cpdma_write_bd_next(sc->swsc, last, slot); 1884 1885 slot->ifp = sc->ifp; 1886 1887 /* If there is only one segment, the for() loop 1888 * gets skipped and the single buffer gets set up 1889 * as both SOP and EOP. */ 1890 if (nsegs > 1) { 1891 next = STAILQ_NEXT(slot, next); 1892 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1893 } else 1894 bd.next = 0; 1895 /* Start by setting up the first buffer. */ 1896 bd.bufptr = segs[0].ds_addr; 1897 bd.bufoff = 0; 1898 bd.buflen = segs[0].ds_len; 1899 bd.pktlen = m_length(slot->mbuf, NULL) + padlen; 1900 bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER; 1901 if (sc->swsc->dualemac) { 1902 bd.flags |= CPDMA_BD_TO_PORT; 1903 bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK); 1904 } 1905 for (seg = 1; seg < nsegs; ++seg) { 1906 /* Save the previous buffer (which isn't EOP) */ 1907 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1908 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1909 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1910 slot = STAILQ_FIRST(&sc->swsc->tx.avail); 1911 1912 /* Setup next buffer (which isn't SOP) */ 1913 if (nsegs > seg + 1) { 1914 next = STAILQ_NEXT(slot, next); 1915 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1916 } else 1917 bd.next = 0; 1918 bd.bufptr = segs[seg].ds_addr; 1919 bd.bufoff = 0; 1920 bd.buflen = segs[seg].ds_len; 1921 bd.pktlen = 0; 1922 bd.flags = CPDMA_BD_OWNER; 1923 } 1924 /* Save the final buffer. */ 1925 if (padlen <= 0) 1926 bd.flags |= CPDMA_BD_EOP; 1927 else { 1928 next = STAILQ_NEXT(slot, next); 1929 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1930 } 1931 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1932 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1933 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1934 1935 if (padlen > 0) { 1936 slot = STAILQ_FIRST(&sc->swsc->tx.avail); 1937 1938 /* Setup buffer of null pad bytes (definitely EOP). 
*/ 1939 bd.next = 0; 1940 bd.bufptr = sc->swsc->null_mbuf_paddr; 1941 bd.bufoff = 0; 1942 bd.buflen = padlen; 1943 bd.pktlen = 0; 1944 bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER; 1945 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1946 ++nsegs; 1947 1948 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1949 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1950 } 1951 1952 last = slot; 1953 1954 added += nsegs; 1955 if (nsegs > sc->swsc->tx.longest_chain) 1956 sc->swsc->tx.longest_chain = nsegs; 1957 1958 // TODO: Should we defer the BPF tap until 1959 // after all packets are queued? 1960 BPF_MTAP(sc->ifp, m0); 1961 } 1962 1963 if (first_new_slot == NULL) 1964 return; 1965 1966 /* Attach the list of new buffers to the hardware TX queue. */ 1967 if (last_old_slot != NULL && 1968 (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) & 1969 CPDMA_BD_EOQ) == 0) { 1970 /* Add buffers to end of current queue. */ 1971 cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot, 1972 first_new_slot); 1973 } else { 1974 /* Start a fresh queue. */ 1975 cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); 1976 } 1977 sc->swsc->tx.queue_adds += added; 1978 sc->swsc->tx.avail_queue_len -= added; 1979 sc->swsc->tx.active_queue_len += added; 1980 if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) { 1981 sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len; 1982 } 1983 CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added)); 1984} 1985 1986static int 1987cpsw_tx_dequeue(struct cpsw_softc *sc) 1988{ 1989 struct cpsw_slot *slot, *last_removed_slot = NULL; 1990 struct cpsw_cpdma_bd bd; 1991 uint32_t flags, removed = 0; 1992 1993 /* Pull completed buffers off the hardware TX queue. */ 1994 slot = STAILQ_FIRST(&sc->tx.active); 1995 while (slot != NULL) { 1996 flags = cpsw_cpdma_read_bd_flags(sc, slot); 1997 1998 /* TearDown complete is only marked on the SOP for the packet. 
*/ 1999 if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) == 2000 (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) { 2001 sc->tx.teardown = 1; 2002 } 2003 2004 if ((flags & CPDMA_BD_OWNER) != 0 && sc->tx.teardown == 0) 2005 break; /* Hardware is still using this packet. */ 2006 2007 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); 2008 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 2009 m_freem(slot->mbuf); 2010 slot->mbuf = NULL; 2011 2012 if (slot->ifp) { 2013 if (sc->tx.teardown == 0) 2014 if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1); 2015 else 2016 if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1); 2017 } 2018 2019 /* Dequeue any additional buffers used by this packet. */ 2020 while (slot != NULL && slot->mbuf == NULL) { 2021 STAILQ_REMOVE_HEAD(&sc->tx.active, next); 2022 STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); 2023 ++removed; 2024 last_removed_slot = slot; 2025 slot = STAILQ_FIRST(&sc->tx.active); 2026 } 2027 2028 cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); 2029 2030 /* Restart the TX queue if necessary. */ 2031 cpsw_cpdma_read_bd(sc, last_removed_slot, &bd); 2032 if (slot != NULL && bd.next != 0 && (bd.flags & 2033 (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) == 2034 (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { 2035 cpsw_write_hdp_slot(sc, &sc->tx, slot); 2036 sc->tx.queue_restart++; 2037 break; 2038 } 2039 } 2040 2041 if (removed != 0) { 2042 sc->tx.queue_removes += removed; 2043 sc->tx.active_queue_len -= removed; 2044 sc->tx.avail_queue_len += removed; 2045 if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) 2046 sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; 2047 CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed)); 2048 } 2049 2050 if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) { 2051 CPSW_DEBUGF(sc, ("TX teardown is complete")); 2052 sc->tx.teardown = 0; 2053 sc->tx.running = 0; 2054 } 2055 2056 return (removed); 2057} 2058 2059/* 2060 * 2061 * Miscellaneous interrupts. 
2062 * 2063 */ 2064 2065static void 2066cpsw_intr_rx_thresh(void *arg) 2067{ 2068 struct cpsw_softc *sc; 2069 struct ifnet *ifp; 2070 struct mbuf *received, *next; 2071 2072 sc = (struct cpsw_softc *)arg; 2073 CPSW_RX_LOCK(sc); 2074 received = cpsw_rx_dequeue(sc); 2075 cpsw_rx_enqueue(sc); 2076 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); 2077 CPSW_RX_UNLOCK(sc); 2078 2079 while (received != NULL) { 2080 next = received->m_nextpkt; 2081 received->m_nextpkt = NULL; 2082 ifp = received->m_pkthdr.rcvif; 2083 (*ifp->if_input)(ifp, received); 2084 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 2085 received = next; 2086 } 2087} 2088 2089static void 2090cpsw_intr_misc_host_error(struct cpsw_softc *sc) 2091{ 2092 uint32_t intstat; 2093 uint32_t dmastat; 2094 int txerr, rxerr, txchan, rxchan; 2095 2096 printf("\n\n"); 2097 device_printf(sc->dev, 2098 "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); 2099 printf("\n\n"); 2100 intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); 2101 device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); 2102 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); 2103 device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); 2104 2105 txerr = (dmastat >> 20) & 15; 2106 txchan = (dmastat >> 16) & 7; 2107 rxerr = (dmastat >> 12) & 15; 2108 rxchan = (dmastat >> 8) & 7; 2109 2110 switch (txerr) { 2111 case 0: break; 2112 case 1: printf("SOP error on TX channel %d\n", txchan); 2113 break; 2114 case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); 2115 break; 2116 case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); 2117 break; 2118 case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); 2119 break; 2120 case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); 2121 break; 2122 case 6: printf("Packet length error on TX channel %d\n", txchan); 2123 break; 2124 default: printf("Unknown error on TX channel %d\n", txchan); 2125 break; 2126 } 2127 2128 if 
(txerr != 0) { 2129 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 2130 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); 2131 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 2132 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); 2133 cpsw_dump_queue(sc, &sc->tx.active); 2134 } 2135 2136 switch (rxerr) { 2137 case 0: break; 2138 case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); 2139 break; 2140 case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); 2141 break; 2142 case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); 2143 break; 2144 case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); 2145 break; 2146 default: printf("Unknown RX error on RX channel %d\n", rxchan); 2147 break; 2148 } 2149 2150 if (rxerr != 0) { 2151 printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", 2152 rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); 2153 printf("CPSW_CPDMA_RX%d_CP=0x%x\n", 2154 rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); 2155 cpsw_dump_queue(sc, &sc->rx.active); 2156 } 2157 2158 printf("\nALE Table\n"); 2159 cpsw_ale_dump_table(sc); 2160 2161 // XXX do something useful here?? 2162 panic("CPSW HOST ERROR INTERRUPT"); 2163 2164 // Suppress this interrupt in the future. 2165 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); 2166 printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); 2167 // The watchdog will probably reset the controller 2168 // in a little while. It will probably fail again. 
2169} 2170 2171static void 2172cpsw_intr_misc(void *arg) 2173{ 2174 struct cpsw_softc *sc = arg; 2175 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); 2176 2177 if (stat & CPSW_WR_C_MISC_EVNT_PEND) 2178 CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); 2179 if (stat & CPSW_WR_C_MISC_STAT_PEND) 2180 cpsw_stats_collect(sc); 2181 if (stat & CPSW_WR_C_MISC_HOST_PEND) 2182 cpsw_intr_misc_host_error(sc); 2183 if (stat & CPSW_WR_C_MISC_MDIOLINK) { 2184 cpsw_write_4(sc, MDIOLINKINTMASKED, 2185 cpsw_read_4(sc, MDIOLINKINTMASKED)); 2186 } 2187 if (stat & CPSW_WR_C_MISC_MDIOUSER) { 2188 CPSW_DEBUGF(sc, 2189 ("MDIO operation completed interrupt unimplemented")); 2190 } 2191 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); 2192} 2193 2194/* 2195 * 2196 * Periodic Checks and Watchdog. 2197 * 2198 */ 2199 2200static void 2201cpswp_tick(void *msc) 2202{ 2203 struct cpswp_softc *sc = msc; 2204 2205 /* Check for media type change */ 2206 mii_tick(sc->mii); 2207 if (sc->media_status != sc->mii->mii_media.ifm_media) { 2208 printf("%s: media type changed (ifm_media=%x)\n", __func__, 2209 sc->mii->mii_media.ifm_media); 2210 cpswp_ifmedia_upd(sc->ifp); 2211 } 2212 2213 /* Schedule another timeout one second from now */ 2214 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 2215} 2216 2217static void 2218cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2219{ 2220 struct cpswp_softc *sc; 2221 struct mii_data *mii; 2222 2223 sc = ifp->if_softc; 2224 CPSW_DEBUGF(sc->swsc, ("")); 2225 CPSW_PORT_LOCK(sc); 2226 2227 mii = sc->mii; 2228 mii_pollstat(mii); 2229 2230 ifmr->ifm_active = mii->mii_media_active; 2231 ifmr->ifm_status = mii->mii_media_status; 2232 CPSW_PORT_UNLOCK(sc); 2233} 2234 2235static int 2236cpswp_ifmedia_upd(struct ifnet *ifp) 2237{ 2238 struct cpswp_softc *sc; 2239 2240 sc = ifp->if_softc; 2241 CPSW_DEBUGF(sc->swsc, ("")); 2242 CPSW_PORT_LOCK(sc); 2243 mii_mediachg(sc->mii); 2244 sc->media_status = sc->mii->mii_media.ifm_media; 2245 
CPSW_PORT_UNLOCK(sc); 2246 2247 return (0); 2248} 2249 2250static void 2251cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) 2252{ 2253 struct cpswp_softc *psc; 2254 int i; 2255 2256 cpsw_debugf_head("CPSW watchdog"); 2257 device_printf(sc->dev, "watchdog timeout\n"); 2258 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0, 2259 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0))); 2260 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0, 2261 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0))); 2262 cpsw_dump_queue(sc, &sc->tx.active); 2263 for (i = 0; i < CPSW_PORTS; i++) { 2264 if (!sc->dualemac && i != sc->active_slave) 2265 continue; 2266 psc = device_get_softc(sc->port[i].dev); 2267 CPSW_PORT_LOCK(psc); 2268 cpswp_stop_locked(psc); 2269 CPSW_PORT_UNLOCK(psc); 2270 } 2271} 2272 2273static void 2274cpsw_tx_watchdog(void *msc) 2275{ 2276 struct cpsw_softc *sc; 2277 2278 sc = msc; 2279 CPSW_TX_LOCK(sc); 2280 if (sc->tx.active_queue_len == 0 || !sc->tx.running) { 2281 sc->watchdog.timer = 0; /* Nothing to do. */ 2282 } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { 2283 sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ 2284 } else if (cpsw_tx_dequeue(sc) > 0) { 2285 sc->watchdog.timer = 0; /* We just did something. */ 2286 } else { 2287 /* There was something to do but it didn't get done. */ 2288 ++sc->watchdog.timer; 2289 if (sc->watchdog.timer > 5) { 2290 sc->watchdog.timer = 0; 2291 ++sc->watchdog.resets; 2292 cpsw_tx_watchdog_full_reset(sc); 2293 } 2294 } 2295 sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; 2296 CPSW_TX_UNLOCK(sc); 2297 2298 /* Schedule another timeout one second from now */ 2299 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 2300} 2301 2302/* 2303 * 2304 * ALE support routines. 
2305 * 2306 */ 2307 2308static void 2309cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2310{ 2311 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); 2312 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); 2313 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); 2314 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); 2315} 2316 2317static void 2318cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2319{ 2320 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); 2321 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); 2322 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); 2323 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); 2324} 2325 2326static void 2327cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) 2328{ 2329 int i; 2330 uint32_t ale_entry[3]; 2331 2332 /* First four entries are link address and broadcast. */ 2333 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2334 cpsw_ale_read_entry(sc, i, ale_entry); 2335 if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || 2336 ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && 2337 ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ 2338 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; 2339 cpsw_ale_write_entry(sc, i, ale_entry); 2340 } 2341 } 2342} 2343 2344static int 2345cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, 2346 uint8_t *mac) 2347{ 2348 int free_index = -1, matching_index = -1, i; 2349 uint32_t ale_entry[3], ale_type; 2350 2351 /* Find a matching entry or a free entry. 
*/ 2352 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2353 cpsw_ale_read_entry(sc, i, ale_entry); 2354 2355 /* Entry Type[61:60] is 0 for free entry */ 2356 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2357 free_index = i; 2358 2359 if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && 2360 (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && 2361 (((ale_entry[0] >>24) & 0xFF) == mac[2]) && 2362 (((ale_entry[0] >>16) & 0xFF) == mac[3]) && 2363 (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && 2364 (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { 2365 matching_index = i; 2366 break; 2367 } 2368 } 2369 2370 if (matching_index < 0) { 2371 if (free_index < 0) 2372 return (ENOMEM); 2373 i = free_index; 2374 } 2375 2376 if (vlan != -1) 2377 ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; 2378 else 2379 ale_type = ALE_TYPE_ADDR << 28; 2380 2381 /* Set MAC address */ 2382 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2383 ale_entry[1] = mac[0] << 8 | mac[1]; 2384 2385 /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). 
*/ 2386 ale_entry[1] |= ALE_MCAST_FWD | ale_type; 2387 2388 /* Set portmask [68:66] */ 2389 ale_entry[2] = (portmap & 7) << 2; 2390 2391 cpsw_ale_write_entry(sc, i, ale_entry); 2392 2393 return 0; 2394} 2395 2396static void 2397cpsw_ale_dump_table(struct cpsw_softc *sc) { 2398 int i; 2399 uint32_t ale_entry[3]; 2400 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 2401 cpsw_ale_read_entry(sc, i, ale_entry); 2402 switch (ALE_TYPE(ale_entry)) { 2403 case ALE_TYPE_VLAN: 2404 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], 2405 ale_entry[1], ale_entry[0]); 2406 printf("type: %u ", ALE_TYPE(ale_entry)); 2407 printf("vlan: %u ", ALE_VLAN(ale_entry)); 2408 printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry)); 2409 printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry)); 2410 printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry)); 2411 printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry)); 2412 printf("\n"); 2413 break; 2414 case ALE_TYPE_ADDR: 2415 case ALE_TYPE_VLAN_ADDR: 2416 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], 2417 ale_entry[1], ale_entry[0]); 2418 printf("type: %u ", ALE_TYPE(ale_entry)); 2419 printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", 2420 (ale_entry[1] >> 8) & 0xFF, 2421 (ale_entry[1] >> 0) & 0xFF, 2422 (ale_entry[0] >>24) & 0xFF, 2423 (ale_entry[0] >>16) & 0xFF, 2424 (ale_entry[0] >> 8) & 0xFF, 2425 (ale_entry[0] >> 0) & 0xFF); 2426 printf(ALE_MCAST(ale_entry) ? 
"mcast " : "ucast "); 2427 if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) 2428 printf("vlan: %u ", ALE_VLAN(ale_entry)); 2429 printf("port: %u ", ALE_PORTS(ale_entry)); 2430 printf("\n"); 2431 break; 2432 } 2433 } 2434 printf("\n"); 2435} 2436 2437static int 2438cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge) 2439{ 2440 uint8_t *mac; 2441 uint32_t ale_entry[3], ale_type, portmask; 2442 struct ifmultiaddr *ifma; 2443 2444 if (sc->swsc->dualemac) { 2445 ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16; 2446 portmask = 1 << (sc->unit + 1) | 1 << 0; 2447 } else { 2448 ale_type = ALE_TYPE_ADDR << 28; 2449 portmask = 7; 2450 } 2451 2452 /* 2453 * Route incoming packets for our MAC address to Port 0 (host). 2454 * For simplicity, keep this entry at table index 0 for port 1 and 2455 * at index 2 for port 2 in the ALE. 2456 */ 2457 if_addr_rlock(sc->ifp); 2458 mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr); 2459 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2460 ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */ 2461 ale_entry[2] = 0; /* port = 0 */ 2462 cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry); 2463 2464 /* Set outgoing MAC Address for slave port. */ 2465 cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1), 2466 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); 2467 cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1), 2468 mac[5] << 8 | mac[4]); 2469 if_addr_runlock(sc->ifp); 2470 2471 /* Keep the broadcast address at table entry 1 (or 3). */ 2472 ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ 2473 /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */ 2474 ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff; 2475 ale_entry[2] = portmask << 2; 2476 cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry); 2477 2478 /* SIOCDELMULTI doesn't specify the particular address 2479 being removed, so we have to remove all and rebuild. 
*/ 2480 if (purge) 2481 cpsw_ale_remove_all_mc_entries(sc->swsc); 2482 2483 /* Set other multicast addrs desired. */ 2484 if_maddr_rlock(sc->ifp); 2485 TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { 2486 if (ifma->ifma_addr->sa_family != AF_LINK) 2487 continue; 2488 cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, 2489 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 2490 } 2491 if_maddr_runlock(sc->ifp); 2492 2493 return (0); 2494} 2495 2496static int 2497cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports, 2498 int untag, int mcregflood, int mcunregflood) 2499{ 2500 int free_index, i, matching_index; 2501 uint32_t ale_entry[3]; 2502 2503 free_index = matching_index = -1; 2504 /* Find a matching entry or a free entry. */ 2505 for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) { 2506 cpsw_ale_read_entry(sc, i, ale_entry); 2507 2508 /* Entry Type[61:60] is 0 for free entry */ 2509 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2510 free_index = i; 2511 2512 if (ALE_VLAN(ale_entry) == vlan) { 2513 matching_index = i; 2514 break; 2515 } 2516 } 2517 2518 if (matching_index < 0) { 2519 if (free_index < 0) 2520 return (-1); 2521 i = free_index; 2522 } 2523 2524 ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 | 2525 (mcunregflood & 7) << 8 | (ports & 7); 2526 ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16; 2527 ale_entry[2] = 0; 2528 cpsw_ale_write_entry(sc, i, ale_entry); 2529 2530 return (0); 2531} 2532 2533/* 2534 * 2535 * Statistics and Sysctls. 
2536 * 2537 */ 2538 2539#if 0 2540static void 2541cpsw_stats_dump(struct cpsw_softc *sc) 2542{ 2543 int i; 2544 uint32_t r; 2545 2546 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2547 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2548 cpsw_stat_sysctls[i].reg); 2549 CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, 2550 (intmax_t)sc->shadow_stats[i], r, 2551 (intmax_t)sc->shadow_stats[i] + r)); 2552 } 2553} 2554#endif 2555 2556static void 2557cpsw_stats_collect(struct cpsw_softc *sc) 2558{ 2559 int i; 2560 uint32_t r; 2561 2562 CPSW_DEBUGF(sc, ("Controller shadow statistics updated.")); 2563 2564 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2565 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2566 cpsw_stat_sysctls[i].reg); 2567 sc->shadow_stats[i] += r; 2568 cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, 2569 r); 2570 } 2571} 2572 2573static int 2574cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) 2575{ 2576 struct cpsw_softc *sc; 2577 struct cpsw_stat *stat; 2578 uint64_t result; 2579 2580 sc = (struct cpsw_softc *)arg1; 2581 stat = &cpsw_stat_sysctls[oidp->oid_number]; 2582 result = sc->shadow_stats[oidp->oid_number]; 2583 result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); 2584 return (sysctl_handle_64(oidp, &result, 0, req)); 2585} 2586 2587static int 2588cpsw_stat_attached(SYSCTL_HANDLER_ARGS) 2589{ 2590 struct cpsw_softc *sc; 2591 struct bintime t; 2592 unsigned result; 2593 2594 sc = (struct cpsw_softc *)arg1; 2595 getbinuptime(&t); 2596 bintime_sub(&t, &sc->attach_uptime); 2597 result = t.sec; 2598 return (sysctl_handle_int(oidp, &result, 0, req)); 2599} 2600 2601static int 2602cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS) 2603{ 2604 int error; 2605 struct cpsw_softc *sc; 2606 uint32_t ctrl, intr_per_ms; 2607 2608 sc = (struct cpsw_softc *)arg1; 2609 error = sysctl_handle_int(oidp, &sc->coal_us, 0, req); 2610 if (error != 0 || req->newptr == NULL) 2611 return (error); 2612 2613 ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); 2614 ctrl &= 
~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); 2615 if (sc->coal_us == 0) { 2616 /* Disable the interrupt pace hardware. */ 2617 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); 2618 cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0); 2619 cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0); 2620 return (0); 2621 } 2622 2623 if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX) 2624 sc->coal_us = CPSW_WR_C_IMAX_US_MAX; 2625 if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN) 2626 sc->coal_us = CPSW_WR_C_IMAX_US_MIN; 2627 intr_per_ms = 1000 / sc->coal_us; 2628 /* Just to make sure... */ 2629 if (intr_per_ms > CPSW_WR_C_IMAX_MAX) 2630 intr_per_ms = CPSW_WR_C_IMAX_MAX; 2631 if (intr_per_ms < CPSW_WR_C_IMAX_MIN) 2632 intr_per_ms = CPSW_WR_C_IMAX_MIN; 2633 2634 /* Set the prescale to produce 4us pulses from the 125 Mhz clock. */ 2635 ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK; 2636 2637 /* Enable the interrupt pace hardware. */ 2638 cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms); 2639 cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms); 2640 ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE; 2641 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); 2642 2643 return (0); 2644} 2645 2646static int 2647cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) 2648{ 2649 struct cpsw_softc *swsc; 2650 struct cpswp_softc *sc; 2651 struct bintime t; 2652 unsigned result; 2653 2654 swsc = arg1; 2655 sc = device_get_softc(swsc->port[arg2].dev); 2656 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) { 2657 getbinuptime(&t); 2658 bintime_sub(&t, &sc->init_uptime); 2659 result = t.sec; 2660 } else 2661 result = 0; 2662 return (sysctl_handle_int(oidp, &result, 0, req)); 2663} 2664 2665static void 2666cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, 2667 struct cpsw_queue *queue) 2668{ 2669 struct sysctl_oid_list *parent; 2670 2671 parent = SYSCTL_CHILDREN(node); 2672 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", 2673 CTLFLAG_RD, &queue->queue_slots, 0, 2674 "Total buffers currently assigned to this 
queue"); 2675 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", 2676 CTLFLAG_RD, &queue->active_queue_len, 0, 2677 "Buffers currently registered with hardware controller"); 2678 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", 2679 CTLFLAG_RD, &queue->max_active_queue_len, 0, 2680 "Max value of activeBuffers since last driver reset"); 2681 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", 2682 CTLFLAG_RD, &queue->avail_queue_len, 0, 2683 "Buffers allocated to this queue but not currently " 2684 "registered with hardware controller"); 2685 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", 2686 CTLFLAG_RD, &queue->max_avail_queue_len, 0, 2687 "Max value of availBuffers since last driver reset"); 2688 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", 2689 CTLFLAG_RD, &queue->queue_adds, 0, 2690 "Total buffers added to queue"); 2691 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", 2692 CTLFLAG_RD, &queue->queue_removes, 0, 2693 "Total buffers removed from queue"); 2694 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart", 2695 CTLFLAG_RD, &queue->queue_restart, 0, 2696 "Total times the queue has been restarted"); 2697 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", 2698 CTLFLAG_RD, &queue->longest_chain, 0, 2699 "Max buffers used for a single packet"); 2700} 2701 2702static void 2703cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, 2704 struct cpsw_softc *sc) 2705{ 2706 struct sysctl_oid_list *parent; 2707 2708 parent = SYSCTL_CHILDREN(node); 2709 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", 2710 CTLFLAG_RD, &sc->watchdog.resets, 0, 2711 "Total number of watchdog resets"); 2712} 2713 2714static void 2715cpsw_add_sysctls(struct cpsw_softc *sc) 2716{ 2717 struct sysctl_ctx_list *ctx; 2718 struct sysctl_oid *stats_node, *queue_node, *node; 2719 struct sysctl_oid_list *parent, *stats_parent, *queue_parent; 2720 struct sysctl_oid_list *ports_parent, *port_parent; 2721 char port[16]; 2722 int i; 
2723 2724 ctx = device_get_sysctl_ctx(sc->dev); 2725 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 2726 2727 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug", 2728 CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages"); 2729 2730 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "rx_batch", 2731 CTLFLAG_RW, &sc->rx_batch, 0, "Set the rx batch size"); 2732 2733 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", 2734 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU", 2735 "Time since driver attach"); 2736 2737 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us", 2738 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, cpsw_intr_coalesce, "IU", 2739 "minimum time between interrupts"); 2740 2741 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports", 2742 CTLFLAG_RD, NULL, "CPSW Ports Statistics"); 2743 ports_parent = SYSCTL_CHILDREN(node); 2744 for (i = 0; i < CPSW_PORTS; i++) { 2745 if (!sc->dualemac && i != sc->active_slave) 2746 continue; 2747 port[0] = '0' + i; 2748 port[1] = '\0'; 2749 node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO, 2750 port, CTLFLAG_RD, NULL, "CPSW Port Statistics"); 2751 port_parent = SYSCTL_CHILDREN(node); 2752 SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime", 2753 CTLTYPE_UINT | CTLFLAG_RD, sc, i, 2754 cpsw_stat_uptime, "IU", "Seconds since driver init"); 2755 } 2756 2757 stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", 2758 CTLFLAG_RD, NULL, "CPSW Statistics"); 2759 stats_parent = SYSCTL_CHILDREN(stats_node); 2760 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2761 SYSCTL_ADD_PROC(ctx, stats_parent, i, 2762 cpsw_stat_sysctls[i].oid, 2763 CTLTYPE_U64 | CTLFLAG_RD, sc, 0, 2764 cpsw_stats_sysctl, "IU", 2765 cpsw_stat_sysctls[i].oid); 2766 } 2767 2768 queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", 2769 CTLFLAG_RD, NULL, "CPSW Queue Statistics"); 2770 queue_parent = SYSCTL_CHILDREN(queue_node); 2771 2772 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", 2773 CTLFLAG_RD, NULL, "TX Queue Statistics"); 2774 
cpsw_add_queue_sysctls(ctx, node, &sc->tx); 2775 2776 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", 2777 CTLFLAG_RD, NULL, "RX Queue Statistics"); 2778 cpsw_add_queue_sysctls(ctx, node, &sc->rx); 2779 2780 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", 2781 CTLFLAG_RD, NULL, "Watchdog Statistics"); 2782 cpsw_add_watchdog_sysctls(ctx, node, sc); 2783} 2784 2785#ifdef CPSW_ETHERSWITCH 2786static etherswitch_info_t etherswitch_info = { 2787 .es_nports = CPSW_PORTS + 1, 2788 .es_nvlangroups = CPSW_VLANS, 2789 .es_name = "TI Common Platform Ethernet Switch (CPSW)", 2790 .es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q, 2791}; 2792 2793static etherswitch_info_t * 2794cpsw_getinfo(device_t dev) 2795{ 2796 return (ðerswitch_info); 2797} 2798 2799static int 2800cpsw_getport(device_t dev, etherswitch_port_t *p) 2801{ 2802 int err; 2803 struct cpsw_softc *sc; 2804 struct cpswp_softc *psc; 2805 struct ifmediareq *ifmr; 2806 uint32_t reg; 2807 2808 if (p->es_port < 0 || p->es_port > CPSW_PORTS) 2809 return (ENXIO); 2810 2811 err = 0; 2812 sc = device_get_softc(dev); 2813 if (p->es_port == CPSW_CPU_PORT) { 2814 p->es_flags |= ETHERSWITCH_PORT_CPU; 2815 ifmr = &p->es_ifmr; 2816 ifmr->ifm_current = ifmr->ifm_active = 2817 IFM_ETHER | IFM_1000_T | IFM_FDX; 2818 ifmr->ifm_mask = 0; 2819 ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; 2820 ifmr->ifm_count = 0; 2821 } else { 2822 psc = device_get_softc(sc->port[p->es_port - 1].dev); 2823 err = ifmedia_ioctl(psc->ifp, &p->es_ifr, 2824 &psc->mii->mii_media, SIOCGIFMEDIA); 2825 } 2826 reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port)); 2827 p->es_pvid = reg & ETHERSWITCH_VID_MASK; 2828 2829 reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); 2830 if (reg & ALE_PORTCTL_DROP_UNTAGGED) 2831 p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED; 2832 if (reg & ALE_PORTCTL_INGRESS) 2833 p->es_flags |= ETHERSWITCH_PORT_INGRESS; 2834 2835 return (err); 2836} 2837 2838static int 2839cpsw_setport(device_t dev, etherswitch_port_t *p) 2840{ 2841 
struct cpsw_softc *sc; 2842 struct cpswp_softc *psc; 2843 struct ifmedia *ifm; 2844 uint32_t reg; 2845 2846 if (p->es_port < 0 || p->es_port > CPSW_PORTS) 2847 return (ENXIO); 2848 2849 sc = device_get_softc(dev); 2850 if (p->es_pvid != 0) { 2851 cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port), 2852 p->es_pvid & ETHERSWITCH_VID_MASK); 2853 } 2854 2855 reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port)); 2856 if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED) 2857 reg |= ALE_PORTCTL_DROP_UNTAGGED; 2858 else 2859 reg &= ~ALE_PORTCTL_DROP_UNTAGGED; 2860 if (p->es_flags & ETHERSWITCH_PORT_INGRESS) 2861 reg |= ALE_PORTCTL_INGRESS; 2862 else 2863 reg &= ~ALE_PORTCTL_INGRESS; 2864 cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg); 2865 2866 /* CPU port does not allow media settings. */ 2867 if (p->es_port == CPSW_CPU_PORT) 2868 return (0); 2869 2870 psc = device_get_softc(sc->port[p->es_port - 1].dev); 2871 ifm = &psc->mii->mii_media; 2872 2873 return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA)); 2874} 2875 2876static int 2877cpsw_getconf(device_t dev, etherswitch_conf_t *conf) 2878{ 2879 2880 /* Return the VLAN mode. 
*/ 2881 conf->cmd = ETHERSWITCH_CONF_VLAN_MODE; 2882 conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q; 2883 2884 return (0); 2885} 2886 2887static int 2888cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg) 2889{ 2890 int i, vid; 2891 uint32_t ale_entry[3]; 2892 struct cpsw_softc *sc; 2893 2894 sc = device_get_softc(dev); 2895 2896 if (vg->es_vlangroup >= CPSW_VLANS) 2897 return (EINVAL); 2898 2899 vg->es_vid = 0; 2900 vid = cpsw_vgroups[vg->es_vlangroup].vid; 2901 if (vid == -1) 2902 return (0); 2903 2904 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 2905 cpsw_ale_read_entry(sc, i, ale_entry); 2906 if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) 2907 continue; 2908 if (vid != ALE_VLAN(ale_entry)) 2909 continue; 2910 2911 vg->es_fid = 0; 2912 vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID; 2913 vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry); 2914 vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry); 2915 } 2916 2917 return (0); 2918} 2919 2920static void 2921cpsw_remove_vlan(struct cpsw_softc *sc, int vlan) 2922{ 2923 int i; 2924 uint32_t ale_entry[3]; 2925 2926 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 2927 cpsw_ale_read_entry(sc, i, ale_entry); 2928 if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN) 2929 continue; 2930 if (vlan != ALE_VLAN(ale_entry)) 2931 continue; 2932 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; 2933 cpsw_ale_write_entry(sc, i, ale_entry); 2934 break; 2935 } 2936} 2937 2938static int 2939cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg) 2940{ 2941 int i; 2942 struct cpsw_softc *sc; 2943 2944 sc = device_get_softc(dev); 2945 2946 for (i = 0; i < CPSW_VLANS; i++) { 2947 /* Is this Vlan ID in use by another vlangroup ? 
*/ 2948 if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid) 2949 return (EINVAL); 2950 } 2951 2952 if (vg->es_vid == 0) { 2953 if (cpsw_vgroups[vg->es_vlangroup].vid == -1) 2954 return (0); 2955 cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid); 2956 cpsw_vgroups[vg->es_vlangroup].vid = -1; 2957 vg->es_untagged_ports = 0; 2958 vg->es_member_ports = 0; 2959 vg->es_vid = 0; 2960 return (0); 2961 } 2962 2963 vg->es_vid &= ETHERSWITCH_VID_MASK; 2964 vg->es_member_ports &= CPSW_PORTS_MASK; 2965 vg->es_untagged_ports &= CPSW_PORTS_MASK; 2966 2967 if (cpsw_vgroups[vg->es_vlangroup].vid != -1 && 2968 cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid) 2969 return (EINVAL); 2970 2971 cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid; 2972 cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports, 2973 vg->es_untagged_ports, vg->es_member_ports, 0); 2974 2975 return (0); 2976} 2977 2978static int 2979cpsw_readreg(device_t dev, int addr) 2980{ 2981 2982 /* Not supported. */ 2983 return (0); 2984} 2985 2986static int 2987cpsw_writereg(device_t dev, int addr, int value) 2988{ 2989 2990 /* Not supported. */ 2991 return (0); 2992} 2993 2994static int 2995cpsw_readphy(device_t dev, int phy, int reg) 2996{ 2997 2998 /* Not supported. */ 2999 return (0); 3000} 3001 3002static int 3003cpsw_writephy(device_t dev, int phy, int reg, int data) 3004{ 3005 3006 /* Not supported. */ 3007 return (0); 3008} 3009#endif 3010