1/*- 2 * Copyright (c) 2009 Yohanes Nugroho <yohanes@gmail.com> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <arm/cavium/cns11xx/if_ecereg.h>
#include <arm/cavium/cns11xx/if_ecevar.h>
#include <arm/cavium/cns11xx/econa_var.h>

#include <machine/bus.h>
#include <machine/intr.h>

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Station address used for the VLAN-0 ARL entry; updated in place by
 * ece_set_mac()/set_mac_address() when a real address is chosen.
 */
static uint8_t
vlan0_mac[ETHER_ADDR_LEN] = {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x19};

/*
 * Boot loader expects the hardware state to be the same when we
 * restart the device (warm boot), so we need to save the initial
 * config values.
 */
int initial_switch_config;	/* SWITCH_CONFIG as read at activate time */
int initial_cpu_config;		/* CPU_PORT_CONFIG as read at activate time */
int initial_port0_config;	/* MAC_PORT_0_CONFIG as read at activate time */
int initial_port1_config;	/* MAC_PORT_1_CONFIG as read at activate time */

/* Read a 32-bit device register. */
static inline uint32_t
read_4(struct ece_softc *sc, bus_size_t off)
{

	return (bus_read_4(sc->mem_res, off));
}

/* Write a 32-bit device register. */
static inline void
write_4(struct ece_softc *sc, bus_size_t off, uint32_t val)
{

	bus_write_4(sc->mem_res, off, val);
}

/* Driver-global softc lock. */
#define ECE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define ECE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define ECE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)

/* TX ring lock. */
#define ECE_TXLOCK(_sc)		mtx_lock(&(_sc)->sc_mtx_tx)
#define ECE_TXUNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx_tx)
#define ECE_TXLOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx_tx, device_get_nameunit(_sc->dev), \
	    "ECE TX Lock", MTX_DEF)

/* TX-completion cleanup lock. */
#define ECE_CLEANUPLOCK(_sc)	mtx_lock(&(_sc)->sc_mtx_cleanup)
#define ECE_CLEANUPUNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx_cleanup)
#define ECE_CLEANUPLOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx_cleanup, device_get_nameunit(_sc->dev), \
	    "ECE cleanup Lock", MTX_DEF)

/* RX ring lock. */
#define ECE_RXLOCK(_sc)		mtx_lock(&(_sc)->sc_mtx_rx)
#define ECE_RXUNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx_rx)
#define ECE_RXLOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx_rx, device_get_nameunit(_sc->dev), \
	    "ECE RX Lock", MTX_DEF)

#define ECE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define ECE_TXLOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx_tx);
#define ECE_RXLOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx_rx);
#define ECE_CLEANUPLOCK_DESTROY(_sc) \
	mtx_destroy(&_sc->sc_mtx_cleanup);

#define ECE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define ECE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t ece_devclass;

/* ifnet entry points */

static void eceinit_locked(void *);
static void ecestart_locked(struct ifnet *);

static void eceinit(void *);
static void ecestart(struct ifnet *);
static void ecestop(struct ece_softc *);
static int eceioctl(struct ifnet * ifp, u_long, caddr_t);

/* bus entry points */

static int ece_probe(device_t dev);
static int ece_attach(device_t dev);
static int ece_detach(device_t dev);
static void ece_intr(void *);
static void ece_intr_qf(void *);
static void ece_intr_status(void *xsc);

/* helper routines */
static int ece_activate(device_t dev);
static void ece_deactivate(device_t dev);
static int ece_ifmedia_upd(struct ifnet *ifp);
static void ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int ece_get_mac(struct ece_softc *sc, u_char *eaddr);
static void ece_set_mac(struct ece_softc *sc, u_char *eaddr);
static int configure_cpu_port(struct ece_softc *sc);
static int configure_lan_port(struct ece_softc *sc, int phy_type);
static void set_pvid(struct ece_softc *sc, int port0, int port1, int cpu);
static void set_vlan_vid(struct ece_softc *sc, int vlan);
static void set_vlan_member(struct ece_softc *sc, int vlan);
static void set_vlan_tag(struct ece_softc *sc, int vlan);
static int hardware_init(struct ece_softc *sc);
static void ece_intr_rx_locked(struct ece_softc *sc, int count);

static void ece_free_desc_dma_tx(struct ece_softc *sc);
static void ece_free_desc_dma_rx(struct ece_softc *sc);

static void ece_intr_task(void *arg, int pending __unused);
static void ece_tx_task(void *arg, int pending __unused);
static void ece_cleanup_task(void *arg, int pending __unused);

static int ece_allocate_dma(struct ece_softc *sc);

static void ece_intr_tx(void *xsc);

static void clear_mac_entries(struct ece_softc *ec, int include_this_mac);

static uint32_t read_mac_entry(struct ece_softc *ec,
    uint8_t *mac_result,
    int first);

/*PHY related functions*/
/*
 * Read PHY register 'reg' of PHY 'phy' over the SMI (MDC/MDIO)
 * interface.  Polls PHY_CONTROL until the hardware raises PHY_RW_OK.
 * NOTE(review): on poll timeout this returns 0, which is
 * indistinguishable from a legitimate register value of 0.
 */
static inline int
phy_read(struct ece_softc *sc, int phy, int reg)
{
	int val;
	int ii;
	int status;

	write_4(sc, PHY_CONTROL, PHY_RW_OK);
	write_4(sc, PHY_CONTROL,
	    (PHY_ADDRESS(phy)|PHY_READ_COMMAND |
	    PHY_REGISTER(reg)));

	for (ii = 0; ii < 0x1000; ii++) {
		status = read_4(sc, PHY_CONTROL);
		if (status & PHY_RW_OK) {
			/* Clear the rw_ok status, and clear other
			 * bits value. */
			write_4(sc, PHY_CONTROL, PHY_RW_OK);
			val = PHY_GET_DATA(status);
			return (val);
		}
	}
	return (0);
}

/*
 * Write 'data' to PHY register 'reg' of PHY 'phy' over SMI.
 * Gives up silently if the hardware never acknowledges the write.
 */
static inline void
phy_write(struct ece_softc *sc, int phy, int reg, int data)
{
	int ii;

	write_4(sc, PHY_CONTROL, PHY_RW_OK);
	write_4(sc, PHY_CONTROL,
	    PHY_ADDRESS(phy) | PHY_REGISTER(reg) |
	    PHY_WRITE_COMMAND | PHY_DATA(data));
	for (ii = 0; ii < 0x1000; ii++) {
		if (read_4(sc, PHY_CONTROL) & PHY_RW_OK) {
			/* Clear the rw_ok status, and clear other
			 * bits value.
			 */
			write_4(sc, PHY_CONTROL, PHY_RW_OK);
			return;
		}
	}
}

/*
 * Identify the attached PHY model from the ID registers of PHY
 * addresses 0 and 1; returns one of the *_PHY constants.
 */
static int get_phy_type(struct ece_softc *sc)
{
	uint16_t phy0_id = 0, phy1_id = 0;

	/*
	 * Use SMI (MDC/MDIO) to read Link Partner's PHY Identifier
	 * Register 1.
	 */
	phy0_id = phy_read(sc, 0, 0x2);
	phy1_id = phy_read(sc, 1, 0x2);

	if ((phy0_id == 0xFFFF) && (phy1_id == 0x000F))
		return (ASIX_GIGA_PHY);
	else if ((phy0_id == 0x0243) && (phy1_id == 0x0243))
		return (TWO_SINGLE_PHY);
	else if ((phy0_id == 0xFFFF) && (phy1_id == 0x0007))
		return (VSC8601_GIGA_PHY);
	else if ((phy0_id == 0x0243) && (phy1_id == 0xFFFF))
		return (IC_PLUS_PHY);

	return (NOT_FOUND_PHY);
}

/* Newbus probe: this device is known by its hints; always match. */
static int
ece_probe(device_t dev)
{

	device_set_desc(dev, "Econa Ethernet Controller");
	return (0);
}


/*
 * Newbus attach: allocate register window and the four switch IRQs,
 * set up DMA rings, MAC address, mii, ifnet, taskqueue and interrupt
 * handlers.
 */
static int
ece_attach(device_t dev)
{
	struct ece_softc *sc;
	struct ifnet *ifp = NULL;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	u_char eaddr[ETHER_ADDR_LEN];
	int err;
	int i, rid;
	uint32_t rnd;

	err = 0;

	sc = device_get_softc(dev);

	sc->dev = dev;

	/*
	 * NOTE(review): the resource-allocation failures below jump to
	 * 'out' with err still 0, so attach would report success with
	 * missing resources — verify intended.
	 */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		goto out;

	power_on_network_interface();

	rid = 0;
	sc->irq_res_status = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_status == NULL)
		goto out;

	rid = 1;
	/*TSTC: Fm-Switch-Tx-Complete*/
	sc->irq_res_tx = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_tx == NULL)
		goto out;

	rid = 2;
	/*FSRC: Fm-Switch-Rx-Complete*/
	sc->irq_res_rec = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_rec == NULL)
		goto out;

	rid = 4;
	/*FSQF: Fm-Switch-Queue-Full*/
	sc->irq_res_qf = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res_qf == NULL)
		goto out;

	err = ece_activate(dev);
	if (err)
		goto out;

	/* Sysctls */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);

	ECE_LOCK_INIT(sc);

	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	if ((err = ece_get_mac(sc, eaddr)) != 0) {
		/* No MAC address configured. Generate the random one. */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");
		rnd = arc4random();

		/*from if_ae.c/if_ate.c*/
		/*
		 * Set OUI to convenient locally assigned address. 'b'
		 * is 0x62, which has the locally assigned bit set, and
		 * the broadcast/multicast bit clear.
		 */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		/*
		 * NOTE(review): this loop overwrites the random address
		 * generated just above with the fixed vlan0_mac —
		 * confirm whether the random address is meant to win.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = vlan0_mac[i];
	}
	ece_set_mac(sc, eaddr);
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	/* Only one PHY at address 0 in this device. */
	err = mii_attach(dev, &sc->miibus, ifp, ece_ifmedia_upd,
	    ece_ifmedia_sts, BMSR_DEFCAPMASK, 0, MII_OFFSET_ANY, 0);
	if (err != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto out;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_capabilities = IFCAP_HWCSUM;

	ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_start = ecestart;
	ifp->if_ioctl = eceioctl;
	ifp->if_init = eceinit;
	ifp->if_snd.ifq_drv_maxlen = ECE_MAX_TX_BUFFERS - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ECE_MAX_TX_BUFFERS - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Create local taskq. */

	TASK_INIT(&sc->sc_intr_task, 0, ece_intr_task, sc);
	TASK_INIT(&sc->sc_tx_task, 1, ece_tx_task, ifp);
	TASK_INIT(&sc->sc_cleanup_task, 2, ece_cleanup_task, sc);
	sc->sc_tq = taskqueue_create_fast("ece_taskq", M_WAITOK,
	    taskqueue_thread_enqueue,
	    &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(sc->dev, "could not create taskqueue\n");
		goto out;
	}

	ether_ifattach(ifp, eaddr);

	/*
	 * Activate interrupts
	 */
	err = bus_setup_intr(dev, sc->irq_res_rec, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr, sc, &sc->intrhand);
	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	err = bus_setup_intr(dev, sc->irq_res_status,
	    INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr_status, sc, &sc->intrhand_status);
	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	err = bus_setup_intr(dev, sc->irq_res_qf, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL,ece_intr_qf, sc, &sc->intrhand_qf);

	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	err = bus_setup_intr(dev, sc->irq_res_tx, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ece_intr_tx, sc, &sc->intrhand_tx);

	if (err) {
		ether_ifdetach(ifp);
		ECE_LOCK_DESTROY(sc);
		goto out;
	}

	ECE_TXLOCK_INIT(sc);
	ECE_RXLOCK_INIT(sc);
	ECE_CLEANUPLOCK_INIT(sc);

	/* Enable all interrupt sources. */
	write_4(sc, INTERRUPT_MASK, 0x00000000);

	/* Enable port 0. */
	write_4(sc, PORT_0_CONFIG, read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE));

	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

out:
	if (err)
		ece_deactivate(dev);
	if (err && ifp)
		if_free(ifp);
	return (err);
}

/* Newbus detach: stop the device, detach the ifnet, free resources. */
static int
ece_detach(device_t dev)
{
	struct ece_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->ifp;

	ecestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	ece_deactivate(dev);
	return (0);
}

/*
 * busdma load callback: store the single segment's bus address into
 * the uint32_t pointed to by 'arg'.
 */
static void
ece_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	u_int32_t *paddr;
	KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

/*
 * Allocate, map and load the TX descriptor ring, plus a per-buffer
 * dmamap for each TX slot.  Returns 0 on success, ENXIO on failure.
 */
static int
ece_alloc_desc_dma_tx(struct ece_softc *sc)
{
	int i;
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS,	/* max size */
	    1,				/*nsegments */
	    sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS,
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_tx);	/* dmat */

	/* Allocate memory for TX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_tx,
	    (void**)&(sc->desc_tx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT,
	    &(sc->dmamap_ring_tx));

	if (error) {
		if_printf(sc->ifp, "failed to allocate DMA memory\n");
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = 0;
		return (ENXIO);
	}

	/* Load Ring DMA.
 */
	error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
	    sc->desc_tx,
	    sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS,
	    ece_getaddr,
	    &(sc->ring_paddr_tx), BUS_DMA_NOWAIT);

	if (error) {
		if_printf(sc->ifp, "can't load descriptor\n");
		bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
		    sc->dmamap_ring_tx);
		sc->desc_tx = NULL;
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = 0;
		return (ENXIO);
	}

	/* Allocate a busdma tag for mbufs. Alignment is 2 bytes */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES*MAX_FRAGMENT,	/* maxsize */
	    MAX_FRAGMENT,		/* nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_tx);	/* dmat */

	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
		/* Create dma map for each descriptor. */
		error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
		    &(sc->tx_desc[i].dmamap));
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}
	}
	return (0);
}

/*
 * Tear down everything ece_alloc_desc_dma_tx() built: queued mbufs,
 * the ring load/memory/tag, and the per-buffer dmamaps.  Safe to call
 * on a partially constructed state (each step is guarded).
 */
static void
ece_free_desc_dma_tx(struct ece_softc *sc)
{
	int i;

	for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
		if (sc->tx_desc[i].buff) {
			m_freem(sc->tx_desc[i].buff);
			sc->tx_desc[i].buff= 0;
		}
	}

	if (sc->ring_paddr_tx) {
		bus_dmamap_unload(sc->dmatag_data_tx, sc->dmamap_ring_tx);
		sc->ring_paddr_tx = 0;
	}

	if (sc->desc_tx) {
		bus_dmamem_free(sc->dmatag_data_tx,
		    sc->desc_tx, sc->dmamap_ring_tx);
		sc->desc_tx = NULL;
	}

	if (sc->dmatag_data_tx) {
		bus_dma_tag_destroy(sc->dmatag_data_tx);
		sc->dmatag_data_tx = 0;
	}

	if (sc->dmatag_ring_tx) {
		for (i = 0; i<ECE_MAX_TX_BUFFERS; i++) {
			bus_dmamap_destroy(sc->dmatag_ring_tx,
			    sc->tx_desc[i].dmamap);
			sc->tx_desc[i].dmamap = 0;
		}
		bus_dma_tag_destroy(sc->dmatag_ring_tx);
		sc->dmatag_ring_tx = 0;
	}
}

/*
 * Allocate, map and load the RX descriptor ring, a dmamap per RX
 * slot, and one spare map used for buffer swapping in ece_new_rxbuf().
 * Returns 0 on success, ENXIO on failure.
 */
static int
ece_alloc_desc_dma_rx(struct ece_softc *sc)
{
	int error;
	int i;

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = bus_dma_tag_create(sc->sc_parent_tag,	/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    /* maxsize, nsegments */
	    sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, 1,
	    /* maxsegsz, flags */
	    sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, 0,
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_data_rx);	/* dmat */

	/* Allocate RX ring. */
	error = bus_dmamem_alloc(sc->dmatag_data_rx,
	    (void**)&(sc->desc_rx),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO |
	    BUS_DMA_COHERENT,
	    &(sc->dmamap_ring_rx));

	if (error) {
		if_printf(sc->ifp, "failed to allocate DMA memory\n");
		return (ENXIO);
	}

	/* Load dmamap. */
	error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
	    sc->desc_rx,
	    sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS,
	    ece_getaddr,
	    &(sc->ring_paddr_rx), BUS_DMA_NOWAIT);

	if (error) {
		if_printf(sc->ifp, "can't load descriptor\n");
		bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
		    sc->dmamap_ring_rx);
		bus_dma_tag_destroy(sc->dmatag_data_rx);
		sc->desc_rx = NULL;
		return (ENXIO);
	}

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(sc->sc_parent_tag,/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->dmatag_ring_rx);	/* dmat */

	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	for (i = 0; i<ECE_MAX_RX_BUFFERS; i++) {
		error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
		    &sc->rx_desc[i].dmamap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}
	}

	error = bus_dmamap_create(sc->dmatag_ring_rx, 0, &sc->rx_sparemap);
	if (error) {
		if_printf(sc->ifp, "failed to create spare map\n");
		return (ENXIO);
	}

	return (0);
}

/*
 * Tear down everything ece_alloc_desc_dma_rx() built, including the
 * spare dmamap.  Each step is guarded so partial state is tolerated.
 */
static void
ece_free_desc_dma_rx(struct ece_softc *sc)
{
	int i;

	for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
		if (sc->rx_desc[i].buff) {
			m_freem(sc->rx_desc[i].buff);
			sc->rx_desc[i].buff = NULL;
		}
	}

	if (sc->ring_paddr_rx) {
		bus_dmamap_unload(sc->dmatag_data_rx, sc->dmamap_ring_rx);
		sc->ring_paddr_rx = 0;
	}

	if (sc->desc_rx) {
		bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
		    sc->dmamap_ring_rx);
		sc->desc_rx = NULL;
	}

	if (sc->dmatag_data_rx) {
		bus_dma_tag_destroy(sc->dmatag_data_rx);
		sc->dmatag_data_rx = NULL;
	}

	if (sc->dmatag_ring_rx) {
		for (i = 0; i < ECE_MAX_RX_BUFFERS; i++)
			bus_dmamap_destroy(sc->dmatag_ring_rx,
			    sc->rx_desc[i].dmamap);
		bus_dmamap_destroy(sc->dmatag_ring_rx, sc->rx_sparemap);
		bus_dma_tag_destroy(sc->dmatag_ring_rx);
		sc->dmatag_ring_rx = NULL;
	}
}

/*
 * Attach a fresh mbuf cluster to the RX slot described by 'descinfo',
 * swapping in the spare dmamap so the old map becomes the new spare.
 * The old mbuf (if any) is synced/unloaded but NOT freed — the caller
 * owns it.  Returns 0 on success, ENOBUFS on allocation/load failure.
 */
static int
ece_new_rxbuf(struct ece_softc *sc, struct rx_desc_info* descinfo)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	bus_dmamap_t map;
	int error;
	int nsegs;
	bus_dma_tag_t tag;

	tag = sc->dmatag_ring_rx;

	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

	if (new_mbuf == NULL)
		return (ENOBUFS);

	new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf_sg(tag, sc->rx_sparemap, new_mbuf,
	    seg, &nsegs, BUS_DMA_NOWAIT);

	KASSERT(nsegs == 1, ("Too many segments returned!"));

	if (nsegs != 1 || error) {
		m_free(new_mbuf);
		return (ENOBUFS);
	}

	if (descinfo->buff != NULL) {
		bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, descinfo->dmamap);
	}

	/* Rotate the spare map into this slot. */
	map = descinfo->dmamap;
	descinfo->dmamap = sc->rx_sparemap;
	sc->rx_sparemap = map;

	bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_PREREAD);

	descinfo->buff = new_mbuf;
	descinfo->desc->data_ptr = seg->ds_addr;
	/* length - 2: presumably reserves room for the 2-byte offset
	 * configured on the CPU port — TODO confirm. */
	descinfo->desc->length = seg->ds_len - 2;

	return (0);
}

/*
 * Create the parent busdma tag, build both descriptor rings, and
 * initialize ring indices.  Returns 0 (allocation errors from the
 * helpers are not propagated here).
 */
static int
ece_allocate_dma(struct ece_softc *sc)
{
	eth_tx_desc_t *desctx;
	eth_rx_desc_t *descrx;
	int i;
	int error;

	/* Create parent tag for tx and rx */
	error
= bus_dma_tag_create( 771 bus_get_dma_tag(sc->dev),/* parent */ 772 1, 0, /* alignment, boundary */ 773 BUS_SPACE_MAXADDR, /* lowaddr */ 774 BUS_SPACE_MAXADDR, /* highaddr */ 775 NULL, NULL, /* filter, filterarg */ 776 BUS_SPACE_MAXSIZE_32BIT, 0,/* maxsize, nsegments */ 777 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 778 0, /* flags */ 779 NULL, NULL, /* lockfunc, lockarg */ 780 &sc->sc_parent_tag); 781 782 ece_alloc_desc_dma_tx(sc); 783 784 for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) { 785 desctx = (eth_tx_desc_t *)(&sc->desc_tx[i]); 786 memset(desctx, 0, sizeof(eth_tx_desc_t)); 787 desctx->length = MAX_PACKET_LEN; 788 desctx->cown = 1; 789 if (i == ECE_MAX_TX_BUFFERS - 1) 790 desctx->eor = 1; 791 } 792 793 ece_alloc_desc_dma_rx(sc); 794 795 for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) { 796 descrx = &(sc->desc_rx[i]); 797 memset(descrx, 0, sizeof(eth_rx_desc_t)); 798 sc->rx_desc[i].desc = descrx; 799 sc->rx_desc[i].buff = 0; 800 ece_new_rxbuf(sc, &(sc->rx_desc[i])); 801 802 if (i == ECE_MAX_RX_BUFFERS - 1) 803 descrx->eor = 1; 804 } 805 sc->tx_prod = 0; 806 sc->tx_cons = 0; 807 sc->last_rx = 0; 808 sc->desc_curr_tx = 0; 809 810 return (0); 811} 812 813static int 814ece_activate(device_t dev) 815{ 816 struct ece_softc *sc; 817 int err; 818 uint32_t mac_port_config; 819 struct ifnet *ifp; 820 821 sc = device_get_softc(dev); 822 ifp = sc->ifp; 823 824 initial_switch_config = read_4(sc, SWITCH_CONFIG); 825 initial_cpu_config = read_4(sc, CPU_PORT_CONFIG); 826 initial_port0_config = read_4(sc, MAC_PORT_0_CONFIG); 827 initial_port1_config = read_4(sc, MAC_PORT_1_CONFIG); 828 829 /* Disable Port 0 */ 830 mac_port_config = read_4(sc, MAC_PORT_0_CONFIG); 831 mac_port_config |= (PORT_DISABLE); 832 write_4(sc, MAC_PORT_0_CONFIG, mac_port_config); 833 834 /* Disable Port 1 */ 835 mac_port_config = read_4(sc, MAC_PORT_1_CONFIG); 836 mac_port_config |= (PORT_DISABLE); 837 write_4(sc, MAC_PORT_1_CONFIG, mac_port_config); 838 839 err = ece_allocate_dma(sc); 840 if (err) { 841 
if_printf(sc->ifp, "failed allocating dma\n"); 842 goto out; 843 } 844 845 write_4(sc, TS_DESCRIPTOR_POINTER, sc->ring_paddr_tx); 846 write_4(sc, TS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_tx); 847 848 write_4(sc, FS_DESCRIPTOR_POINTER, sc->ring_paddr_rx); 849 write_4(sc, FS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_rx); 850 851 write_4(sc, FS_DMA_CONTROL, 1); 852 853 return (0); 854out: 855 return (ENXIO); 856 857} 858 859static void 860ece_deactivate(device_t dev) 861{ 862 struct ece_softc *sc; 863 864 sc = device_get_softc(dev); 865 866 if (sc->intrhand) 867 bus_teardown_intr(dev, sc->irq_res_rec, sc->intrhand); 868 869 sc->intrhand = 0; 870 871 if (sc->intrhand_qf) 872 bus_teardown_intr(dev, sc->irq_res_qf, sc->intrhand_qf); 873 874 sc->intrhand_qf = 0; 875 876 bus_generic_detach(sc->dev); 877 if (sc->miibus) 878 device_delete_child(sc->dev, sc->miibus); 879 if (sc->mem_res) 880 bus_release_resource(dev, SYS_RES_IOPORT, 881 rman_get_rid(sc->mem_res), sc->mem_res); 882 sc->mem_res = 0; 883 884 if (sc->irq_res_rec) 885 bus_release_resource(dev, SYS_RES_IRQ, 886 rman_get_rid(sc->irq_res_rec), sc->irq_res_rec); 887 888 if (sc->irq_res_qf) 889 bus_release_resource(dev, SYS_RES_IRQ, 890 rman_get_rid(sc->irq_res_qf), sc->irq_res_qf); 891 892 if (sc->irq_res_qf) 893 bus_release_resource(dev, SYS_RES_IRQ, 894 rman_get_rid(sc->irq_res_status), sc->irq_res_status); 895 896 sc->irq_res_rec = 0; 897 sc->irq_res_qf = 0; 898 sc->irq_res_status = 0; 899 ECE_TXLOCK_DESTROY(sc); 900 ECE_RXLOCK_DESTROY(sc); 901 902 ece_free_desc_dma_tx(sc); 903 ece_free_desc_dma_rx(sc); 904 905 return; 906} 907 908/* 909 * Change media according to request. 910 */ 911static int 912ece_ifmedia_upd(struct ifnet *ifp) 913{ 914 struct ece_softc *sc = ifp->if_softc; 915 struct mii_data *mii; 916 int error; 917 918 mii = device_get_softc(sc->miibus); 919 ECE_LOCK(sc); 920 error = mii_mediachg(mii); 921 ECE_UNLOCK(sc); 922 return (error); 923} 924 925/* 926 * Notify the world which media we're using. 
 */
static void
ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ece_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	ECE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ECE_UNLOCK(sc);
}

/* Periodic (1 Hz) MII tick, driven by sc->tick_ch. */
static void
ece_tick(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct mii_data *mii;
	int active;

	mii = device_get_softc(sc->miibus);
	/* NOTE(review): 'active' is captured but never compared after
	 * mii_tick(); link-change handling looks unfinished — confirm. */
	active = mii->mii_media_active;
	mii_tick(mii);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->tick_ch, hz, ece_tick, sc);
}

/*
 * Read one entry from the switch ARL (address lookup) table.
 * 'first' selects the read-first command (0x1), otherwise read-next
 * (0x2).  The entry's MAC is copied into mac_result when non-NULL;
 * the return value is the entry's table_end flag.
 */
static uint32_t
read_mac_entry(struct ece_softc *ec,
    uint8_t *mac_result,
    int first)
{
	uint32_t ii;
	struct arl_table_entry_t entry;
	uint32_t *entry_val;
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);
	if (first)
		write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x1);
	else
		write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x2);

	/* Poll for command completion (bit 0 of CONTROL_1). */
	for (ii = 0; ii < 0x1000; ii++)
		if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) & (0x1))
			break;

	entry_val = (uint32_t*) (&entry);
	entry_val[0] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_1);
	entry_val[1] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_2);

	if (mac_result)
		memcpy(mac_result, entry.mac_addr, ETHER_ADDR_LEN);

	return (entry.table_end);
}

/*
 * Write one ARL table entry.  Returns 1 when the hardware reports
 * command completion within the poll budget, 0 otherwise.
 */
static uint32_t
write_arl_table_entry(struct ece_softc *ec,
    uint32_t filter,
    uint32_t vlan_mac,
    uint32_t vlan_gid,
    uint32_t age_field,
    uint32_t port_map,
    const uint8_t *mac_addr)
{
	uint32_t ii;
	uint32_t *entry_val;
	struct arl_table_entry_t entry;

	memset(&entry, 0, sizeof(entry));

	entry.filter = filter;
	entry.vlan_mac = vlan_mac;
	entry.vlan_gid = vlan_gid;
	entry.age_field = age_field;
	entry.port_map = port_map;
	memcpy(entry.mac_addr, mac_addr, ETHER_ADDR_LEN);

	entry_val = (uint32_t*) (&entry);

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, entry_val[0]);
	write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, entry_val[1]);

	write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, ARL_WRITE_COMMAND);

	for (ii = 0; ii < 0x1000; ii++)
		if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) &
		    ARL_COMMAND_COMPLETE)
			return (1); /* Write OK. */

	/* Write failed. */
	return (0);
}

/* Erase the VLAN-0 ARL entry for 'mac' (INVALID_ENTRY age = delete). */
static void
remove_mac_entry(struct ece_softc *sc,
    uint8_t *mac)
{

	/* Invalid age_field mean erase this entry. */
	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    INVALID_ENTRY, VLAN0_GROUP,
	    mac);
}

/* Install a VLAN-0 ARL entry for 'mac'. */
static void
add_mac_entry(struct ece_softc *sc,
    uint8_t *mac)
{

	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    NEW_ENTRY, VLAN0_GROUP,
	    mac);
}

/**
 * The behavior of ARL table reading and deletion is not well defined
 * in the documentation. To be safe, all mac addresses are put to a
 * list, then deleted.
 *
 */
static void
clear_mac_entries(struct ece_softc *ec, int include_this_mac)
{
	int table_end;
	struct mac_list * temp;
	struct mac_list * mac_list_header;
	struct mac_list * current;
	char mac[ETHER_ADDR_LEN];

	current = NULL;
	mac_list_header = NULL;

	table_end = read_mac_entry(ec, mac, 1);
	while (!table_end) {
		if (!include_this_mac &&
		    memcmp(mac, vlan0_mac, ETHER_ADDR_LEN) == 0) {
			/* Read next entry. */
			table_end = read_mac_entry(ec, mac, 0);
			continue;
		}

		/* NOTE(review): the M_NOWAIT malloc result is not checked;
		 * the memcpy below dereferences it unconditionally. */
		temp = (struct mac_list*)malloc(sizeof(struct mac_list),
		    M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		memcpy(temp->mac_addr, mac, ETHER_ADDR_LEN);
		temp->next = 0;
		if (mac_list_header) {
			current->next = temp;
			current = temp;
		} else {
			mac_list_header = temp;
			current = temp;
		}
		/* Read next Entry */
		table_end = read_mac_entry(ec, mac, 0);
	}

	current = mac_list_header;

	while (current) {
		remove_mac_entry(ec, current->mac_addr);
		temp = current;
		current = current->next;
		free(temp, M_DEVBUF);
	}
}

/*
 * Configure the global switch settings (aging, learning, NIC mode)
 * for the LAN-side port.  'phy_type' is currently unused here.
 */
static int
configure_lan_port(struct ece_softc *sc, int phy_type)
{
	uint32_t sw_config;
	uint32_t mac_port_config;

	/*
	 * Configure switch
	 */
	sw_config = read_4(sc, SWITCH_CONFIG);
	/* Enable fast aging. */
	sw_config |= FAST_AGING;
	/* Enable IVL learning. */
	sw_config |= IVL_LEARNING;
	/* Disable hardware NAT.
 */
	sw_config &= ~(HARDWARE_NAT);

	sw_config |= SKIP_L2_LOOKUP_PORT_0 | SKIP_L2_LOOKUP_PORT_1| NIC_MODE;

	write_4(sc, SWITCH_CONFIG, sw_config);

	sw_config = read_4(sc, SWITCH_CONFIG);

	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);

	/*
	 * NOTE(review): the else branch writes back the value just read,
	 * i.e. it is effectively a no-op; presumably a link-state bit was
	 * meant to be modified here -- confirm against the datasheet.
	 */
	if (!(mac_port_config & 0x1) || (mac_port_config & 0x2))
		if_printf(sc->ifp, "Link Down\n");
	else
		write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
	return (0);
}

/*
 * Program the default VLAN IDs (PVID) for the two MAC ports and the
 * CPU port.  Each PVID is a 3-bit field in VLAN_PORT_PVID: port 0 at
 * bits 2:0, port 1 at bits 6:4 and the CPU port at bits 10:8.  Every
 * field is first cleared and then set with a read-modify-write pair.
 */
static void
set_pvid(struct ece_softc *sc, int port0, int port1, int cpu)
{
	uint32_t val;

	/* Port 0 PVID, bits 2:0. */
	val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 0));
	write_4(sc, VLAN_PORT_PVID, val);
	val = read_4(sc, VLAN_PORT_PVID) | ((port0) & 0x07);
	write_4(sc, VLAN_PORT_PVID, val);
	/* Port 1 PVID, bits 6:4. */
	val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 4));
	write_4(sc, VLAN_PORT_PVID, val);
	val = read_4(sc, VLAN_PORT_PVID) | (((port1) & 0x07) << 4);
	write_4(sc, VLAN_PORT_PVID, val);

	/* CPU port PVID, bits 10:8. */
	val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 8));
	write_4(sc, VLAN_PORT_PVID, val);
	val = read_4(sc, VLAN_PORT_PVID) | (((cpu) & 0x07) << 8);
	write_4(sc, VLAN_PORT_PVID, val);

}

/* VLAN related functions */

/*
 * Set the 12-bit VLAN ID for VLAN group 'vlan' (0-7).  Two IDs share
 * each VLAN_VID_x_y register: odd-numbered groups occupy bits 11:0,
 * even-numbered groups occupy bits 23:12.  The caller must pass
 * vlan in the range 0-7; there is no bounds check here.
 */
static void
set_vlan_vid(struct ece_softc *sc, int vlan)
{
	/* Register holding the VID for each of the 8 VLAN groups. */
	const uint32_t regs[] = {
		VLAN_VID_0_1,
		VLAN_VID_0_1,
		VLAN_VID_2_3,
		VLAN_VID_2_3,
		VLAN_VID_4_5,
		VLAN_VID_4_5,
		VLAN_VID_6_7,
		VLAN_VID_6_7
	};

	/* Compile-time VID value for each group. */
	const int vids[] = {
		VLAN0_VID,
		VLAN1_VID,
		VLAN2_VID,
		VLAN3_VID,
		VLAN4_VID,
		VLAN5_VID,
		VLAN6_VID,
		VLAN7_VID
	};

	uint32_t val;
	uint32_t reg;
	int vid;

	reg = regs[vlan];
	vid = vids[vlan];

	if (vlan & 1) {
		/* Odd group: low half of the register (bits 11:0). */
		val = read_4(sc, reg);
		write_4(sc, reg, val & (~(0xFFF << 0)));
		val = read_4(sc, reg);
		write_4(sc, reg, val|((vid & 0xFFF) << 0));
	} else {
		/* Even group: high half of the register (bits 23:12). */
		val = read_4(sc, reg);
		write_4(sc, reg, val & (~(0xFFF << 12)));
		val = read_4(sc, reg);
		write_4(sc, reg, val|((vid & 0xFFF) << 12));
	}
}

/*
 * Set the 3-bit port-membership map for VLAN group 'vlan' (0-7) in
 * VLAN_MEMBER_PORT_MAP; each group's field sits at bit offset vlan*3.
 */
static void
set_vlan_member(struct ece_softc *sc, int vlan)
{
	unsigned char shift;
	uint32_t val;
	int group;
	const int groups[] = {
		VLAN0_GROUP,
		VLAN1_GROUP,
		VLAN2_GROUP,
		VLAN3_GROUP,
		VLAN4_GROUP,
		VLAN5_GROUP,
		VLAN6_GROUP,
		VLAN7_GROUP
	};

	group = groups[vlan];

	shift = vlan*3;
	val = read_4(sc, VLAN_MEMBER_PORT_MAP) & (~(0x7 << shift));
	write_4(sc, VLAN_MEMBER_PORT_MAP, val);
	val = read_4(sc, VLAN_MEMBER_PORT_MAP);
	write_4(sc, VLAN_MEMBER_PORT_MAP, val | ((group & 0x7) << shift));
}

/*
 * Clear the 3-bit tag map for VLAN group 'vlan' in VLAN_TAG_PORT_MAP
 * (tag is always 0 here, so no port tags outgoing frames for any
 * group).  Same per-group layout as set_vlan_member().
 */
static void
set_vlan_tag(struct ece_softc *sc, int vlan)
{
	unsigned char shift;
	uint32_t val;

	int tag = 0;

	shift = vlan*3;
	val = read_4(sc, VLAN_TAG_PORT_MAP) & (~(0x7 << shift));
	write_4(sc, VLAN_TAG_PORT_MAP, val);
	val = read_4(sc, VLAN_TAG_PORT_MAP);
	write_4(sc, VLAN_TAG_PORT_MAP, val | ((tag & 0x7) << shift));
}

/*
 * Configure the switch's CPU port: disable source-address learning,
 * install the static ARL entry for our MAC on VLAN group 0, program
 * PVIDs and all 8 VLAN groups, then mask/ack interrupts and stop both
 * DMA engines.  Returns 0 on success, 1 if the ARL entry could not be
 * written.
 */
static int
configure_cpu_port(struct ece_softc *sc)
{
	uint32_t cpu_port_config;
	int i;

	cpu_port_config = read_4(sc, CPU_PORT_CONFIG);
	/* SA learning Disable */
	cpu_port_config |= (SA_LEARNING_DISABLE);
	/* set data offset + 2 */
	cpu_port_config &= ~(1U << 31);

	write_4(sc, CPU_PORT_CONFIG, cpu_port_config);

	if (!write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    STATIC_ENTRY, VLAN0_GROUP,
	    vlan0_mac))
		return (1);

	set_pvid(sc, PORT0_PVID, PORT1_PVID, CPU_PORT_PVID);

	for (i = 0; i < 8; i++) {
		set_vlan_vid(sc, i);
		set_vlan_member(sc, i);
		set_vlan_tag(sc, i);
	}

	/* disable all interrupt status sources */
	write_4(sc, INTERRUPT_MASK, 0xffff1fff);

	/* clear previous interrupt sources */
	write_4(sc, INTERRUPT_STATUS, 0x00001FFF);
	/* Stop both the TX (TS) and RX (FS) DMA engines for now. */
	write_4(sc, TS_DMA_CONTROL, 0);
	write_4(sc, FS_DMA_CONTROL, 0);
	return (0);
}

/*
 * One-time hardware bring-up: identify the PHY and configure the LAN
 * and CPU ports.  Returns 0 on success, -1 for an unsupported PHY.
 *
 * NOTE(review): 'status' receives the configure_lan_port() result but
 * is never checked -- the function returns 0 even if LAN-port setup
 * failed.  Also, 'gw_phy_type' has no apparent reason to be 'static';
 * it is reassigned on every call.  Both look like oversights; confirm
 * before changing.
 */
static int
hardware_init(struct ece_softc *sc)
{
	int status = 0;
	static int gw_phy_type;

	gw_phy_type = get_phy_type(sc);
	/* Currently only ic_plus phy is supported. */
	if (gw_phy_type != IC_PLUS_PHY) {
		device_printf(sc->dev, "PHY type is not supported (%d)\n",
		    gw_phy_type);
		return (-1);
	}
	status = configure_lan_port(sc, gw_phy_type);
	configure_cpu_port(sc);
	return (0);
}

/*
 * Replace the ARL table entry for VLAN group 0 with a new MAC
 * address: invalidate the old entry, remember the address in
 * vlan0_mac, then install a static entry for it.
 * NOTE(review): 'mac_len' is unused; ETHER_ADDR_LEN bytes are always
 * copied.
 */
static void
set_mac_address(struct ece_softc *sc, const char *mac, int mac_len)
{

	/* Invalid age_field mean erase this entry. */
	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    INVALID_ENTRY, VLAN0_GROUP,
	    mac);
	memcpy(vlan0_mac, mac, ETHER_ADDR_LEN);

	write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
	    STATIC_ENTRY, VLAN0_GROUP,
	    mac);
}

/* Set the interface MAC address (cache it, then program the ARL). */
static void
ece_set_mac(struct ece_softc *sc, u_char *eaddr)
{
	memcpy(vlan0_mac, eaddr, ETHER_ADDR_LEN);
	set_mac_address(sc, eaddr, ETHER_ADDR_LEN);
}

/*
 * TODO: the device doesn't have MAC stored, we should read the
 * configuration stored in FLASH, but the format depends on the
 * bootloader used.*
 */
static int
ece_get_mac(struct ece_softc *sc, u_char *eaddr)
{
	/* Always fails; the caller is expected to generate an address. */
	return (ENXIO);
}

/*
 * Drain received frames from the RX descriptor ring and hand them to
 * the network stack.  Runs with the RX lock held; the lock is dropped
 * around if_input() and reacquired afterwards.
 * NOTE(review): the 'count' parameter is never used (callers pass -1).
 */
static void
ece_intr_rx_locked(struct ece_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *mb;
	struct rx_desc_info *rxdesc;
	eth_rx_desc_t *desc;

	int fssd_curr;
	int fssd;
	int i;
	int idx;
	int rxcount;
	uint32_t status;

	fssd_curr = read_4(sc, FS_DESCRIPTOR_POINTER);

	/*
	 * Convert the hardware's current descriptor pointer into a ring
	 * index (descriptors appear to be 16 bytes, hence the >>4 --
	 * confirm against eth_rx_desc_t).
	 */
	fssd = (fssd_curr - (uint32_t)sc->ring_paddr_rx)>>4;

	desc = sc->rx_desc[sc->last_rx].desc;

	/* Prepare to read the data in the ring. */
	bus_dmamap_sync(sc->dmatag_ring_rx,
	    sc->dmamap_ring_rx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Number of descriptors to examine: the distance from last_rx to
	 * the hardware pointer, with wrap-around; if the pointers are
	 * equal, either nothing is pending (cown clear) or the whole ring
	 * is full.
	 */
	if (fssd > sc->last_rx)
		rxcount = fssd - sc->last_rx;
	else if (fssd < sc->last_rx)
		rxcount = (ECE_MAX_RX_BUFFERS - sc->last_rx) + fssd;
	else {
		if (desc->cown == 0)
			return;
		else
			rxcount = ECE_MAX_RX_BUFFERS;
	}

	for (i= 0; i < rxcount; i++) {
		/* cown set means the hardware has handed us this slot. */
		status = desc->cown;
		if (!status)
			break;

		idx = sc->last_rx;
		rxdesc = &sc->rx_desc[idx];
		mb = rxdesc->buff;

		/* Drop runts and oversized frames; recycle the slot. */
		if (desc->length < ETHER_MIN_LEN - ETHER_CRC_LEN ||
		    desc->length > ETHER_MAX_LEN - ETHER_CRC_LEN +
		    ETHER_VLAN_ENCAP_LEN) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			desc->cown = 0;
			desc->length = MCLBYTES - 2;
			/* Invalid packet, skip and process next
			 * packet.
			 */
			continue;
		}

		/* Attach a fresh mbuf; if that fails, keep the old one. */
		if (ece_new_rxbuf(sc, rxdesc) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			desc->cown = 0;
			desc->length = MCLBYTES - 2;
			break;
		}

		/*
		 * The device writes to address + 2, so we need to adjust
		 * the data pointer after the packet is received.
		 */
		mb->m_data += 2;
		mb->m_len = mb->m_pkthdr.len = desc->length;

		mb->m_flags |= M_PKTHDR;
		mb->m_pkthdr.rcvif = ifp;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* Check for valid checksum (l4f = L4 failure
			 * flag; prot == 3 presumably means "not
			 * IP/TCP/UDP" -- confirm against datasheet). */
			if ( (!desc->l4f) && (desc->prot != 3)) {
				mb->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				mb->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				mb->m_pkthdr.csum_data = 0xffff;
			}
		}
		/* Drop the RX lock while the stack consumes the mbuf. */
		ECE_RXUNLOCK(sc);
		(*ifp->if_input)(ifp, mb);
		ECE_RXLOCK(sc);

		/* Return the descriptor to the hardware. */
		desc->cown = 0;
		desc->length = MCLBYTES - 2;

		bus_dmamap_sync(sc->dmatag_ring_rx,
		    sc->dmamap_ring_rx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (sc->last_rx == ECE_MAX_RX_BUFFERS - 1)
			sc->last_rx = 0;
		else
			sc->last_rx++;

		desc = sc->rx_desc[sc->last_rx].desc;
	}

	/* Sync updated flags. */
	bus_dmamap_sync(sc->dmatag_ring_rx,
	    sc->dmamap_ring_rx,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return;
}

/* Taskqueue handler: drain the RX ring under the RX lock. */
static void
ece_intr_task(void *arg, int pending __unused)
{
	struct ece_softc *sc = arg;
	ECE_RXLOCK(sc);
	ece_intr_rx_locked(sc, -1);
	ECE_RXUNLOCK(sc);
}

/*
 * RX interrupt handler: defer the actual work to the taskqueue, and
 * kick the TX task if there are packets waiting to be sent.  If the
 * interface is down, just stop RX DMA.
 */
static void
ece_intr(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		write_4(sc, FS_DMA_CONTROL, 0);
		return;
	}

	taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}

/*
 * Status interrupt handler: read and acknowledge the interrupt status
 * register, counting error interrupts as input drops.
 */
static void
ece_intr_status(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	int stat;

	stat = read_4(sc, INTERRUPT_STATUS);

	/* Ack everything we just read. */
	write_4(sc, INTERRUPT_STATUS, stat);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((stat & ERROR_MASK) != 0)
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
	}
}

/*
 * Reclaim completed TX descriptors: for each descriptor the hardware
 * has returned (cown set), sync and unload its DMA map and free the
 * transmitted mbuf.  Stops at the first descriptor still owned by the
 * hardware.  Runs with the cleanup lock held.
 */
static void
ece_cleanup_locked(struct ece_softc *sc)
{
	eth_tx_desc_t *desc;

	if (sc->tx_cons == sc->tx_prod) return;

	/* Prepare to read the ring (owner bit). */
	bus_dmamap_sync(sc->dmatag_ring_tx,
	    sc->dmamap_ring_tx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->tx_cons != sc->tx_prod) {
		desc = sc->tx_desc[sc->tx_cons].desc;
		if (desc->cown != 0) {
			struct tx_desc_info *td = &(sc->tx_desc[sc->tx_cons]);
			/* We are finished with this descriptor ... */
			bus_dmamap_sync(sc->dmatag_data_tx, td->dmamap,
			    BUS_DMASYNC_POSTWRITE);
			/* ... and unload, so we can reuse. */
			bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
			m_freem(td->buff);
			td->buff = 0;
			sc->tx_cons = (sc->tx_cons + 1) % ECE_MAX_TX_BUFFERS;
		} else {
			break;
		}
	}

}

/* Taskqueue handler: reclaim TX descriptors under the cleanup lock. */
static void
ece_cleanup_task(void *arg, int pending __unused)
{
	struct ece_softc *sc = arg;
	ECE_CLEANUPLOCK(sc);
	ece_cleanup_locked(sc);
	ECE_CLEANUPUNLOCK(sc);
}

/*
 * TX-complete interrupt handler: defer descriptor reclamation to the
 * cleanup task.  If the interface is down, stop DMA instead.
 */
static void
ece_intr_tx(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		/* This should not happen, stop DMA. */
		write_4(sc, FS_DMA_CONTROL, 0);
		return;
	}
	taskqueue_enqueue(sc->sc_tq, &sc->sc_cleanup_task);
}

/*
 * Queue-full interrupt handler: drain the RX ring via the intr task
 * and restart RX DMA.
 */
static void
ece_intr_qf(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		/* This should not happen, stop DMA.
		 */
		write_4(sc, FS_DMA_CONTROL, 0);
		return;
	}
	taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
	write_4(sc, FS_DMA_CONTROL, 1);
}

/*
 * Reset and initialize the chip
 */
static void
eceinit_locked(void *xsc)
{
	struct ece_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct mii_data *mii;
	uint32_t cfg_reg;
	uint32_t cpu_port_config;
	uint32_t mac_port_config;

	/*
	 * Wait for the built-in self test to complete (bit 17 of
	 * BIST_RESULT_TEST_0).  NOTE(review): no timeout -- this spins
	 * forever if the bit never sets.
	 */
	while (1) {
		cfg_reg = read_4(sc, BIST_RESULT_TEST_0);
		if ((cfg_reg & (1<<17)))
			break;
		DELAY(100);
	}
	/* Set to default values. */
	write_4(sc, SWITCH_CONFIG, 0x007AA7A1);
	write_4(sc, MAC_PORT_0_CONFIG, 0x00423D00);
	write_4(sc, MAC_PORT_1_CONFIG, 0x00423D80);
	write_4(sc, CPU_PORT_CONFIG, 0x004C0000);

	hardware_init(sc);

	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);

	/* Enable Port 0 */
	mac_port_config &= (~(PORT_DISABLE));
	write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);

	cpu_port_config = read_4(sc, CPU_PORT_CONFIG);
	/* Enable CPU. */
	cpu_port_config &= ~(PORT_DISABLE);
	write_4(sc, CPU_PORT_CONFIG, cpu_port_config);

	/*
	 * Set 'running' flag, and clear output active flag
	 * and attempt to start the output
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	/* Enable DMA.
	 */
	write_4(sc, FS_DMA_CONTROL, 1);

	callout_reset(&sc->tick_ch, hz, ece_tick, sc);
}

/*
 * Map an outgoing mbuf chain into TX descriptors.  One descriptor is
 * filled per DMA segment (fs/ls mark the first/last segment); the
 * cown bits are cleared only after all descriptors of the frame are
 * set up, so the hardware never sees a half-built frame.  Returns 0
 * on success, non-zero on failure (caller requeues the mbuf).
 *
 * NOTE(review): the error return '(error != 0) ? error : -1' is
 * reached only when error != 0, so the '-1' arm is dead.  Also, the
 * "descriptor is still used" path returns without unloading the DMA
 * map or freeing m0 -- possible leak; confirm against callers.
 */
static inline int
ece_encap(struct ece_softc *sc, struct mbuf *m0)
{
	struct ifnet *ifp;
	bus_dma_segment_t segs[MAX_FRAGMENT];
	bus_dmamap_t mapp;
	eth_tx_desc_t *desc = NULL;
	int csum_flags;
	int desc_no;
	int error;
	int nsegs;
	int seg;

	ifp = sc->ifp;

	/* Fetch unused map */
	mapp = sc->tx_desc[sc->tx_prod].dmamap;

	error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, mapp,
	    m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);

	if (error != 0) {
		bus_dmamap_unload(sc->dmatag_ring_tx, mapp);
		return ((error != 0) ? error : -1);
	}

	desc = &(sc->desc_tx[sc->desc_curr_tx]);
	sc->tx_desc[sc->tx_prod].desc = desc;
	sc->tx_desc[sc->tx_prod].buff = m0;
	desc_no = sc->desc_curr_tx;

	for (seg = 0; seg < nsegs; seg++) {
		/* cown == 0 means the hardware still owns this slot. */
		if (desc->cown == 0 ) {
			if_printf(ifp, "ERROR: descriptor is still used\n");
			return (-1);
		}

		desc->length = segs[seg].ds_len;
		desc->data_ptr = segs[seg].ds_addr;

		/* First/last-segment markers. */
		if (seg == 0) {
			desc->fs = 1;
		} else {
			desc->fs = 0;
		}
		if (seg == nsegs - 1) {
			desc->ls = 1;
		} else {
			desc->ls = 0;
		}

		csum_flags = m0->m_pkthdr.csum_flags;

		desc->fr = 1;
		desc->pmap = 1;
		desc->insv = 0;
		desc->ico = 0;
		desc->tco = 0;
		desc->uco = 0;
		desc->interrupt = 1;

		/* Request hardware IP/TCP/UDP checksum offload. */
		if (csum_flags & CSUM_IP) {
			desc->ico = 1;
			if (csum_flags & CSUM_TCP)
				desc->tco = 1;
			if (csum_flags & CSUM_UDP)
				desc->uco = 1;
		}

		desc++;
		sc->desc_curr_tx = (sc->desc_curr_tx + 1) % ECE_MAX_TX_BUFFERS;
		if (sc->desc_curr_tx == 0) {
			/* Wrapped: continue at the start of the ring. */
			desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
		}
	}

	desc = sc->tx_desc[sc->tx_prod].desc;

	sc->tx_prod = (sc->tx_prod + 1) % ECE_MAX_TX_BUFFERS;

	/*
	 * After all descriptors are set, we set the flags to start the
	 * sending process.
	 */
	for (seg = 0; seg < nsegs; seg++) {
		desc->cown = 0;
		desc++;
		desc_no = (desc_no + 1) % ECE_MAX_TX_BUFFERS;
		if (desc_no == 0)
			desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
	}

	bus_dmamap_sync(sc->dmatag_data_tx, mapp, BUS_DMASYNC_PREWRITE);
	return (0);
}

/*
 * Dequeue packets and transmit.  Runs with the TX lock held; kicks
 * the TX DMA engine once at the end if anything was queued.
 */
static void
ecestart_locked(struct ifnet *ifp)
{
	struct ece_softc *sc;
	struct mbuf *m0;
	uint32_t queued = 0;

	sc = ifp->if_softc;
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	bus_dmamap_sync(sc->dmatag_ring_tx,
	    sc->dmamap_ring_tx,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (ece_encap(sc, m0)) {
			/* Ring full: put the packet back and stall. */
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}
	if (queued) {
		bus_dmamap_sync(sc->dmatag_ring_tx, sc->dmamap_ring_tx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		write_4(sc, TS_DMA_CONTROL, 1);
	}
}

/* if_init entry point: initialize the chip under the softc lock. */
static void
eceinit(void *xsc)
{
	struct ece_softc *sc = xsc;
	ECE_LOCK(sc);
	eceinit_locked(sc);
	ECE_UNLOCK(sc);
}

/* Taskqueue handler: run the transmit path. */
static void
ece_tx_task(void *arg, int pending __unused)
{
	struct ifnet *ifp;
	ifp = (struct ifnet *)arg;
	ecestart(ifp);
}

/* if_start entry point: transmit under the TX lock. */
static void
ecestart(struct ifnet *ifp)
{
	struct ece_softc *sc = ifp->if_softc;
	ECE_TXLOCK(sc);
	ecestart_locked(ifp);
	ECE_TXUNLOCK(sc);
}

/*
 * Turn off interrupts, and stop the nic. Can be called with sc->ifp
 * NULL so be careful.
 */
static void
ecestop(struct ece_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t mac_port_config;

	/* Halt both DMA engines first. */
	write_4(sc, TS_DMA_CONTROL, 0);
	write_4(sc, FS_DMA_CONTROL, 0);

	if (ifp)
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	/*Disable Port 0 */
	mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
	mac_port_config |= (PORT_DISABLE);
	write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);

	/*Disable Port 1 */
	mac_port_config = read_4(sc, MAC_PORT_1_CONFIG);
	mac_port_config |= (PORT_DISABLE);
	write_4(sc, MAC_PORT_1_CONFIG, mac_port_config);

	/* Disable all interrupt status sources. */
	write_4(sc, INTERRUPT_MASK, 0x00001FFF);

	/* Clear previous interrupt sources. */
	write_4(sc, INTERRUPT_STATUS, 0x00001FFF);

	/*
	 * Restore the configuration the bootloader left us with (saved
	 * at attach time) so a warm boot sees the expected state.
	 */
	write_4(sc, SWITCH_CONFIG, initial_switch_config);
	write_4(sc, CPU_PORT_CONFIG, initial_cpu_config);
	write_4(sc, MAC_PORT_0_CONFIG, initial_port0_config);
	write_4(sc, MAC_PORT_1_CONFIG, initial_port1_config);

	clear_mac_entries(sc, 1);
}

/*
 * Bring the interface back up without a full re-init: mark it
 * running, re-enable port 0, unmask interrupts and restart RX DMA.
 * NOTE(review): this writes PORT_0_CONFIG while the rest of the file
 * uses MAC_PORT_0_CONFIG -- verify these name the same register.
 */
static void
ece_restart(struct ece_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	/* Enable port 0. */
	write_4(sc, PORT_0_CONFIG,
	    read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE));
	write_4(sc, INTERRUPT_MASK, 0x00000000);
	write_4(sc, FS_DMA_CONTROL, 1);
	callout_reset(&sc->tick_ch, hz, ece_tick, sc);
}

/*
 * Program the receive filter: in promiscuous/allmulti mode accept all
 * broadcast and multicast frames; otherwise install one MAC table
 * entry per joined link-layer multicast group.
 */
static void
set_filter(struct ece_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mac_port_config;

	ifp = sc->ifp;

	clear_mac_entries(sc, 0);
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
		mac_port_config &= ~(DISABLE_BROADCAST_PACKET);
		mac_port_config &= ~(DISABLE_MULTICAST_PACKET);
		write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
		return;
	}
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		add_mac_entry(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
}

/*
 * ioctl entry point: handle up/down transitions, multicast filter
 * updates, media requests and capability changes; everything else
 * goes to ether_ioctl().
 */
static int
eceioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ece_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ECE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ecestop(sc);
		} else {
			/* Reinitialize card on any parameter change. */
			if ((ifp->if_flags & IFF_UP) &&
			    !(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ece_restart(sc);
		}
		ECE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ECE_LOCK(sc);
		set_filter(sc);
		ECE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		/*
		 * NOTE(review): the lock/unlock pair below does no work,
		 * and there is no 'break' before 'default:', so this
		 * case also runs ether_ioctl() -- confirm the
		 * fall-through is intentional.
		 */
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			ECE_LOCK(sc);
			ECE_UNLOCK(sc);
		}
		/* FALLTHROUGH */
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

/* Forget our miibus child pointer when the child detaches. */
static void
ece_child_detached(device_t dev, device_t child)
{
	struct ece_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->miibus)
		sc->miibus = NULL;
}

/*
 * MII bus support routines.
 */
static int
ece_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ece_softc *sc;
	sc = device_get_softc(dev);
	return (phy_read(sc, phy, reg));
}

static int
ece_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ece_softc *sc;
	sc = device_get_softc(dev);
	phy_write(sc, phy, reg, data);
	return (0);
}

/* newbus/miibus method dispatch table. */
static device_method_t ece_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	ece_probe),
	DEVMETHOD(device_attach,	ece_attach),
	DEVMETHOD(device_detach,	ece_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	ece_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ece_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ece_miibus_writereg),

	{ 0, 0 }
};

static driver_t ece_driver = {
	"ece",
	ece_methods,
	sizeof(struct ece_softc),
};

DRIVER_MODULE(ece, econaarm, ece_driver, ece_devclass, 0, 0);
DRIVER_MODULE(miibus, ece, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ece, miibus, 1, 1, 1);
MODULE_DEPEND(ece, ether, 1, 1, 1);