if_cgem.c revision 249997
/*-
 * Copyright (c) 2012-2013 Thomas Skibo.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* A network interface driver for Cadence GEM Gigabit Ethernet
 * interfaces such as the one used in the Xilinx Zynq-7000 SoC.
 *
 * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual.
 * (v1.4) November 16, 2012.  Xilinx doc UG585.  GEM is covered in Ch. 16
 * and register definitions are in appendix B.18.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cadence/if_cgem.c 249997 2013-04-27 22:38:29Z wkoszek $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/cadence/if_cgem_hw.h>

#include "miibus_if.h"

#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS	256	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS	256	/* size of transmit descriptor ring */

#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))

/* Default for sysctl rxbufs.  Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	64	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		4	/* maximum segs in a tx mbuf dma */

#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

struct cgem_softc {
	struct ifnet		*ifp;
	struct mtx		sc_mtx;
	device_t		dev;
	device_t		miibus;
	int			if_old_flags;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*intrhand;
	struct callout		tick_ch;
	uint32_t		net_ctl_shadow;
	u_char			eaddr[6];

	bus_dma_tag_t		desc_dma_tag;
	bus_dma_tag_t		mbuf_dma_tag;

	/* receive descriptor ring */
	struct cgem_rx_desc	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxoverruns;	/* rx ring overruns */

	/* transmit descriptor ring */
	struct cgem_tx_desc	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	bus_dmamap_t		txring_dma_map;
};
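
/*
 * Note on the ring bookkeeping above: for both rings, the _hd_ptr index is
 * where the driver produces entries, the _tl_ptr index is where it consumes
 * them, and the _queued count (rather than a pointer comparison) is what
 * distinguishes a full ring from an empty one.  Indices wrap at the ring
 * size; a sketch of the producer step used throughout would be:
 *
 *	sc->rxring_hd_ptr = (sc->rxring_hd_ptr + 1) % CGEM_NUM_RX_DESCS;
 *	sc->rxring_queued++;
 *
 * (The code below open-codes the wrap test instead of using a modulo.)
 */
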
#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	\
	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
		 MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)

static devclass_t cgem_devclass;

static int cgem_probe(device_t dev);
static int cgem_attach(device_t dev);
static int cgem_detach(device_t dev);
static void cgem_tick(void *);
static void cgem_intr(void *);

static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
	uint32_t rnd;

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader?  Assign a random one. */
	if (i == 4) {
		rnd = arc4random();

		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;

		device_printf(sc->dev, "no mac address found, assigning "
			      "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
			      eaddr[0], eaddr[1], eaddr[2],
			      eaddr[3], eaddr[4], eaddr[5]);

		WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
		    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
		WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);
	}
}

/* cgem_mac_hash():  map 48-bit address to a 6-bit hash.
 * The 6-bit hash corresponds to a bit in a 64-bit hash
 * register.  Setting that bit in the hash register enables
 * reception of all frames with a destination address that hashes
 * to that 6-bit value.
 *
 * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech
 * Reference Manual.  Bits 0-5 in the hash are the exclusive-or of
 * every sixth bit in the destination address.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int hash;
	int i, j;

	hash = 0;
	for (i = 0; i < 6; i++)
		for (j = i; j < 48; j += 6)
			if ((eaddr[j >> 3] & (1 << (j & 7))) != 0)
				hash ^= (1 << i);

	return hash;
}
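
/*
 * Worked example of the hash above: for ff:ff:ff:ff:ff:ff every one of the
 * 48 destination bits is set, so each of the six hash bits is the XOR of
 * eight ones, i.e. 0, and the address hashes to bit 0 of CGEM_HASH_BOT.
 * (Broadcast acceptance is controlled separately via CGEM_NET_CFG_NO_BCAST
 * in cgem_rx_filter() below; this driver only enables multicast hashing
 * with CGEM_NET_CFG_MULTI_HASH_EN.)
 */
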
/* After any change in rx flags or multi-cast addresses, set up
 * hash registers and net config register bits.
 */
static void
cgem_rx_filter(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	int index;
	uint32_t hash_hi, hash_lo;
	uint32_t net_cfg;

	hash_hi = 0;
	hash_lo = 0;

	net_cfg = RD4(sc, CGEM_NET_CFG);

	net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN |
		     CGEM_NET_CFG_NO_BCAST |
		     CGEM_NET_CFG_COPY_ALL);

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		net_cfg |= CGEM_NET_CFG_COPY_ALL;
	else {
		if ((ifp->if_flags & IFF_BROADCAST) == 0)
			net_cfg |= CGEM_NET_CFG_NO_BCAST;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			hash_hi = 0xffffffff;
			hash_lo = 0xffffffff;
		} else {
			if_maddr_rlock(ifp);
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				index = cgem_mac_hash(
					LLADDR((struct sockaddr_dl *)
					       ifma->ifma_addr));
				if (index > 31)
					hash_hi |= (1<<(index-32));
				else
					hash_lo |= (1<<index);
			}
			if_maddr_runlock(ifp);
		}

		if (hash_hi != 0 || hash_lo != 0)
			net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN;
	}

	WR4(sc, CGEM_HASH_TOP, hash_hi);
	WR4(sc, CGEM_HASH_BOT, hash_lo);
	WR4(sc, CGEM_NET_CFG, net_cfg);
}

/* For bus_dmamap_load() callback. */
static void
cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (nsegs != 1 || error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
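
/*
 * Descriptor ownership, per the TRM and as used below: an RX descriptor
 * whose CGEM_RXDESC_OWN bit is set belongs to software; writing a fresh
 * buffer address with OWN clear hands it to the controller, which sets
 * OWN again once it has stored a received frame there.  TX is the mirror
 * image via CGEM_TXDESC_USED: the driver clears USED when it queues a
 * buffer, and the controller sets it (in the first descriptor of a frame)
 * when transmission is complete.
 */
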
/* Create DMA'able descriptor rings. */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MAX_DESC_RING_SIZE,
				 1,
				 MAX_DESC_RING_SIZE,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MCLBYTES,
				 TX_MAX_DMA_SEGS,
				 MCLBYTES,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/* Allocate DMA memory in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->rxring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
			      (void *)sc->rxring,
			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
			      cgem_getaddr, &sc->rxring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->rxring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->txring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->txring_dma_map);
	if (err)
		return (err);

	/* Load TX descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
			      (void *)sc->txring,
			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
			      cgem_getaddr, &sc->txring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize TX descriptor ring. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->txring_m_dmamap[i]);
		if (err)
			return (err);
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	return (0);
}

/* Fill receive descriptor ring with mbufs. */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

		/* Load map and plug in physical address. */
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
			      segs, &nsegs, BUS_DMA_NOWAIT)) {
			/* XXX: warn? */
			m_free(m);
			break;
		}
		sc->rxring_m[sc->rxring_hd_ptr] = m;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
				BUS_DMASYNC_PREREAD);

		/* Write rx descriptor and increment head pointer. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
				CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}
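
/*
 * Note that cgem_config() programs CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN),
 * so the controller writes each frame two bytes into its cluster.
 * cgem_recv() below advances m_data by the same ETHER_ALIGN so that the
 * 14-byte Ethernet header leaves the IP header 32-bit aligned.
 */
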
/* Pull received packets off of receive descriptor ring. */
static void
cgem_recv(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	while (sc->rxring_queued > 0 &&
	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
				BUS_DMASYNC_POSTREAD);

		/* Unload dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->rxring_m_dmamap[sc->rxring_tl_ptr]);

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/* Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		    (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
			ifp->if_ierrors++;
			continue;
		}

		/* Hand it off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/* Are we using hardware checksumming?  Check the
		 * status in the receive descriptor.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		CGEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		CGEM_LOCK(sc);
	}
}

/* Find completed transmits and free their mbufs. */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
		CGEM_TXDESC_USED) != 0) {

		/* Sync cache.  nop? */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_tl_ptr],
				BUS_DMASYNC_POSTWRITE);

		/* Unload DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->txring_m_dmamap[sc->txring_tl_ptr]);

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error.  Log to console. */
			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
				      "AHB error, addr=0x%x\n",
				      sc->txring[sc->txring_tl_ptr].addr);
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
				   CGEM_TXDESC_LATE_COLL)) != 0) {
			sc->ifp->if_oerrors++;
		} else
			sc->ifp->if_opackets++;

		/* If the packet spanned more than one tx descriptor,
		 * skip descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
				ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;
	}
}
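
/*
 * Note that cgem_clean_tx() leaves every reclaimed descriptor with
 * CGEM_TXDESC_USED set: the controller only sets it in the first
 * descriptor of a frame, so continuation descriptors are marked by hand
 * above.  This restores the state cgem_setup_descs() initialized, so the
 * "completed?" test at the top of the loop never fires on a stale entry.
 */
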
/* Start transmits. */
static void
cgem_start_locked(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
		    TX_MAX_DMA_SEGS - 1) {

			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >= CGEM_NUM_TX_DESCS -
			    TX_MAX_DMA_SEGS - 1) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				break;
			}
		}

		/* Grab next transmit packet. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Load DMA map. */
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			      sc->txring_m_dmamap[sc->txring_hd_ptr],
			      m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				m_freem(m);
				continue;
			}
			m = m2;
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_hd_ptr],
				BUS_DMASYNC_PREWRITE);

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
			CGEM_NUM_TX_DESCS;

		/* Fill in the TX descriptors back to front so that USED
		 * bit in first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
				segs[i].ds_addr;

			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);
	}
}

static void
cgem_start(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;

	CGEM_LOCK(sc);
	cgem_start_locked(ifp);
	CGEM_UNLOCK(sc);
}

/* Respond to changes in media. */
static void
cgem_media_update(struct cgem_softc *sc, int active)
{
	uint32_t net_cfg;

	CGEM_ASSERT_LOCKED(sc);

	/* Update hardware to reflect phy status. */
	net_cfg = RD4(sc, CGEM_NET_CFG);
	net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN |
		     CGEM_NET_CFG_FULL_DUPLEX);

	if (IFM_SUBTYPE(active) == IFM_1000_T)
		net_cfg |= (CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN);
	else if (IFM_SUBTYPE(active) == IFM_100_TX)
		net_cfg |= CGEM_NET_CFG_SPEED100;

	if ((active & IFM_FDX) != 0)
		net_cfg |= CGEM_NET_CFG_FULL_DUPLEX;
	WR4(sc, CGEM_NET_CFG, net_cfg);
}
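
/*
 * Speed encoding, per the TRM: CGEM_NET_CFG_SPEED100 alone selects
 * 100Mb/s, clearing both bits selects 10Mb/s, and CGEM_NET_CFG_GIGE_EN
 * appears to take precedence when set, which is presumably why both bits
 * are set above for 1000BASE-T.
 */
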
static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;
	int active;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		active = mii->mii_media_active;
		mii_tick(mii);
		if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
		    (IFM_ACTIVE | IFM_AVALID) &&
		    active != mii->mii_media_active)
			cgem_media_update(sc, mii->mii_media_active);
	}

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

/* Interrupt handler. */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	uint32_t istatus;

	CGEM_LOCK(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus &
	    (CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
	     CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK));

	/* Hresp not ok.  Something is very wrong with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		printf("cgem_intr: hresp not okay! rx_status=0x%x\n",
		       RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Transmitter has idled.  Free up any spent transmit buffers. */
	if ((istatus & CGEM_INTR_TX_USED_READ) != 0)
		cgem_clean_tx(sc);

	/* Packets received or overflow. */
	if ((istatus & (CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN)) != 0) {
		cgem_recv(sc);
		cgem_fill_rqueue(sc);
		if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
			/* Clear rx status register. */
			sc->rxoverruns++;
			WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
		}
	}

	CGEM_UNLOCK(sc);
}

/* Reset hardware. */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, 0);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	WR4(sc, CGEM_NET_CFG,
	    CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64);

	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}
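
/*
 * sc->net_ctl_shadow holds the persistent CGEM_NET_CTRL enable bits
 * (management port here; RX/TX enable are added in cgem_config() below)
 * so that one-shot command bits such as CGEM_NET_CTRL_START_TX can simply
 * be OR'd in on each write without reading the register back.
 */
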
/* Bring up the hardware. */
static void
cgem_config(struct cgem_softc *sc)
{
	uint32_t net_cfg;
	uint32_t dma_cfg;

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
		CGEM_NET_CFG_MDC_CLK_DIV_64 |
		CGEM_NET_CFG_FCS_REMOVE |
		CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
		CGEM_NET_CFG_GIGE_EN |
		CGEM_NET_CFG_FULL_DUPLEX |
		CGEM_NET_CFG_SPEED100;

	/* Enable receive checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
		net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
		CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
		CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
		CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16;

	/* Enable transmit checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN,
	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_TX_USED_READ |
	    CGEM_INTR_RX_OVERRUN | CGEM_INTR_HRESP_NOT_OK);
}
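
/*
 * The interrupt sources enabled at the end of cgem_config() are exactly
 * the bits cgem_intr() services and acknowledges by writing them back to
 * CGEM_INTR_STAT; enabling a source the handler never clears would leave
 * the interrupt asserted.
 */
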
/* Turn on interface and load up receive ring with buffers. */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	cgem_media_update(sc, mii->mii_media_active);
	cgem_start_locked(sc->ifp);

	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}

static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}

/* Turn off interface.  Free up any buffers in transmit or receive queues. */
static void
cgem_stop(struct cgem_softc *sc)
{
	int i;

	CGEM_ASSERT_LOCKED(sc);

	callout_stop(&sc->tick_ch);

	/* Shut down hardware. */
	cgem_reset(sc);

	/* Clear out transmit queue. */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring[i].addr = 0;
		if (sc->txring_m[i]) {
			bus_dmamap_unload(sc->mbuf_dma_tag,
					  sc->txring_m_dmamap[i]);
			m_freem(sc->txring_m[i]);
			sc->txring_m[i] = NULL;
		}
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	/* Clear out receive queue. */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		if (sc->rxring_m[i]) {
			/* Unload dmamap. */
			bus_dmamap_unload(sc->mbuf_dma_tag,
					  sc->rxring_m_dmamap[i]);

			m_freem(sc->rxring_m[i]);
			sc->rxring_m[i] = NULL;
		}
	}
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;
}

static int
cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->if_old_flags) &
				     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			cgem_stop(sc);
		}
		sc->if_old_flags = ifp->if_flags;
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				ifp->if_capenable |= (IFCAP_TXCSUM |
						      IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				ifp->if_capenable &= ~(IFCAP_TXCSUM |
						       IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				ifp->if_capenable |= (IFCAP_RXCSUM |
						      IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) |
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			} else {
				/* Turn off RX checksumming. */
				ifp->if_capenable &= ~(IFCAP_RXCSUM |
						       IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) &
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			}
		}

		CGEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
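
/*
 * For reference, the SIOCSIFCAP path above is what ifconfig(8) exercises
 * when toggling offloads, e.g. (illustrative):
 *
 *	# ifconfig cgem0 txcsum rxcsum		(enable checksum offload)
 *	# ifconfig cgem0 -txcsum		(disable transmit checksums)
 */
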
/* MII bus support routines. */
static void
cgem_child_detached(device_t dev, device_t child)
{
	struct cgem_softc *sc = device_get_softc(dev);

	if (child == sc->miibus)
		sc->miibus = NULL;
}

static int
cgem_ifmedia_upd(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_mediachg(mii);
	CGEM_UNLOCK(sc);
	return (0);
}

static void
cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	CGEM_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CGEM_UNLOCK(sc);
}

static int
cgem_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries, val;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_READ |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy read timeout: %d\n", reg);
			return (-1);
		}
	}

	val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK;

	return (val);
}

static int
cgem_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int tries;

	WR4(sc, CGEM_PHY_MAINT,
	    CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 |
	    CGEM_PHY_MAINT_OP_WRITE |
	    (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) |
	    (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) |
	    (data & CGEM_PHY_MAINT_DATA_MASK));

	/* Wait for completion. */
	tries = 0;
	while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) {
		DELAY(5);
		if (++tries > 200) {
			device_printf(dev, "phy write timeout: %d\n", reg);
			return (-1);
		}
	}

	return (0);
}
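
/*
 * The completion polling in the two PHY maintenance routines above allows
 * 200 iterations of DELAY(5), i.e. roughly 1ms, which comfortably covers
 * one MDIO frame at the MDC rate programmed by cgem_reset().
 */
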
static int
cgem_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "cadence,gem"))
		return (ENXIO);

	device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface");
	return (0);
}

static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
					     RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					     RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	CGEM_LOCK(sc);

	/* Reset hardware. */
	cgem_reset(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
			 cgem_ifmedia_upd, cgem_ifmedia_sts,
			 BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err) {
		CGEM_UNLOCK(sc);
		device_printf(dev, "attaching PHYs failed\n");
		cgem_detach(dev);
		return (err);
	}

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		CGEM_UNLOCK(sc);
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	/* Set up ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cgem_start;
	ifp->if_ioctl = cgem_ioctl;
	ifp->if_init = cgem_init;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
	/* XXX: disable hw checksumming for now. */
	ifp->if_hwassist = 0;
	ifp->if_capenable = ifp->if_capabilities &
		~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	sc->if_old_flags = ifp->if_flags;
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;

	ether_ifattach(ifp, eaddr);

	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
			     INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		CGEM_UNLOCK(sc);
		device_printf(dev, "could not set interrupt handler.\n");
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		       OID_AUTO, "rxbufs", CTLFLAG_RW,
		       &sc->rxbufs, 0,
		       "Number of receive buffers to provide");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		       OID_AUTO, "_rxoverruns", CTLFLAG_RD,
		       &sc->rxoverruns, 0,
		       "Receive ring overrun events");

	CGEM_UNLOCK(sc);

	return (0);
}
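
/*
 * The rxbufs knob lands under the device's sysctl tree, so it can be tuned
 * at runtime, e.g. (illustrative; the value must stay below
 * CGEM_NUM_RX_DESCS, see DEFAULT_NUM_RX_BUFS above):
 *
 *	# sysctl dev.cgem.0.rxbufs=128
 */
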
static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	int i;

	if (sc == NULL)
		return (ENODEV);

	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		sc->ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
				     rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
				     rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring_dma_map != NULL) {
		bus_dmamem_free(sc->desc_dma_tag, sc->rxring,
				sc->rxring_dma_map);
		sc->rxring_dma_map = NULL;
		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
	}
	if (sc->txring_dma_map != NULL) {
		bus_dmamem_free(sc->desc_dma_tag, sc->txring,
				sc->txring_dma_map);
		sc->txring_dma_map = NULL;
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
	}
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}

static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cgem_probe),
	DEVMETHOD(device_attach,	cgem_attach),
	DEVMETHOD(device_detach,	cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cgem_miibus_writereg),

	DEVMETHOD_END
};

static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};

DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);