if_vge.c revision 229093
1/*- 2 * Copyright (c) 2004 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: stable/9/sys/dev/vge/if_vge.c 229093 2011-12-31 14:12:12Z hselasky $"); 35 36/* 37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44/* 45 * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that 46 * combines a tri-speed ethernet MAC and PHY, with the following 47 * features: 48 * 49 * o Jumbo frame support up to 16K 50 * o Transmit and receive flow control 51 * o IPv4 checksum offload 52 * o VLAN tag insertion and stripping 53 * o TCP large send 54 * o 64-bit multicast hash table filter 55 * o 64 entry CAM filter 56 * o 16K RX FIFO and 48K TX FIFO memory 57 * o Interrupt moderation 58 * 59 * The VT6122 supports up to four transmit DMA queues. The descriptors 60 * in the transmit ring can address up to 7 data fragments; frames which 61 * span more than 7 data buffers must be coalesced, but in general the 62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 63 * long. The receive descriptors address only a single buffer. 64 * 65 * There are two peculiar design issues with the VT6122. One is that 66 * receive data buffers must be aligned on a 32-bit boundary. This is 67 * not a problem where the VT6122 is used as a LOM device in x86-based 68 * systems, but on architectures that generate unaligned access traps, we 69 * have to do some copying. 70 * 71 * The other issue has to do with the way 64-bit addresses are handled. 72 * The DMA descriptors only allow you to specify 48 bits of addressing 73 * information. The remaining 16 bits are specified using one of the 74 * I/O registers. If you only have a 32-bit system, then this isn't 75 * an issue, but if you have a 64-bit system and more than 4GB of 76 * memory, you must have to make sure your network data buffers reside 77 * in the same 48-bit 'segment.' 78 * 79 * Special thanks to Ryan Fu at VIA Networking for providing documentation 80 * and sample NICs for testing. 81 */ 82 83#ifdef HAVE_KERNEL_OPTION_HEADERS 84#include "opt_device_polling.h" 85#endif 86 87#include <sys/param.h> 88#include <sys/endian.h> 89#include <sys/systm.h> 90#include <sys/sockio.h> 91#include <sys/mbuf.h> 92#include <sys/malloc.h> 93#include <sys/module.h> 94#include <sys/kernel.h> 95#include <sys/socket.h> 96#include <sys/sysctl.h> 97 98#include <net/if.h> 99#include <net/if_arp.h> 100#include <net/ethernet.h> 101#include <net/if_dl.h> 102#include <net/if_media.h> 103#include <net/if_types.h> 104#include <net/if_vlan_var.h> 105 106#include <net/bpf.h> 107 108#include <machine/bus.h> 109#include <machine/resource.h> 110#include <sys/bus.h> 111#include <sys/rman.h> 112 113#include <dev/mii/mii.h> 114#include <dev/mii/miivar.h> 115 116#include <dev/pci/pcireg.h> 117#include <dev/pci/pcivar.h> 118 119MODULE_DEPEND(vge, pci, 1, 1, 1); 120MODULE_DEPEND(vge, ether, 1, 1, 1); 121MODULE_DEPEND(vge, miibus, 1, 1, 1); 122 123/* "device miibus" required. See GENERIC if you get errors here. */ 124#include "miibus_if.h" 125 126#include <dev/vge/if_vgereg.h> 127#include <dev/vge/if_vgevar.h> 128 129#define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 130 131/* Tunables */ 132static int msi_disable = 0; 133TUNABLE_INT("hw.vge.msi_disable", &msi_disable); 134 135/* 136 * The SQE error counter of MIB seems to report bogus value. 137 * Vendor's workaround does not seem to work on PCIe based 138 * controllers. Disable it until we find better workaround. 139 */ 140#undef VGE_ENABLE_SQEERR 141 142/* 143 * Various supported device vendors/types and their names. 144 */ 145static struct vge_type vge_devs[] = { 146 { VIA_VENDORID, VIA_DEVICEID_61XX, 147 "VIA Networking Velocity Gigabit Ethernet" }, 148 { 0, 0, NULL } 149}; 150 151static int vge_attach(device_t); 152static int vge_detach(device_t); 153static int vge_probe(device_t); 154static int vge_resume(device_t); 155static int vge_shutdown(device_t); 156static int vge_suspend(device_t); 157 158static void vge_cam_clear(struct vge_softc *); 159static int vge_cam_set(struct vge_softc *, uint8_t *); 160static void vge_clrwol(struct vge_softc *); 161static void vge_discard_rxbuf(struct vge_softc *, int); 162static int vge_dma_alloc(struct vge_softc *); 163static void vge_dma_free(struct vge_softc *); 164static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 165#ifdef VGE_EEPROM 166static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *); 167#endif 168static int vge_encap(struct vge_softc *, struct mbuf **); 169#ifndef __NO_STRICT_ALIGNMENT 170static __inline void 171 vge_fixup_rx(struct mbuf *); 172#endif 173static void vge_freebufs(struct vge_softc *); 174static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 175static int vge_ifmedia_upd(struct ifnet *); 176static void vge_init(void *); 177static void vge_init_locked(struct vge_softc *); 178static void vge_intr(void *); 179static void vge_intr_holdoff(struct vge_softc *); 180static int vge_ioctl(struct ifnet *, u_long, caddr_t); 181static void vge_link_statchg(void *); 182static int vge_miibus_readreg(device_t, int, int); 183static void vge_miibus_statchg(device_t); 184static int vge_miibus_writereg(device_t, int, int, int); 185static void vge_miipoll_start(struct vge_softc *); 186static void vge_miipoll_stop(struct vge_softc *); 187static int vge_newbuf(struct vge_softc *, int); 188static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int); 189static void vge_reset(struct vge_softc *); 190static int vge_rx_list_init(struct vge_softc *); 191static int vge_rxeof(struct vge_softc *, int); 192static void vge_rxfilter(struct vge_softc *); 193static void vge_setvlan(struct vge_softc *); 194static void vge_setwol(struct vge_softc *); 195static void vge_start(struct ifnet *); 196static void vge_start_locked(struct ifnet *); 197static void vge_stats_clear(struct vge_softc *); 198static void vge_stats_update(struct vge_softc *); 199static void vge_stop(struct vge_softc *); 200static void vge_sysctl_node(struct vge_softc *); 201static int vge_tx_list_init(struct vge_softc *); 202static void vge_txeof(struct vge_softc *); 203static void vge_watchdog(void *); 204 205static device_method_t vge_methods[] = { 206 /* Device interface */ 207 DEVMETHOD(device_probe, vge_probe), 208 DEVMETHOD(device_attach, vge_attach), 209 DEVMETHOD(device_detach, vge_detach), 210 DEVMETHOD(device_suspend, vge_suspend), 211 DEVMETHOD(device_resume, vge_resume), 212 DEVMETHOD(device_shutdown, vge_shutdown), 213 214 /* MII interface */ 215 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 216 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 217 DEVMETHOD(miibus_statchg, vge_miibus_statchg), 218 219 DEVMETHOD_END 220}; 221 222static driver_t vge_driver = { 223 "vge", 224 vge_methods, 225 sizeof(struct vge_softc) 226}; 227 228static devclass_t vge_devclass; 229 230DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); 231DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0); 232 233#ifdef VGE_EEPROM 234/* 235 * Read a word of data stored in the EEPROM at address 'addr.' 236 */ 237static void 238vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest) 239{ 240 int i; 241 uint16_t word = 0; 242 243 /* 244 * Enter EEPROM embedded programming mode. In order to 245 * access the EEPROM at all, we first have to set the 246 * EELOAD bit in the CHIPCFG2 register. 247 */ 248 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 249 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 250 251 /* Select the address of the word we want to read */ 252 CSR_WRITE_1(sc, VGE_EEADDR, addr); 253 254 /* Issue read command */ 255 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 256 257 /* Wait for the done bit to be set. */ 258 for (i = 0; i < VGE_TIMEOUT; i++) { 259 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 260 break; 261 } 262 263 if (i == VGE_TIMEOUT) { 264 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 265 *dest = 0; 266 return; 267 } 268 269 /* Read the result */ 270 word = CSR_READ_2(sc, VGE_EERDDAT); 271 272 /* Turn off EEPROM access mode. */ 273 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 274 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 275 276 *dest = word; 277} 278#endif 279 280/* 281 * Read a sequence of words from the EEPROM. 282 */ 283static void 284vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap) 285{ 286 int i; 287#ifdef VGE_EEPROM 288 uint16_t word = 0, *ptr; 289 290 for (i = 0; i < cnt; i++) { 291 vge_eeprom_getword(sc, off + i, &word); 292 ptr = (uint16_t *)(dest + (i * 2)); 293 if (swap) 294 *ptr = ntohs(word); 295 else 296 *ptr = word; 297 } 298#else 299 for (i = 0; i < ETHER_ADDR_LEN; i++) 300 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 301#endif 302} 303 304static void 305vge_miipoll_stop(struct vge_softc *sc) 306{ 307 int i; 308 309 CSR_WRITE_1(sc, VGE_MIICMD, 0); 310 311 for (i = 0; i < VGE_TIMEOUT; i++) { 312 DELAY(1); 313 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 314 break; 315 } 316 317 if (i == VGE_TIMEOUT) 318 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 319} 320 321static void 322vge_miipoll_start(struct vge_softc *sc) 323{ 324 int i; 325 326 /* First, make sure we're idle. */ 327 328 CSR_WRITE_1(sc, VGE_MIICMD, 0); 329 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 330 331 for (i = 0; i < VGE_TIMEOUT; i++) { 332 DELAY(1); 333 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 334 break; 335 } 336 337 if (i == VGE_TIMEOUT) { 338 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 339 return; 340 } 341 342 /* Now enable auto poll mode. */ 343 344 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 345 346 /* And make sure it started. */ 347 348 for (i = 0; i < VGE_TIMEOUT; i++) { 349 DELAY(1); 350 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 351 break; 352 } 353 354 if (i == VGE_TIMEOUT) 355 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 356} 357 358static int 359vge_miibus_readreg(device_t dev, int phy, int reg) 360{ 361 struct vge_softc *sc; 362 int i; 363 uint16_t rval = 0; 364 365 sc = device_get_softc(dev); 366 367 vge_miipoll_stop(sc); 368 369 /* Specify the register we want to read. */ 370 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 371 372 /* Issue read command. */ 373 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 374 375 /* Wait for the read command bit to self-clear. */ 376 for (i = 0; i < VGE_TIMEOUT; i++) { 377 DELAY(1); 378 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) 379 break; 380 } 381 382 if (i == VGE_TIMEOUT) 383 device_printf(sc->vge_dev, "MII read timed out\n"); 384 else 385 rval = CSR_READ_2(sc, VGE_MIIDATA); 386 387 vge_miipoll_start(sc); 388 389 return (rval); 390} 391 392static int 393vge_miibus_writereg(device_t dev, int phy, int reg, int data) 394{ 395 struct vge_softc *sc; 396 int i, rval = 0; 397 398 sc = device_get_softc(dev); 399 400 vge_miipoll_stop(sc); 401 402 /* Specify the register we want to write. */ 403 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 404 405 /* Specify the data we want to write. */ 406 CSR_WRITE_2(sc, VGE_MIIDATA, data); 407 408 /* Issue write command. */ 409 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); 410 411 /* Wait for the write command bit to self-clear. */ 412 for (i = 0; i < VGE_TIMEOUT; i++) { 413 DELAY(1); 414 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) 415 break; 416 } 417 418 if (i == VGE_TIMEOUT) { 419 device_printf(sc->vge_dev, "MII write timed out\n"); 420 rval = EIO; 421 } 422 423 vge_miipoll_start(sc); 424 425 return (rval); 426} 427 428static void 429vge_cam_clear(struct vge_softc *sc) 430{ 431 int i; 432 433 /* 434 * Turn off all the mask bits. This tells the chip 435 * that none of the entries in the CAM filter are valid. 436 * desired entries will be enabled as we fill the filter in. 437 */ 438 439 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 440 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 441 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); 442 for (i = 0; i < 8; i++) 443 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 444 445 /* Clear the VLAN filter too. */ 446 447 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0); 448 for (i = 0; i < 8; i++) 449 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 450 451 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 452 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 453 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 454 455 sc->vge_camidx = 0; 456} 457 458static int 459vge_cam_set(struct vge_softc *sc, uint8_t *addr) 460{ 461 int i, error = 0; 462 463 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 464 return (ENOSPC); 465 466 /* Select the CAM data page. */ 467 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 468 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); 469 470 /* Set the filter entry we want to update and enable writing. */ 471 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx); 472 473 /* Write the address to the CAM registers */ 474 for (i = 0; i < ETHER_ADDR_LEN; i++) 475 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); 476 477 /* Issue a write command. */ 478 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); 479 480 /* Wake for it to clear. */ 481 for (i = 0; i < VGE_TIMEOUT; i++) { 482 DELAY(1); 483 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) 484 break; 485 } 486 487 if (i == VGE_TIMEOUT) { 488 device_printf(sc->vge_dev, "setting CAM filter failed\n"); 489 error = EIO; 490 goto fail; 491 } 492 493 /* Select the CAM mask page. */ 494 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 495 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 496 497 /* Set the mask bit that enables this filter. */ 498 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8), 499 1<<(sc->vge_camidx & 7)); 500 501 sc->vge_camidx++; 502 503fail: 504 /* Turn off access to CAM. */ 505 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 506 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 507 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 508 509 return (error); 510} 511 512static void 513vge_setvlan(struct vge_softc *sc) 514{ 515 struct ifnet *ifp; 516 uint8_t cfg; 517 518 VGE_LOCK_ASSERT(sc); 519 520 ifp = sc->vge_ifp; 521 cfg = CSR_READ_1(sc, VGE_RXCFG); 522 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 523 cfg |= VGE_VTAG_OPT2; 524 else 525 cfg &= ~VGE_VTAG_OPT2; 526 CSR_WRITE_1(sc, VGE_RXCFG, cfg); 527} 528 529/* 530 * Program the multicast filter. We use the 64-entry CAM filter 531 * for perfect filtering. If there's more than 64 multicast addresses, 532 * we use the hash filter instead. 533 */ 534static void 535vge_rxfilter(struct vge_softc *sc) 536{ 537 struct ifnet *ifp; 538 struct ifmultiaddr *ifma; 539 uint32_t h, hashes[2]; 540 uint8_t rxcfg; 541 int error = 0; 542 543 VGE_LOCK_ASSERT(sc); 544 545 /* First, zot all the multicast entries. */ 546 hashes[0] = 0; 547 hashes[1] = 0; 548 549 rxcfg = CSR_READ_1(sc, VGE_RXCTL); 550 rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST | 551 VGE_RXCTL_RX_PROMISC); 552 /* 553 * Always allow VLAN oversized frames and frames for 554 * this host. 555 */ 556 rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST; 557 558 ifp = sc->vge_ifp; 559 if ((ifp->if_flags & IFF_BROADCAST) != 0) 560 rxcfg |= VGE_RXCTL_RX_BCAST; 561 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 562 if ((ifp->if_flags & IFF_PROMISC) != 0) 563 rxcfg |= VGE_RXCTL_RX_PROMISC; 564 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 565 hashes[0] = 0xFFFFFFFF; 566 hashes[1] = 0xFFFFFFFF; 567 } 568 goto done; 569 } 570 571 vge_cam_clear(sc); 572 /* Now program new ones */ 573 if_maddr_rlock(ifp); 574 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 575 if (ifma->ifma_addr->sa_family != AF_LINK) 576 continue; 577 error = vge_cam_set(sc, 578 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 579 if (error) 580 break; 581 } 582 583 /* If there were too many addresses, use the hash filter. */ 584 if (error) { 585 vge_cam_clear(sc); 586 587 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 588 if (ifma->ifma_addr->sa_family != AF_LINK) 589 continue; 590 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 591 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 592 if (h < 32) 593 hashes[0] |= (1 << h); 594 else 595 hashes[1] |= (1 << (h - 32)); 596 } 597 } 598 if_maddr_runlock(ifp); 599 600done: 601 if (hashes[0] != 0 || hashes[1] != 0) 602 rxcfg |= VGE_RXCTL_RX_MCAST; 603 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 604 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 605 CSR_WRITE_1(sc, VGE_RXCTL, rxcfg); 606} 607 608static void 609vge_reset(struct vge_softc *sc) 610{ 611 int i; 612 613 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 614 615 for (i = 0; i < VGE_TIMEOUT; i++) { 616 DELAY(5); 617 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 618 break; 619 } 620 621 if (i == VGE_TIMEOUT) { 622 device_printf(sc->vge_dev, "soft reset timed out\n"); 623 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 624 DELAY(2000); 625 } 626 627 DELAY(5000); 628} 629 630/* 631 * Probe for a VIA gigabit chip. Check the PCI vendor and device 632 * IDs against our list and return a device name if we find a match. 633 */ 634static int 635vge_probe(device_t dev) 636{ 637 struct vge_type *t; 638 639 t = vge_devs; 640 641 while (t->vge_name != NULL) { 642 if ((pci_get_vendor(dev) == t->vge_vid) && 643 (pci_get_device(dev) == t->vge_did)) { 644 device_set_desc(dev, t->vge_name); 645 return (BUS_PROBE_DEFAULT); 646 } 647 t++; 648 } 649 650 return (ENXIO); 651} 652 653/* 654 * Map a single buffer address. 655 */ 656 657struct vge_dmamap_arg { 658 bus_addr_t vge_busaddr; 659}; 660 661static void 662vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 663{ 664 struct vge_dmamap_arg *ctx; 665 666 if (error != 0) 667 return; 668 669 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 670 671 ctx = (struct vge_dmamap_arg *)arg; 672 ctx->vge_busaddr = segs[0].ds_addr; 673} 674 675static int 676vge_dma_alloc(struct vge_softc *sc) 677{ 678 struct vge_dmamap_arg ctx; 679 struct vge_txdesc *txd; 680 struct vge_rxdesc *rxd; 681 bus_addr_t lowaddr, tx_ring_end, rx_ring_end; 682 int error, i; 683 684 /* 685 * It seems old PCI controllers do not support DAC. DAC 686 * configuration can be enabled by accessing VGE_CHIPCFG3 687 * register but honor EEPROM configuration instead of 688 * blindly overriding DAC configuration. PCIe based 689 * controllers are supposed to support 64bit DMA so enable 690 * 64bit DMA on these controllers. 691 */ 692 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 693 lowaddr = BUS_SPACE_MAXADDR; 694 else 695 lowaddr = BUS_SPACE_MAXADDR_32BIT; 696 697again: 698 /* Create parent ring tag. */ 699 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 700 1, 0, /* algnmnt, boundary */ 701 lowaddr, /* lowaddr */ 702 BUS_SPACE_MAXADDR, /* highaddr */ 703 NULL, NULL, /* filter, filterarg */ 704 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 705 0, /* nsegments */ 706 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 707 0, /* flags */ 708 NULL, NULL, /* lockfunc, lockarg */ 709 &sc->vge_cdata.vge_ring_tag); 710 if (error != 0) { 711 device_printf(sc->vge_dev, 712 "could not create parent DMA tag.\n"); 713 goto fail; 714 } 715 716 /* Create tag for Tx ring. */ 717 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 718 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 719 BUS_SPACE_MAXADDR, /* lowaddr */ 720 BUS_SPACE_MAXADDR, /* highaddr */ 721 NULL, NULL, /* filter, filterarg */ 722 VGE_TX_LIST_SZ, /* maxsize */ 723 1, /* nsegments */ 724 VGE_TX_LIST_SZ, /* maxsegsize */ 725 0, /* flags */ 726 NULL, NULL, /* lockfunc, lockarg */ 727 &sc->vge_cdata.vge_tx_ring_tag); 728 if (error != 0) { 729 device_printf(sc->vge_dev, 730 "could not allocate Tx ring DMA tag.\n"); 731 goto fail; 732 } 733 734 /* Create tag for Rx ring. */ 735 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 736 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 737 BUS_SPACE_MAXADDR, /* lowaddr */ 738 BUS_SPACE_MAXADDR, /* highaddr */ 739 NULL, NULL, /* filter, filterarg */ 740 VGE_RX_LIST_SZ, /* maxsize */ 741 1, /* nsegments */ 742 VGE_RX_LIST_SZ, /* maxsegsize */ 743 0, /* flags */ 744 NULL, NULL, /* lockfunc, lockarg */ 745 &sc->vge_cdata.vge_rx_ring_tag); 746 if (error != 0) { 747 device_printf(sc->vge_dev, 748 "could not allocate Rx ring DMA tag.\n"); 749 goto fail; 750 } 751 752 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 753 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag, 754 (void **)&sc->vge_rdata.vge_tx_ring, 755 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 756 &sc->vge_cdata.vge_tx_ring_map); 757 if (error != 0) { 758 device_printf(sc->vge_dev, 759 "could not allocate DMA'able memory for Tx ring.\n"); 760 goto fail; 761 } 762 763 ctx.vge_busaddr = 0; 764 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag, 765 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring, 766 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 767 if (error != 0 || ctx.vge_busaddr == 0) { 768 device_printf(sc->vge_dev, 769 "could not load DMA'able memory for Tx ring.\n"); 770 goto fail; 771 } 772 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr; 773 774 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 775 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag, 776 (void **)&sc->vge_rdata.vge_rx_ring, 777 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 778 &sc->vge_cdata.vge_rx_ring_map); 779 if (error != 0) { 780 device_printf(sc->vge_dev, 781 "could not allocate DMA'able memory for Rx ring.\n"); 782 goto fail; 783 } 784 785 ctx.vge_busaddr = 0; 786 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag, 787 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring, 788 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 789 if (error != 0 || ctx.vge_busaddr == 0) { 790 device_printf(sc->vge_dev, 791 "could not load DMA'able memory for Rx ring.\n"); 792 goto fail; 793 } 794 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr; 795 796 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 797 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ; 798 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ; 799 if ((VGE_ADDR_HI(tx_ring_end) != 800 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) || 801 (VGE_ADDR_HI(rx_ring_end) != 802 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) || 803 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) { 804 device_printf(sc->vge_dev, "4GB boundary crossed, " 805 "switching to 32bit DMA address mode.\n"); 806 vge_dma_free(sc); 807 /* Limit DMA address space to 32bit and try again. */ 808 lowaddr = BUS_SPACE_MAXADDR_32BIT; 809 goto again; 810 } 811 812 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 813 lowaddr = VGE_BUF_DMA_MAXADDR; 814 else 815 lowaddr = BUS_SPACE_MAXADDR_32BIT; 816 /* Create parent buffer tag. */ 817 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 818 1, 0, /* algnmnt, boundary */ 819 lowaddr, /* lowaddr */ 820 BUS_SPACE_MAXADDR, /* highaddr */ 821 NULL, NULL, /* filter, filterarg */ 822 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 823 0, /* nsegments */ 824 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 825 0, /* flags */ 826 NULL, NULL, /* lockfunc, lockarg */ 827 &sc->vge_cdata.vge_buffer_tag); 828 if (error != 0) { 829 device_printf(sc->vge_dev, 830 "could not create parent buffer DMA tag.\n"); 831 goto fail; 832 } 833 834 /* Create tag for Tx buffers. */ 835 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 836 1, 0, /* algnmnt, boundary */ 837 BUS_SPACE_MAXADDR, /* lowaddr */ 838 BUS_SPACE_MAXADDR, /* highaddr */ 839 NULL, NULL, /* filter, filterarg */ 840 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */ 841 VGE_MAXTXSEGS, /* nsegments */ 842 MCLBYTES, /* maxsegsize */ 843 0, /* flags */ 844 NULL, NULL, /* lockfunc, lockarg */ 845 &sc->vge_cdata.vge_tx_tag); 846 if (error != 0) { 847 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n"); 848 goto fail; 849 } 850 851 /* Create tag for Rx buffers. */ 852 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 853 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 854 BUS_SPACE_MAXADDR, /* lowaddr */ 855 BUS_SPACE_MAXADDR, /* highaddr */ 856 NULL, NULL, /* filter, filterarg */ 857 MCLBYTES, /* maxsize */ 858 1, /* nsegments */ 859 MCLBYTES, /* maxsegsize */ 860 0, /* flags */ 861 NULL, NULL, /* lockfunc, lockarg */ 862 &sc->vge_cdata.vge_rx_tag); 863 if (error != 0) { 864 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n"); 865 goto fail; 866 } 867 868 /* Create DMA maps for Tx buffers. */ 869 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 870 txd = &sc->vge_cdata.vge_txdesc[i]; 871 txd->tx_m = NULL; 872 txd->tx_dmamap = NULL; 873 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0, 874 &txd->tx_dmamap); 875 if (error != 0) { 876 device_printf(sc->vge_dev, 877 "could not create Tx dmamap.\n"); 878 goto fail; 879 } 880 } 881 /* Create DMA maps for Rx buffers. */ 882 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 883 &sc->vge_cdata.vge_rx_sparemap)) != 0) { 884 device_printf(sc->vge_dev, 885 "could not create spare Rx dmamap.\n"); 886 goto fail; 887 } 888 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 889 rxd = &sc->vge_cdata.vge_rxdesc[i]; 890 rxd->rx_m = NULL; 891 rxd->rx_dmamap = NULL; 892 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 893 &rxd->rx_dmamap); 894 if (error != 0) { 895 device_printf(sc->vge_dev, 896 "could not create Rx dmamap.\n"); 897 goto fail; 898 } 899 } 900 901fail: 902 return (error); 903} 904 905static void 906vge_dma_free(struct vge_softc *sc) 907{ 908 struct vge_txdesc *txd; 909 struct vge_rxdesc *rxd; 910 int i; 911 912 /* Tx ring. */ 913 if (sc->vge_cdata.vge_tx_ring_tag != NULL) { 914 if (sc->vge_cdata.vge_tx_ring_map) 915 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag, 916 sc->vge_cdata.vge_tx_ring_map); 917 if (sc->vge_cdata.vge_tx_ring_map && 918 sc->vge_rdata.vge_tx_ring) 919 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag, 920 sc->vge_rdata.vge_tx_ring, 921 sc->vge_cdata.vge_tx_ring_map); 922 sc->vge_rdata.vge_tx_ring = NULL; 923 sc->vge_cdata.vge_tx_ring_map = NULL; 924 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag); 925 sc->vge_cdata.vge_tx_ring_tag = NULL; 926 } 927 /* Rx ring. */ 928 if (sc->vge_cdata.vge_rx_ring_tag != NULL) { 929 if (sc->vge_cdata.vge_rx_ring_map) 930 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag, 931 sc->vge_cdata.vge_rx_ring_map); 932 if (sc->vge_cdata.vge_rx_ring_map && 933 sc->vge_rdata.vge_rx_ring) 934 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag, 935 sc->vge_rdata.vge_rx_ring, 936 sc->vge_cdata.vge_rx_ring_map); 937 sc->vge_rdata.vge_rx_ring = NULL; 938 sc->vge_cdata.vge_rx_ring_map = NULL; 939 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag); 940 sc->vge_cdata.vge_rx_ring_tag = NULL; 941 } 942 /* Tx buffers. */ 943 if (sc->vge_cdata.vge_tx_tag != NULL) { 944 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 945 txd = &sc->vge_cdata.vge_txdesc[i]; 946 if (txd->tx_dmamap != NULL) { 947 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag, 948 txd->tx_dmamap); 949 txd->tx_dmamap = NULL; 950 } 951 } 952 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag); 953 sc->vge_cdata.vge_tx_tag = NULL; 954 } 955 /* Rx buffers. */ 956 if (sc->vge_cdata.vge_rx_tag != NULL) { 957 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 958 rxd = &sc->vge_cdata.vge_rxdesc[i]; 959 if (rxd->rx_dmamap != NULL) { 960 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 961 rxd->rx_dmamap); 962 rxd->rx_dmamap = NULL; 963 } 964 } 965 if (sc->vge_cdata.vge_rx_sparemap != NULL) { 966 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 967 sc->vge_cdata.vge_rx_sparemap); 968 sc->vge_cdata.vge_rx_sparemap = NULL; 969 } 970 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag); 971 sc->vge_cdata.vge_rx_tag = NULL; 972 } 973 974 if (sc->vge_cdata.vge_buffer_tag != NULL) { 975 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag); 976 sc->vge_cdata.vge_buffer_tag = NULL; 977 } 978 if (sc->vge_cdata.vge_ring_tag != NULL) { 979 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag); 980 sc->vge_cdata.vge_ring_tag = NULL; 981 } 982} 983 984/* 985 * Attach the interface. Allocate softc structures, do ifmedia 986 * setup and ethernet/BPF attach. 987 */ 988static int 989vge_attach(device_t dev) 990{ 991 u_char eaddr[ETHER_ADDR_LEN]; 992 struct vge_softc *sc; 993 struct ifnet *ifp; 994 int error = 0, cap, i, msic, rid; 995 996 sc = device_get_softc(dev); 997 sc->vge_dev = dev; 998 999 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1000 MTX_DEF); 1001 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0); 1002 1003 /* 1004 * Map control/status registers. 1005 */ 1006 pci_enable_busmaster(dev); 1007 1008 rid = PCIR_BAR(1); 1009 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1010 RF_ACTIVE); 1011 1012 if (sc->vge_res == NULL) { 1013 device_printf(dev, "couldn't map ports/memory\n"); 1014 error = ENXIO; 1015 goto fail; 1016 } 1017 1018 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) { 1019 sc->vge_flags |= VGE_FLAG_PCIE; 1020 sc->vge_expcap = cap; 1021 } else 1022 sc->vge_flags |= VGE_FLAG_JUMBO; 1023 if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) { 1024 sc->vge_flags |= VGE_FLAG_PMCAP; 1025 sc->vge_pmcap = cap; 1026 } 1027 rid = 0; 1028 msic = pci_msi_count(dev); 1029 if (msi_disable == 0 && msic > 0) { 1030 msic = 1; 1031 if (pci_alloc_msi(dev, &msic) == 0) { 1032 if (msic == 1) { 1033 sc->vge_flags |= VGE_FLAG_MSI; 1034 device_printf(dev, "Using %d MSI message\n", 1035 msic); 1036 rid = 1; 1037 } else 1038 pci_release_msi(dev); 1039 } 1040 } 1041 1042 /* Allocate interrupt */ 1043 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1044 ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE); 1045 if (sc->vge_irq == NULL) { 1046 device_printf(dev, "couldn't map interrupt\n"); 1047 error = ENXIO; 1048 goto fail; 1049 } 1050 1051 /* Reset the adapter. */ 1052 vge_reset(sc); 1053 /* Reload EEPROM. */ 1054 CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 1055 for (i = 0; i < VGE_TIMEOUT; i++) { 1056 DELAY(5); 1057 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 1058 break; 1059 } 1060 if (i == VGE_TIMEOUT) 1061 device_printf(dev, "EEPROM reload timed out\n"); 1062 /* 1063 * Clear PACPI as EEPROM reload will set the bit. Otherwise 1064 * MAC will receive magic packet which in turn confuses 1065 * controller. 1066 */ 1067 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 1068 1069 /* 1070 * Get station address from the EEPROM. 1071 */ 1072 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); 1073 /* 1074 * Save configured PHY address. 1075 * It seems the PHY address of PCIe controllers just 1076 * reflects media jump strapping status so we assume the 1077 * internal PHY address of PCIe controller is at 1. 1078 */ 1079 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 1080 sc->vge_phyaddr = 1; 1081 else 1082 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) & 1083 VGE_MIICFG_PHYADDR; 1084 /* Clear WOL and take hardware from powerdown. */ 1085 vge_clrwol(sc); 1086 vge_sysctl_node(sc); 1087 error = vge_dma_alloc(sc); 1088 if (error) 1089 goto fail; 1090 1091 ifp = sc->vge_ifp = if_alloc(IFT_ETHER); 1092 if (ifp == NULL) { 1093 device_printf(dev, "can not if_alloc()\n"); 1094 error = ENOSPC; 1095 goto fail; 1096 } 1097 1098 /* Do MII setup */ 1099 error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd, 1100 vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY, 1101 0); 1102 if (error != 0) { 1103 device_printf(dev, "attaching PHYs failed\n"); 1104 goto fail; 1105 } 1106 1107 ifp->if_softc = sc; 1108 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1109 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1110 ifp->if_ioctl = vge_ioctl; 1111 ifp->if_capabilities = IFCAP_VLAN_MTU; 1112 ifp->if_start = vge_start; 1113 ifp->if_hwassist = VGE_CSUM_FEATURES; 1114 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | 1115 IFCAP_VLAN_HWTAGGING; 1116 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) 1117 ifp->if_capabilities |= IFCAP_WOL; 1118 ifp->if_capenable = ifp->if_capabilities; 1119#ifdef DEVICE_POLLING 1120 ifp->if_capabilities |= IFCAP_POLLING; 1121#endif 1122 ifp->if_init = vge_init; 1123 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1); 1124 ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1; 1125 IFQ_SET_READY(&ifp->if_snd); 1126 1127 /* 1128 * Call MI attach routine. 1129 */ 1130 ether_ifattach(ifp, eaddr); 1131 1132 /* Tell the upper layer(s) we support long frames. */ 1133 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1134 1135 /* Hook interrupt last to avoid having to lock softc */ 1136 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, 1137 NULL, vge_intr, sc, &sc->vge_intrhand); 1138 1139 if (error) { 1140 device_printf(dev, "couldn't set up irq\n"); 1141 ether_ifdetach(ifp); 1142 goto fail; 1143 } 1144 1145fail: 1146 if (error) 1147 vge_detach(dev); 1148 1149 return (error); 1150} 1151 1152/* 1153 * Shutdown hardware and free up resources. This can be called any 1154 * time after the mutex has been initialized. It is called in both 1155 * the error case in attach and the normal detach case so it needs 1156 * to be careful about only freeing resources that have actually been 1157 * allocated. 1158 */ 1159static int 1160vge_detach(device_t dev) 1161{ 1162 struct vge_softc *sc; 1163 struct ifnet *ifp; 1164 1165 sc = device_get_softc(dev); 1166 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized")); 1167 ifp = sc->vge_ifp; 1168 1169#ifdef DEVICE_POLLING 1170 if (ifp->if_capenable & IFCAP_POLLING) 1171 ether_poll_deregister(ifp); 1172#endif 1173 1174 /* These should only be active if attach succeeded */ 1175 if (device_is_attached(dev)) { 1176 ether_ifdetach(ifp); 1177 VGE_LOCK(sc); 1178 vge_stop(sc); 1179 VGE_UNLOCK(sc); 1180 callout_drain(&sc->vge_watchdog); 1181 } 1182 if (sc->vge_miibus) 1183 device_delete_child(dev, sc->vge_miibus); 1184 bus_generic_detach(dev); 1185 1186 if (sc->vge_intrhand) 1187 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand); 1188 if (sc->vge_irq) 1189 bus_release_resource(dev, SYS_RES_IRQ, 1190 sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq); 1191 if (sc->vge_flags & VGE_FLAG_MSI) 1192 pci_release_msi(dev); 1193 if (sc->vge_res) 1194 bus_release_resource(dev, SYS_RES_MEMORY, 1195 PCIR_BAR(1), sc->vge_res); 1196 if (ifp) 1197 if_free(ifp); 1198 1199 vge_dma_free(sc); 1200 mtx_destroy(&sc->vge_mtx); 1201 1202 return (0); 1203} 1204 1205static void 1206vge_discard_rxbuf(struct vge_softc *sc, int prod) 1207{ 1208 struct vge_rxdesc *rxd; 1209 int i; 1210 1211 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1212 rxd->rx_desc->vge_sts = 0; 1213 rxd->rx_desc->vge_ctl = 0; 1214 1215 /* 1216 * Note: the manual fails to document the fact that for 1217 * proper opration, the driver needs to replentish the RX 1218 * DMA ring 4 descriptors at a time (rather than one at a 1219 * time, like most chips). We can allocate the new buffers 1220 * but we should not set the OWN bits until we're ready 1221 * to hand back 4 of them in one shot. 1222 */ 1223 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1224 for (i = VGE_RXCHUNK; i > 0; i--) { 1225 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1226 rxd = rxd->rxd_prev; 1227 } 1228 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1229 } 1230} 1231 1232static int 1233vge_newbuf(struct vge_softc *sc, int prod) 1234{ 1235 struct vge_rxdesc *rxd; 1236 struct mbuf *m; 1237 bus_dma_segment_t segs[1]; 1238 bus_dmamap_t map; 1239 int i, nsegs; 1240 1241 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1242 if (m == NULL) 1243 return (ENOBUFS); 1244 /* 1245 * This is part of an evil trick to deal with strict-alignment 1246 * architectures. The VIA chip requires RX buffers to be aligned 1247 * on 32-bit boundaries, but that will hose strict-alignment 1248 * architectures. To get around this, we leave some empty space 1249 * at the start of each buffer and for non-strict-alignment hosts, 1250 * we copy the buffer back two bytes to achieve word alignment. 1251 * This is slightly more efficient than allocating a new buffer, 1252 * copying the contents, and discarding the old buffer. 1253 */ 1254 m->m_len = m->m_pkthdr.len = MCLBYTES; 1255 m_adj(m, VGE_RX_BUF_ALIGN); 1256 1257 if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag, 1258 sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1259 m_freem(m); 1260 return (ENOBUFS); 1261 } 1262 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1263 1264 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1265 if (rxd->rx_m != NULL) { 1266 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1267 BUS_DMASYNC_POSTREAD); 1268 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap); 1269 } 1270 map = rxd->rx_dmamap; 1271 rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap; 1272 sc->vge_cdata.vge_rx_sparemap = map; 1273 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1274 BUS_DMASYNC_PREREAD); 1275 rxd->rx_m = m; 1276 1277 rxd->rx_desc->vge_sts = 0; 1278 rxd->rx_desc->vge_ctl = 0; 1279 rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 1280 rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) | 1281 (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I); 1282 1283 /* 1284 * Note: the manual fails to document the fact that for 1285 * proper operation, the driver needs to replenish the RX 1286 * DMA ring 4 descriptors at a time (rather than one at a 1287 * time, like most chips). We can allocate the new buffers 1288 * but we should not set the OWN bits until we're ready 1289 * to hand back 4 of them in one shot. 1290 */ 1291 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1292 for (i = VGE_RXCHUNK; i > 0; i--) { 1293 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1294 rxd = rxd->rxd_prev; 1295 } 1296 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1297 } 1298 1299 return (0); 1300} 1301 1302static int 1303vge_tx_list_init(struct vge_softc *sc) 1304{ 1305 struct vge_ring_data *rd; 1306 struct vge_txdesc *txd; 1307 int i; 1308 1309 VGE_LOCK_ASSERT(sc); 1310 1311 sc->vge_cdata.vge_tx_prodidx = 0; 1312 sc->vge_cdata.vge_tx_considx = 0; 1313 sc->vge_cdata.vge_tx_cnt = 0; 1314 1315 rd = &sc->vge_rdata; 1316 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ); 1317 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1318 txd = &sc->vge_cdata.vge_txdesc[i]; 1319 txd->tx_m = NULL; 1320 txd->tx_desc = &rd->vge_tx_ring[i]; 1321 } 1322 1323 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1324 sc->vge_cdata.vge_tx_ring_map, 1325 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1326 1327 return (0); 1328} 1329 1330static int 1331vge_rx_list_init(struct vge_softc *sc) 1332{ 1333 struct vge_ring_data *rd; 1334 struct vge_rxdesc *rxd; 1335 int i; 1336 1337 VGE_LOCK_ASSERT(sc); 1338 1339 sc->vge_cdata.vge_rx_prodidx = 0; 1340 sc->vge_cdata.vge_head = NULL; 1341 sc->vge_cdata.vge_tail = NULL; 1342 sc->vge_cdata.vge_rx_commit = 0; 1343 1344 rd = &sc->vge_rdata; 1345 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ); 1346 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1347 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1348 rxd->rx_m = NULL; 1349 rxd->rx_desc = &rd->vge_rx_ring[i]; 1350 if (i == 0) 1351 rxd->rxd_prev = 1352 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1]; 1353 else 1354 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1]; 1355 if (vge_newbuf(sc, i) != 0) 1356 return (ENOBUFS); 1357 } 1358 1359 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1360 sc->vge_cdata.vge_rx_ring_map, 1361 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1362 1363 sc->vge_cdata.vge_rx_commit = 0; 1364 1365 return (0); 1366} 1367 1368static void 1369vge_freebufs(struct vge_softc *sc) 1370{ 1371 struct vge_txdesc *txd; 1372 struct vge_rxdesc *rxd; 1373 struct ifnet *ifp; 1374 int i; 1375 1376 VGE_LOCK_ASSERT(sc); 1377 1378 ifp = sc->vge_ifp; 1379 /* 1380 * Free RX and TX mbufs still in the queues. 1381 */ 1382 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1383 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1384 if (rxd->rx_m != NULL) { 1385 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, 1386 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 1387 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, 1388 rxd->rx_dmamap); 1389 m_freem(rxd->rx_m); 1390 rxd->rx_m = NULL; 1391 } 1392 } 1393 1394 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1395 txd = &sc->vge_cdata.vge_txdesc[i]; 1396 if (txd->tx_m != NULL) { 1397 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, 1398 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1399 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, 1400 txd->tx_dmamap); 1401 m_freem(txd->tx_m); 1402 txd->tx_m = NULL; 1403 ifp->if_oerrors++; 1404 } 1405 } 1406} 1407 1408#ifndef __NO_STRICT_ALIGNMENT 1409static __inline void 1410vge_fixup_rx(struct mbuf *m) 1411{ 1412 int i; 1413 uint16_t *src, *dst; 1414 1415 src = mtod(m, uint16_t *); 1416 dst = src - 1; 1417 1418 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1419 *dst++ = *src++; 1420 1421 m->m_data -= ETHER_ALIGN; 1422} 1423#endif 1424 1425/* 1426 * RX handler. We support the reception of jumbo frames that have 1427 * been fragmented across multiple 2K mbuf cluster buffers. 1428 */ 1429static int 1430vge_rxeof(struct vge_softc *sc, int count) 1431{ 1432 struct mbuf *m; 1433 struct ifnet *ifp; 1434 int prod, prog, total_len; 1435 struct vge_rxdesc *rxd; 1436 struct vge_rx_desc *cur_rx; 1437 uint32_t rxstat, rxctl; 1438 1439 VGE_LOCK_ASSERT(sc); 1440 1441 ifp = sc->vge_ifp; 1442 1443 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1444 sc->vge_cdata.vge_rx_ring_map, 1445 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1446 1447 prod = sc->vge_cdata.vge_rx_prodidx; 1448 for (prog = 0; count > 0 && 1449 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; 1450 VGE_RX_DESC_INC(prod)) { 1451 cur_rx = &sc->vge_rdata.vge_rx_ring[prod]; 1452 rxstat = le32toh(cur_rx->vge_sts); 1453 if ((rxstat & VGE_RDSTS_OWN) != 0) 1454 break; 1455 count--; 1456 prog++; 1457 rxctl = le32toh(cur_rx->vge_ctl); 1458 total_len = VGE_RXBYTES(rxstat); 1459 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1460 m = rxd->rx_m; 1461 1462 /* 1463 * If the 'start of frame' bit is set, this indicates 1464 * either the first fragment in a multi-fragment receive, 1465 * or an intermediate fragment. Either way, we want to 1466 * accumulate the buffers. 1467 */ 1468 if ((rxstat & VGE_RXPKT_SOF) != 0) { 1469 if (vge_newbuf(sc, prod) != 0) { 1470 ifp->if_iqdrops++; 1471 VGE_CHAIN_RESET(sc); 1472 vge_discard_rxbuf(sc, prod); 1473 continue; 1474 } 1475 m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN; 1476 if (sc->vge_cdata.vge_head == NULL) { 1477 sc->vge_cdata.vge_head = m; 1478 sc->vge_cdata.vge_tail = m; 1479 } else { 1480 m->m_flags &= ~M_PKTHDR; 1481 sc->vge_cdata.vge_tail->m_next = m; 1482 sc->vge_cdata.vge_tail = m; 1483 } 1484 continue; 1485 } 1486 1487 /* 1488 * Bad/error frames will have the RXOK bit cleared. 1489 * However, there's one error case we want to allow: 1490 * if a VLAN tagged frame arrives and the chip can't 1491 * match it against the CAM filter, it considers this 1492 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 1493 * We don't want to drop the frame though: our VLAN 1494 * filtering is done in software. 1495 * We also want to receive bad-checksummed frames and 1496 * and frames with bad-length. 1497 */ 1498 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1499 (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR | 1500 VGE_RDSTS_CSUMERR)) == 0) { 1501 ifp->if_ierrors++; 1502 /* 1503 * If this is part of a multi-fragment packet, 1504 * discard all the pieces. 1505 */ 1506 VGE_CHAIN_RESET(sc); 1507 vge_discard_rxbuf(sc, prod); 1508 continue; 1509 } 1510 1511 if (vge_newbuf(sc, prod) != 0) { 1512 ifp->if_iqdrops++; 1513 VGE_CHAIN_RESET(sc); 1514 vge_discard_rxbuf(sc, prod); 1515 continue; 1516 } 1517 1518 /* Chain received mbufs. */ 1519 if (sc->vge_cdata.vge_head != NULL) { 1520 m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN); 1521 /* 1522 * Special case: if there's 4 bytes or less 1523 * in this buffer, the mbuf can be discarded: 1524 * the last 4 bytes is the CRC, which we don't 1525 * care about anyway. 1526 */ 1527 if (m->m_len <= ETHER_CRC_LEN) { 1528 sc->vge_cdata.vge_tail->m_len -= 1529 (ETHER_CRC_LEN - m->m_len); 1530 m_freem(m); 1531 } else { 1532 m->m_len -= ETHER_CRC_LEN; 1533 m->m_flags &= ~M_PKTHDR; 1534 sc->vge_cdata.vge_tail->m_next = m; 1535 } 1536 m = sc->vge_cdata.vge_head; 1537 m->m_flags |= M_PKTHDR; 1538 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1539 } else { 1540 m->m_flags |= M_PKTHDR; 1541 m->m_pkthdr.len = m->m_len = 1542 (total_len - ETHER_CRC_LEN); 1543 } 1544 1545#ifndef __NO_STRICT_ALIGNMENT 1546 vge_fixup_rx(m); 1547#endif 1548 m->m_pkthdr.rcvif = ifp; 1549 1550 /* Do RX checksumming if enabled */ 1551 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1552 (rxctl & VGE_RDCTL_FRAG) == 0) { 1553 /* Check IP header checksum */ 1554 if ((rxctl & VGE_RDCTL_IPPKT) != 0) 1555 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1556 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0) 1557 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1558 1559 /* Check TCP/UDP checksum */ 1560 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) && 1561 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1562 m->m_pkthdr.csum_flags |= 1563 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1564 m->m_pkthdr.csum_data = 0xffff; 1565 } 1566 } 1567 1568 if ((rxstat & VGE_RDSTS_VTAG) != 0) { 1569 /* 1570 * The 32-bit rxctl register is stored in little-endian. 1571 * However, the 16-bit vlan tag is stored in big-endian, 1572 * so we have to byte swap it. 1573 */ 1574 m->m_pkthdr.ether_vtag = 1575 bswap16(rxctl & VGE_RDCTL_VLANID); 1576 m->m_flags |= M_VLANTAG; 1577 } 1578 1579 VGE_UNLOCK(sc); 1580 (*ifp->if_input)(ifp, m); 1581 VGE_LOCK(sc); 1582 sc->vge_cdata.vge_head = NULL; 1583 sc->vge_cdata.vge_tail = NULL; 1584 } 1585 1586 if (prog > 0) { 1587 sc->vge_cdata.vge_rx_prodidx = prod; 1588 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1589 sc->vge_cdata.vge_rx_ring_map, 1590 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1591 /* Update residue counter. */ 1592 if (sc->vge_cdata.vge_rx_commit != 0) { 1593 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, 1594 sc->vge_cdata.vge_rx_commit); 1595 sc->vge_cdata.vge_rx_commit = 0; 1596 } 1597 } 1598 return (prog); 1599} 1600 1601static void 1602vge_txeof(struct vge_softc *sc) 1603{ 1604 struct ifnet *ifp; 1605 struct vge_tx_desc *cur_tx; 1606 struct vge_txdesc *txd; 1607 uint32_t txstat; 1608 int cons, prod; 1609 1610 VGE_LOCK_ASSERT(sc); 1611 1612 ifp = sc->vge_ifp; 1613 1614 if (sc->vge_cdata.vge_tx_cnt == 0) 1615 return; 1616 1617 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1618 sc->vge_cdata.vge_tx_ring_map, 1619 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1620 1621 /* 1622 * Go through our tx list and free mbufs for those 1623 * frames that have been transmitted. 1624 */ 1625 cons = sc->vge_cdata.vge_tx_considx; 1626 prod = sc->vge_cdata.vge_tx_prodidx; 1627 for (; cons != prod; VGE_TX_DESC_INC(cons)) { 1628 cur_tx = &sc->vge_rdata.vge_tx_ring[cons]; 1629 txstat = le32toh(cur_tx->vge_sts); 1630 if ((txstat & VGE_TDSTS_OWN) != 0) 1631 break; 1632 sc->vge_cdata.vge_tx_cnt--; 1633 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1634 1635 txd = &sc->vge_cdata.vge_txdesc[cons]; 1636 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1637 BUS_DMASYNC_POSTWRITE); 1638 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap); 1639 1640 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1641 __func__)); 1642 m_freem(txd->tx_m); 1643 txd->tx_m = NULL; 1644 txd->tx_desc->vge_frag[0].vge_addrhi = 0; 1645 } 1646 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1647 sc->vge_cdata.vge_tx_ring_map, 1648 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1649 sc->vge_cdata.vge_tx_considx = cons; 1650 if (sc->vge_cdata.vge_tx_cnt == 0) 1651 sc->vge_timer = 0; 1652} 1653 1654static void 1655vge_link_statchg(void *xsc) 1656{ 1657 struct vge_softc *sc; 1658 struct ifnet *ifp; 1659 struct mii_data *mii; 1660 1661 sc = xsc; 1662 ifp = sc->vge_ifp; 1663 VGE_LOCK_ASSERT(sc); 1664 mii = device_get_softc(sc->vge_miibus); 1665 1666 mii_pollstat(mii); 1667 if ((sc->vge_flags & VGE_FLAG_LINK) != 0) { 1668 if (!(mii->mii_media_status & IFM_ACTIVE)) { 1669 sc->vge_flags &= ~VGE_FLAG_LINK; 1670 if_link_state_change(sc->vge_ifp, 1671 LINK_STATE_DOWN); 1672 } 1673 } else { 1674 if (mii->mii_media_status & IFM_ACTIVE && 1675 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1676 sc->vge_flags |= VGE_FLAG_LINK; 1677 if_link_state_change(sc->vge_ifp, 1678 LINK_STATE_UP); 1679 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1680 vge_start_locked(ifp); 1681 } 1682 } 1683} 1684 1685#ifdef DEVICE_POLLING 1686static int 1687vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) 1688{ 1689 struct vge_softc *sc = ifp->if_softc; 1690 int rx_npkts = 0; 1691 1692 VGE_LOCK(sc); 1693 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1694 goto done; 1695 1696 rx_npkts = vge_rxeof(sc, count); 1697 vge_txeof(sc); 1698 1699 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1700 vge_start_locked(ifp); 1701 1702 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1703 uint32_t status; 1704 status = CSR_READ_4(sc, VGE_ISR); 1705 if (status == 0xFFFFFFFF) 1706 goto done; 1707 if (status) 1708 CSR_WRITE_4(sc, VGE_ISR, status); 1709 1710 /* 1711 * XXX check behaviour on receiver stalls. 1712 */ 1713 1714 if (status & VGE_ISR_TXDMA_STALL || 1715 status & VGE_ISR_RXDMA_STALL) { 1716 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1717 vge_init_locked(sc); 1718 } 1719 1720 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1721 vge_rxeof(sc, count); 1722 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1723 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1724 } 1725 } 1726done: 1727 VGE_UNLOCK(sc); 1728 return (rx_npkts); 1729} 1730#endif /* DEVICE_POLLING */ 1731 1732static void 1733vge_intr(void *arg) 1734{ 1735 struct vge_softc *sc; 1736 struct ifnet *ifp; 1737 uint32_t status; 1738 1739 sc = arg; 1740 VGE_LOCK(sc); 1741 1742 ifp = sc->vge_ifp; 1743 if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 || 1744 (ifp->if_flags & IFF_UP) == 0) { 1745 VGE_UNLOCK(sc); 1746 return; 1747 } 1748 1749#ifdef DEVICE_POLLING 1750 if (ifp->if_capenable & IFCAP_POLLING) { 1751 status = CSR_READ_4(sc, VGE_ISR); 1752 CSR_WRITE_4(sc, VGE_ISR, status); 1753 if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0) 1754 vge_link_statchg(sc); 1755 VGE_UNLOCK(sc); 1756 return; 1757 } 1758#endif 1759 1760 /* Disable interrupts */ 1761 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1762 status = CSR_READ_4(sc, VGE_ISR); 1763 CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD); 1764 /* If the card has gone away the read returns 0xffff. */ 1765 if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0) 1766 goto done; 1767 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1768 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1769 vge_rxeof(sc, VGE_RX_DESC_CNT); 1770 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1771 vge_rxeof(sc, VGE_RX_DESC_CNT); 1772 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1773 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1774 } 1775 1776 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO)) 1777 vge_txeof(sc); 1778 1779 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1780 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1781 vge_init_locked(sc); 1782 } 1783 1784 if (status & VGE_ISR_LINKSTS) 1785 vge_link_statchg(sc); 1786 } 1787done: 1788 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1789 /* Re-enable interrupts */ 1790 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1791 1792 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1793 vge_start_locked(ifp); 1794 } 1795 VGE_UNLOCK(sc); 1796} 1797 1798static int 1799vge_encap(struct vge_softc *sc, struct mbuf **m_head) 1800{ 1801 struct vge_txdesc *txd; 1802 struct vge_tx_frag *frag; 1803 struct mbuf *m; 1804 bus_dma_segment_t txsegs[VGE_MAXTXSEGS]; 1805 int error, i, nsegs, padlen; 1806 uint32_t cflags; 1807 1808 VGE_LOCK_ASSERT(sc); 1809 1810 M_ASSERTPKTHDR((*m_head)); 1811 1812 /* Argh. This chip does not autopad short frames. */ 1813 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1814 m = *m_head; 1815 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len; 1816 if (M_WRITABLE(m) == 0) { 1817 /* Get a writable copy. */ 1818 m = m_dup(*m_head, M_DONTWAIT); 1819 m_freem(*m_head); 1820 if (m == NULL) { 1821 *m_head = NULL; 1822 return (ENOBUFS); 1823 } 1824 *m_head = m; 1825 } 1826 if (M_TRAILINGSPACE(m) < padlen) { 1827 m = m_defrag(m, M_DONTWAIT); 1828 if (m == NULL) { 1829 m_freem(*m_head); 1830 *m_head = NULL; 1831 return (ENOBUFS); 1832 } 1833 } 1834 /* 1835 * Manually pad short frames, and zero the pad space 1836 * to avoid leaking data. 1837 */ 1838 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1839 m->m_pkthdr.len += padlen; 1840 m->m_len = m->m_pkthdr.len; 1841 *m_head = m; 1842 } 1843 1844 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx]; 1845 1846 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1847 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1848 if (error == EFBIG) { 1849 m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS); 1850 if (m == NULL) { 1851 m_freem(*m_head); 1852 *m_head = NULL; 1853 return (ENOMEM); 1854 } 1855 *m_head = m; 1856 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1857 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1858 if (error != 0) { 1859 m_freem(*m_head); 1860 *m_head = NULL; 1861 return (error); 1862 } 1863 } else if (error != 0) 1864 return (error); 1865 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1866 BUS_DMASYNC_PREWRITE); 1867 1868 m = *m_head; 1869 cflags = 0; 1870 1871 /* Configure checksum offload. */ 1872 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1873 cflags |= VGE_TDCTL_IPCSUM; 1874 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1875 cflags |= VGE_TDCTL_TCPCSUM; 1876 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1877 cflags |= VGE_TDCTL_UDPCSUM; 1878 1879 /* Configure VLAN. */ 1880 if ((m->m_flags & M_VLANTAG) != 0) 1881 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG; 1882 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16); 1883 /* 1884 * XXX 1885 * Velocity family seems to support TSO but no information 1886 * for MSS configuration is available. Also the number of 1887 * fragments supported by a descriptor is too small to hold 1888 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF, 1889 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build 1890 * longer chain of buffers but no additional information is 1891 * available. 1892 * 1893 * When telling the chip how many segments there are, we 1894 * must use nsegs + 1 instead of just nsegs. Darned if I 1895 * know why. This also means we can't use the last fragment 1896 * field of Tx descriptor. 1897 */ 1898 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) | 1899 VGE_TD_LS_NORM); 1900 for (i = 0; i < nsegs; i++) { 1901 frag = &txd->tx_desc->vge_frag[i]; 1902 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr)); 1903 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) | 1904 (VGE_BUFLEN(txsegs[i].ds_len) << 16)); 1905 } 1906 1907 sc->vge_cdata.vge_tx_cnt++; 1908 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx); 1909 1910 /* 1911 * Finally request interrupt and give the first descriptor 1912 * ownership to hardware. 1913 */ 1914 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC); 1915 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN); 1916 txd->tx_m = m; 1917 1918 return (0); 1919} 1920 1921/* 1922 * Main transmit routine. 1923 */ 1924 1925static void 1926vge_start(struct ifnet *ifp) 1927{ 1928 struct vge_softc *sc; 1929 1930 sc = ifp->if_softc; 1931 VGE_LOCK(sc); 1932 vge_start_locked(ifp); 1933 VGE_UNLOCK(sc); 1934} 1935 1936 1937static void 1938vge_start_locked(struct ifnet *ifp) 1939{ 1940 struct vge_softc *sc; 1941 struct vge_txdesc *txd; 1942 struct mbuf *m_head; 1943 int enq, idx; 1944 1945 sc = ifp->if_softc; 1946 1947 VGE_LOCK_ASSERT(sc); 1948 1949 if ((sc->vge_flags & VGE_FLAG_LINK) == 0 || 1950 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1951 IFF_DRV_RUNNING) 1952 return; 1953 1954 idx = sc->vge_cdata.vge_tx_prodidx; 1955 VGE_TX_DESC_DEC(idx); 1956 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1957 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) { 1958 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1959 if (m_head == NULL) 1960 break; 1961 /* 1962 * Pack the data into the transmit ring. If we 1963 * don't have room, set the OACTIVE flag and wait 1964 * for the NIC to drain the ring. 1965 */ 1966 if (vge_encap(sc, &m_head)) { 1967 if (m_head == NULL) 1968 break; 1969 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1970 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1971 break; 1972 } 1973 1974 txd = &sc->vge_cdata.vge_txdesc[idx]; 1975 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q); 1976 VGE_TX_DESC_INC(idx); 1977 1978 enq++; 1979 /* 1980 * If there's a BPF listener, bounce a copy of this frame 1981 * to him. 1982 */ 1983 ETHER_BPF_MTAP(ifp, m_head); 1984 } 1985 1986 if (enq > 0) { 1987 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1988 sc->vge_cdata.vge_tx_ring_map, 1989 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1990 /* Issue a transmit command. */ 1991 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 1992 /* 1993 * Set a timeout in case the chip goes out to lunch. 1994 */ 1995 sc->vge_timer = 5; 1996 } 1997} 1998 1999static void 2000vge_init(void *xsc) 2001{ 2002 struct vge_softc *sc = xsc; 2003 2004 VGE_LOCK(sc); 2005 vge_init_locked(sc); 2006 VGE_UNLOCK(sc); 2007} 2008 2009static void 2010vge_init_locked(struct vge_softc *sc) 2011{ 2012 struct ifnet *ifp = sc->vge_ifp; 2013 struct mii_data *mii; 2014 int error, i; 2015 2016 VGE_LOCK_ASSERT(sc); 2017 mii = device_get_softc(sc->vge_miibus); 2018 2019 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2020 return; 2021 2022 /* 2023 * Cancel pending I/O and free all RX/TX buffers. 2024 */ 2025 vge_stop(sc); 2026 vge_reset(sc); 2027 2028 /* 2029 * Initialize the RX and TX descriptors and mbufs. 2030 */ 2031 2032 error = vge_rx_list_init(sc); 2033 if (error != 0) { 2034 device_printf(sc->vge_dev, "no memory for Rx buffers.\n"); 2035 return; 2036 } 2037 vge_tx_list_init(sc); 2038 /* Clear MAC statistics. */ 2039 vge_stats_clear(sc); 2040 /* Set our station address */ 2041 for (i = 0; i < ETHER_ADDR_LEN; i++) 2042 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]); 2043 2044 /* 2045 * Set receive FIFO threshold. Also allow transmission and 2046 * reception of VLAN tagged frames. 2047 */ 2048 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT); 2049 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES); 2050 2051 /* Set DMA burst length */ 2052 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 2053 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 2054 2055 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); 2056 2057 /* Set collision backoff algorithm */ 2058 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| 2059 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); 2060 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 2061 2062 /* Disable LPSEL field in priority resolution */ 2063 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 2064 2065 /* 2066 * Load the addresses of the DMA queues into the chip. 2067 * Note that we only use one transmit queue. 2068 */ 2069 2070 CSR_WRITE_4(sc, VGE_TXDESC_HIADDR, 2071 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)); 2072 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 2073 VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr)); 2074 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 2075 2076 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 2077 VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr)); 2078 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 2079 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 2080 2081 /* Configure interrupt moderation. */ 2082 vge_intr_holdoff(sc); 2083 2084 /* Enable and wake up the RX descriptor queue */ 2085 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 2086 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 2087 2088 /* Enable the TX descriptor queue */ 2089 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 2090 2091 /* Init the cam filter. */ 2092 vge_cam_clear(sc); 2093 2094 /* Set up receiver filter. */ 2095 vge_rxfilter(sc); 2096 vge_setvlan(sc); 2097 2098 /* Enable flow control */ 2099 2100 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 2101 2102 /* Enable jumbo frame reception (if desired) */ 2103 2104 /* Start the MAC. */ 2105 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 2106 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 2107 CSR_WRITE_1(sc, VGE_CRS0, 2108 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); 2109 2110#ifdef DEVICE_POLLING 2111 /* 2112 * Disable interrupts except link state change if we are polling. 2113 */ 2114 if (ifp->if_capenable & IFCAP_POLLING) { 2115 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING); 2116 } else /* otherwise ... */ 2117#endif 2118 { 2119 /* 2120 * Enable interrupts. 2121 */ 2122 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2123 } 2124 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2125 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2126 2127 sc->vge_flags &= ~VGE_FLAG_LINK; 2128 mii_mediachg(mii); 2129 2130 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2131 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2132 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2133} 2134 2135/* 2136 * Set media options. 2137 */ 2138static int 2139vge_ifmedia_upd(struct ifnet *ifp) 2140{ 2141 struct vge_softc *sc; 2142 struct mii_data *mii; 2143 int error; 2144 2145 sc = ifp->if_softc; 2146 VGE_LOCK(sc); 2147 mii = device_get_softc(sc->vge_miibus); 2148 error = mii_mediachg(mii); 2149 VGE_UNLOCK(sc); 2150 2151 return (error); 2152} 2153 2154/* 2155 * Report current media status. 2156 */ 2157static void 2158vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2159{ 2160 struct vge_softc *sc; 2161 struct mii_data *mii; 2162 2163 sc = ifp->if_softc; 2164 mii = device_get_softc(sc->vge_miibus); 2165 2166 VGE_LOCK(sc); 2167 if ((ifp->if_flags & IFF_UP) == 0) { 2168 VGE_UNLOCK(sc); 2169 return; 2170 } 2171 mii_pollstat(mii); 2172 ifmr->ifm_active = mii->mii_media_active; 2173 ifmr->ifm_status = mii->mii_media_status; 2174 VGE_UNLOCK(sc); 2175} 2176 2177static void 2178vge_miibus_statchg(device_t dev) 2179{ 2180 struct vge_softc *sc; 2181 struct mii_data *mii; 2182 struct ifmedia_entry *ife; 2183 2184 sc = device_get_softc(dev); 2185 mii = device_get_softc(sc->vge_miibus); 2186 ife = mii->mii_media.ifm_cur; 2187 2188 /* 2189 * If the user manually selects a media mode, we need to turn 2190 * on the forced MAC mode bit in the DIAGCTL register. If the 2191 * user happens to choose a full duplex mode, we also need to 2192 * set the 'force full duplex' bit. This applies only to 2193 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 2194 * mode is disabled, and in 1000baseT mode, full duplex is 2195 * always implied, so we turn on the forced mode bit but leave 2196 * the FDX bit cleared. 2197 */ 2198 2199 switch (IFM_SUBTYPE(ife->ifm_media)) { 2200 case IFM_AUTO: 2201 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2202 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2203 break; 2204 case IFM_1000_T: 2205 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2206 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2207 break; 2208 case IFM_100_TX: 2209 case IFM_10_T: 2210 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2211 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 2212 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2213 } else { 2214 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2215 } 2216 break; 2217 default: 2218 device_printf(dev, "unknown media type: %x\n", 2219 IFM_SUBTYPE(ife->ifm_media)); 2220 break; 2221 } 2222} 2223 2224static int 2225vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2226{ 2227 struct vge_softc *sc = ifp->if_softc; 2228 struct ifreq *ifr = (struct ifreq *) data; 2229 struct mii_data *mii; 2230 int error = 0, mask; 2231 2232 switch (command) { 2233 case SIOCSIFMTU: 2234 VGE_LOCK(sc); 2235 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU) 2236 error = EINVAL; 2237 else if (ifp->if_mtu != ifr->ifr_mtu) { 2238 if (ifr->ifr_mtu > ETHERMTU && 2239 (sc->vge_flags & VGE_FLAG_JUMBO) == 0) 2240 error = EINVAL; 2241 else 2242 ifp->if_mtu = ifr->ifr_mtu; 2243 } 2244 VGE_UNLOCK(sc); 2245 break; 2246 case SIOCSIFFLAGS: 2247 VGE_LOCK(sc); 2248 if ((ifp->if_flags & IFF_UP) != 0) { 2249 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2250 ((ifp->if_flags ^ sc->vge_if_flags) & 2251 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2252 vge_rxfilter(sc); 2253 else 2254 vge_init_locked(sc); 2255 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2256 vge_stop(sc); 2257 sc->vge_if_flags = ifp->if_flags; 2258 VGE_UNLOCK(sc); 2259 break; 2260 case SIOCADDMULTI: 2261 case SIOCDELMULTI: 2262 VGE_LOCK(sc); 2263 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2264 vge_rxfilter(sc); 2265 VGE_UNLOCK(sc); 2266 break; 2267 case SIOCGIFMEDIA: 2268 case SIOCSIFMEDIA: 2269 mii = device_get_softc(sc->vge_miibus); 2270 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2271 break; 2272 case SIOCSIFCAP: 2273 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2274#ifdef DEVICE_POLLING 2275 if (mask & IFCAP_POLLING) { 2276 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2277 error = ether_poll_register(vge_poll, ifp); 2278 if (error) 2279 return (error); 2280 VGE_LOCK(sc); 2281 /* Disable interrupts */ 2282 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING); 2283 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2284 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2285 ifp->if_capenable |= IFCAP_POLLING; 2286 VGE_UNLOCK(sc); 2287 } else { 2288 error = ether_poll_deregister(ifp); 2289 /* Enable interrupts. */ 2290 VGE_LOCK(sc); 2291 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2292 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2293 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2294 ifp->if_capenable &= ~IFCAP_POLLING; 2295 VGE_UNLOCK(sc); 2296 } 2297 } 2298#endif /* DEVICE_POLLING */ 2299 VGE_LOCK(sc); 2300 if ((mask & IFCAP_TXCSUM) != 0 && 2301 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2302 ifp->if_capenable ^= IFCAP_TXCSUM; 2303 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2304 ifp->if_hwassist |= VGE_CSUM_FEATURES; 2305 else 2306 ifp->if_hwassist &= ~VGE_CSUM_FEATURES; 2307 } 2308 if ((mask & IFCAP_RXCSUM) != 0 && 2309 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 2310 ifp->if_capenable ^= IFCAP_RXCSUM; 2311 if ((mask & IFCAP_WOL_UCAST) != 0 && 2312 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) 2313 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2314 if ((mask & IFCAP_WOL_MCAST) != 0 && 2315 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 2316 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2317 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2318 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2319 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2320 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2321 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2322 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2323 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2324 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { 2325 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2326 vge_setvlan(sc); 2327 } 2328 VGE_UNLOCK(sc); 2329 VLAN_CAPABILITIES(ifp); 2330 break; 2331 default: 2332 error = ether_ioctl(ifp, command, data); 2333 break; 2334 } 2335 2336 return (error); 2337} 2338 2339static void 2340vge_watchdog(void *arg) 2341{ 2342 struct vge_softc *sc; 2343 struct ifnet *ifp; 2344 2345 sc = arg; 2346 VGE_LOCK_ASSERT(sc); 2347 vge_stats_update(sc); 2348 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2349 if (sc->vge_timer == 0 || --sc->vge_timer > 0) 2350 return; 2351 2352 ifp = sc->vge_ifp; 2353 if_printf(ifp, "watchdog timeout\n"); 2354 ifp->if_oerrors++; 2355 2356 vge_txeof(sc); 2357 vge_rxeof(sc, VGE_RX_DESC_CNT); 2358 2359 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2360 vge_init_locked(sc); 2361} 2362 2363/* 2364 * Stop the adapter and free any mbufs allocated to the 2365 * RX and TX lists. 2366 */ 2367static void 2368vge_stop(struct vge_softc *sc) 2369{ 2370 struct ifnet *ifp; 2371 2372 VGE_LOCK_ASSERT(sc); 2373 ifp = sc->vge_ifp; 2374 sc->vge_timer = 0; 2375 callout_stop(&sc->vge_watchdog); 2376 2377 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2378 2379 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2380 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2381 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2382 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2383 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2384 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2385 2386 vge_stats_update(sc); 2387 VGE_CHAIN_RESET(sc); 2388 vge_txeof(sc); 2389 vge_freebufs(sc); 2390} 2391 2392/* 2393 * Device suspend routine. Stop the interface and save some PCI 2394 * settings in case the BIOS doesn't restore them properly on 2395 * resume. 2396 */ 2397static int 2398vge_suspend(device_t dev) 2399{ 2400 struct vge_softc *sc; 2401 2402 sc = device_get_softc(dev); 2403 2404 VGE_LOCK(sc); 2405 vge_stop(sc); 2406 vge_setwol(sc); 2407 sc->vge_flags |= VGE_FLAG_SUSPENDED; 2408 VGE_UNLOCK(sc); 2409 2410 return (0); 2411} 2412 2413/* 2414 * Device resume routine. Restore some PCI settings in case the BIOS 2415 * doesn't, re-enable busmastering, and restart the interface if 2416 * appropriate. 2417 */ 2418static int 2419vge_resume(device_t dev) 2420{ 2421 struct vge_softc *sc; 2422 struct ifnet *ifp; 2423 uint16_t pmstat; 2424 2425 sc = device_get_softc(dev); 2426 VGE_LOCK(sc); 2427 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) { 2428 /* Disable PME and clear PME status. */ 2429 pmstat = pci_read_config(sc->vge_dev, 2430 sc->vge_pmcap + PCIR_POWER_STATUS, 2); 2431 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2432 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2433 pci_write_config(sc->vge_dev, 2434 sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2435 } 2436 } 2437 vge_clrwol(sc); 2438 /* Restart MII auto-polling. */ 2439 vge_miipoll_start(sc); 2440 ifp = sc->vge_ifp; 2441 /* Reinitialize interface if necessary. */ 2442 if ((ifp->if_flags & IFF_UP) != 0) { 2443 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2444 vge_init_locked(sc); 2445 } 2446 sc->vge_flags &= ~VGE_FLAG_SUSPENDED; 2447 VGE_UNLOCK(sc); 2448 2449 return (0); 2450} 2451 2452/* 2453 * Stop all chip I/O so that the kernel's probe routines don't 2454 * get confused by errant DMAs when rebooting. 2455 */ 2456static int 2457vge_shutdown(device_t dev) 2458{ 2459 2460 return (vge_suspend(dev)); 2461} 2462 2463#define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2464 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2465 2466static void 2467vge_sysctl_node(struct vge_softc *sc) 2468{ 2469 struct sysctl_ctx_list *ctx; 2470 struct sysctl_oid_list *child, *parent; 2471 struct sysctl_oid *tree; 2472 struct vge_hw_stats *stats; 2473 2474 stats = &sc->vge_stats; 2475 ctx = device_get_sysctl_ctx(sc->vge_dev); 2476 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev)); 2477 2478 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff", 2479 CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff"); 2480 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt", 2481 CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet"); 2482 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt", 2483 CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet"); 2484 2485 /* Pull in device tunables. */ 2486 sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT; 2487 resource_int_value(device_get_name(sc->vge_dev), 2488 device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff); 2489 sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT; 2490 resource_int_value(device_get_name(sc->vge_dev), 2491 device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt); 2492 sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT; 2493 resource_int_value(device_get_name(sc->vge_dev), 2494 device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt); 2495 2496 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 2497 NULL, "VGE statistics"); 2498 parent = SYSCTL_CHILDREN(tree); 2499 2500 /* Rx statistics. */ 2501 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 2502 NULL, "RX MAC statistics"); 2503 child = SYSCTL_CHILDREN(tree); 2504 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames", 2505 &stats->rx_frames, "frames"); 2506 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2507 &stats->rx_good_frames, "Good frames"); 2508 VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 2509 &stats->rx_fifo_oflows, "FIFO overflows"); 2510 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts", 2511 &stats->rx_runts, "Too short frames"); 2512 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs", 2513 &stats->rx_runts_errs, "Too short frames with errors"); 2514 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2515 &stats->rx_pkts_64, "64 bytes frames"); 2516 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2517 &stats->rx_pkts_65_127, "65 to 127 bytes frames"); 2518 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2519 &stats->rx_pkts_128_255, "128 to 255 bytes frames"); 2520 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2521 &stats->rx_pkts_256_511, "256 to 511 bytes frames"); 2522 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2523 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); 2524 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2525 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2526 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 2527 &stats->rx_pkts_1519_max, "1519 to max frames"); 2528 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs", 2529 &stats->rx_pkts_1519_max_errs, "1519 to max frames with error"); 2530 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2531 &stats->rx_jumbos, "Jumbo frames"); 2532 VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs", 2533 &stats->rx_crcerrs, "CRC errors"); 2534 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2535 &stats->rx_pause_frames, "CRC errors"); 2536 VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 2537 &stats->rx_alignerrs, "Alignment errors"); 2538 VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs", 2539 &stats->rx_nobufs, "Frames with no buffer event"); 2540 VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs", 2541 &stats->rx_symerrs, "Frames with symbol errors"); 2542 VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 2543 &stats->rx_lenerrs, "Frames with length mismatched"); 2544 2545 /* Tx statistics. */ 2546 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 2547 NULL, "TX MAC statistics"); 2548 child = SYSCTL_CHILDREN(tree); 2549 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2550 &stats->tx_good_frames, "Good frames"); 2551 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2552 &stats->tx_pkts_64, "64 bytes frames"); 2553 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2554 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 2555 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2556 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 2557 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2558 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 2559 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2560 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 2561 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2562 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2563 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2564 &stats->tx_jumbos, "Jumbo frames"); 2565 VGE_SYSCTL_STAT_ADD32(ctx, child, "colls", 2566 &stats->tx_colls, "Collisions"); 2567 VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 2568 &stats->tx_latecolls, "Late collisions"); 2569 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2570 &stats->tx_pause, "Pause frames"); 2571#ifdef VGE_ENABLE_SQEERR 2572 VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs", 2573 &stats->tx_sqeerrs, "SQE errors"); 2574#endif 2575 /* Clear MAC statistics. */ 2576 vge_stats_clear(sc); 2577} 2578 2579#undef VGE_SYSCTL_STAT_ADD32 2580 2581static void 2582vge_stats_clear(struct vge_softc *sc) 2583{ 2584 int i; 2585 2586 CSR_WRITE_1(sc, VGE_MIBCSR, 2587 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE); 2588 CSR_WRITE_1(sc, VGE_MIBCSR, 2589 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR); 2590 for (i = VGE_TIMEOUT; i > 0; i--) { 2591 DELAY(1); 2592 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0) 2593 break; 2594 } 2595 if (i == 0) 2596 device_printf(sc->vge_dev, "MIB clear timed out!\n"); 2597 CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) & 2598 ~VGE_MIBCSR_FREEZE); 2599} 2600 2601static void 2602vge_stats_update(struct vge_softc *sc) 2603{ 2604 struct vge_hw_stats *stats; 2605 struct ifnet *ifp; 2606 uint32_t mib[VGE_MIB_CNT], val; 2607 int i; 2608 2609 VGE_LOCK_ASSERT(sc); 2610 2611 stats = &sc->vge_stats; 2612 ifp = sc->vge_ifp; 2613 2614 CSR_WRITE_1(sc, VGE_MIBCSR, 2615 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH); 2616 for (i = VGE_TIMEOUT; i > 0; i--) { 2617 DELAY(1); 2618 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0) 2619 break; 2620 } 2621 if (i == 0) { 2622 device_printf(sc->vge_dev, "MIB counter dump timed out!\n"); 2623 vge_stats_clear(sc); 2624 return; 2625 } 2626 2627 bzero(mib, sizeof(mib)); 2628reset_idx: 2629 /* Set MIB read index to 0. */ 2630 CSR_WRITE_1(sc, VGE_MIBCSR, 2631 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI); 2632 for (i = 0; i < VGE_MIB_CNT; i++) { 2633 val = CSR_READ_4(sc, VGE_MIBDATA); 2634 if (i != VGE_MIB_DATA_IDX(val)) { 2635 /* Reading interrupted. */ 2636 goto reset_idx; 2637 } 2638 mib[i] = val & VGE_MIB_DATA_MASK; 2639 } 2640 2641 /* Rx stats. */ 2642 stats->rx_frames += mib[VGE_MIB_RX_FRAMES]; 2643 stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES]; 2644 stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS]; 2645 stats->rx_runts += mib[VGE_MIB_RX_RUNTS]; 2646 stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS]; 2647 stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64]; 2648 stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127]; 2649 stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255]; 2650 stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511]; 2651 stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023]; 2652 stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518]; 2653 stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX]; 2654 stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS]; 2655 stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS]; 2656 stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS]; 2657 stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE]; 2658 stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS]; 2659 stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS]; 2660 stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS]; 2661 stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS]; 2662 2663 /* Tx stats. */ 2664 stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES]; 2665 stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64]; 2666 stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127]; 2667 stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255]; 2668 stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511]; 2669 stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023]; 2670 stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518]; 2671 stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS]; 2672 stats->tx_colls += mib[VGE_MIB_TX_COLLS]; 2673 stats->tx_pause += mib[VGE_MIB_TX_PAUSE]; 2674#ifdef VGE_ENABLE_SQEERR 2675 stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS]; 2676#endif 2677 stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS]; 2678 2679 /* Update counters in ifnet. */ 2680 ifp->if_opackets += mib[VGE_MIB_TX_GOOD_FRAMES]; 2681 2682 ifp->if_collisions += mib[VGE_MIB_TX_COLLS] + 2683 mib[VGE_MIB_TX_LATECOLLS]; 2684 2685 ifp->if_oerrors += mib[VGE_MIB_TX_COLLS] + 2686 mib[VGE_MIB_TX_LATECOLLS]; 2687 2688 ifp->if_ipackets += mib[VGE_MIB_RX_GOOD_FRAMES]; 2689 2690 ifp->if_ierrors += mib[VGE_MIB_RX_FIFO_OVERRUNS] + 2691 mib[VGE_MIB_RX_RUNTS] + 2692 mib[VGE_MIB_RX_RUNTS_ERRS] + 2693 mib[VGE_MIB_RX_CRCERRS] + 2694 mib[VGE_MIB_RX_ALIGNERRS] + 2695 mib[VGE_MIB_RX_NOBUFS] + 2696 mib[VGE_MIB_RX_SYMERRS] + 2697 mib[VGE_MIB_RX_LENERRS]; 2698} 2699 2700static void 2701vge_intr_holdoff(struct vge_softc *sc) 2702{ 2703 uint8_t intctl; 2704 2705 VGE_LOCK_ASSERT(sc); 2706 2707 /* 2708 * Set Tx interrupt supression threshold. 2709 * It's possible to use single-shot timer in VGE_CRS1 register 2710 * in Tx path such that driver can remove most of Tx completion 2711 * interrupts. However this requires additional access to 2712 * VGE_CRS1 register to reload the timer in addintion to 2713 * activating Tx kick command. Another downside is we don't know 2714 * what single-shot timer value should be used in advance so 2715 * reclaiming transmitted mbufs could be delayed a lot which in 2716 * turn slows down Tx operation. 2717 */ 2718 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR); 2719 CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt); 2720 2721 /* Set Rx interrupt suppresion threshold. */ 2722 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); 2723 CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt); 2724 2725 intctl = CSR_READ_1(sc, VGE_INTCTL1); 2726 intctl &= ~VGE_INTCTL_SC_RELOAD; 2727 intctl |= VGE_INTCTL_HC_RELOAD; 2728 if (sc->vge_tx_coal_pkt <= 0) 2729 intctl |= VGE_INTCTL_TXINTSUP_DISABLE; 2730 else 2731 intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE; 2732 if (sc->vge_rx_coal_pkt <= 0) 2733 intctl |= VGE_INTCTL_RXINTSUP_DISABLE; 2734 else 2735 intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE; 2736 CSR_WRITE_1(sc, VGE_INTCTL1, intctl); 2737 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF); 2738 if (sc->vge_int_holdoff > 0) { 2739 /* Set interrupt holdoff timer. */ 2740 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 2741 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 2742 VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff)); 2743 /* Enable holdoff timer. */ 2744 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); 2745 } 2746} 2747 2748static void 2749vge_setlinkspeed(struct vge_softc *sc) 2750{ 2751 struct mii_data *mii; 2752 int aneg, i; 2753 2754 VGE_LOCK_ASSERT(sc); 2755 2756 mii = device_get_softc(sc->vge_miibus); 2757 mii_pollstat(mii); 2758 aneg = 0; 2759 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 2760 (IFM_ACTIVE | IFM_AVALID)) { 2761 switch IFM_SUBTYPE(mii->mii_media_active) { 2762 case IFM_10_T: 2763 case IFM_100_TX: 2764 return; 2765 case IFM_1000_T: 2766 aneg++; 2767 default: 2768 break; 2769 } 2770 } 2771 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0); 2772 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR, 2773 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 2774 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR, 2775 BMCR_AUTOEN | BMCR_STARTNEG); 2776 DELAY(1000); 2777 if (aneg != 0) { 2778 /* Poll link state until vge(4) get a 10/100 link. */ 2779 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 2780 mii_pollstat(mii); 2781 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 2782 == (IFM_ACTIVE | IFM_AVALID)) { 2783 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2784 case IFM_10_T: 2785 case IFM_100_TX: 2786 return; 2787 default: 2788 break; 2789 } 2790 } 2791 VGE_UNLOCK(sc); 2792 pause("vgelnk", hz); 2793 VGE_LOCK(sc); 2794 } 2795 if (i == MII_ANEGTICKS_GIGE) 2796 device_printf(sc->vge_dev, "establishing link failed, " 2797 "WOL may not work!"); 2798 } 2799 /* 2800 * No link, force MAC to have 100Mbps, full-duplex link. 2801 * This is the last resort and may/may not work. 2802 */ 2803 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 2804 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 2805} 2806 2807static void 2808vge_setwol(struct vge_softc *sc) 2809{ 2810 struct ifnet *ifp; 2811 uint16_t pmstat; 2812 uint8_t val; 2813 2814 VGE_LOCK_ASSERT(sc); 2815 2816 if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) { 2817 /* No PME capability, PHY power down. */ 2818 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR, 2819 BMCR_PDOWN); 2820 vge_miipoll_stop(sc); 2821 return; 2822 } 2823 2824 ifp = sc->vge_ifp; 2825 2826 /* Clear WOL on pattern match. */ 2827 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2828 /* Disable WOL on magic/unicast packet. */ 2829 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2830 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2831 VGE_WOLCFG_PMEOVR); 2832 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2833 vge_setlinkspeed(sc); 2834 val = 0; 2835 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2836 val |= VGE_WOLCR1_UCAST; 2837 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2838 val |= VGE_WOLCR1_MAGIC; 2839 CSR_WRITE_1(sc, VGE_WOLCR1S, val); 2840 val = 0; 2841 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2842 val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB; 2843 CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR); 2844 /* Disable MII auto-polling. */ 2845 vge_miipoll_stop(sc); 2846 } 2847 CSR_SETBIT_1(sc, VGE_DIAGCTL, 2848 VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE); 2849 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2850 2851 /* Clear WOL status on pattern match. */ 2852 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2853 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2854 2855 val = CSR_READ_1(sc, VGE_PWRSTAT); 2856 val |= VGE_STICKHW_SWPTAG; 2857 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2858 /* Put hardware into sleep. */ 2859 val = CSR_READ_1(sc, VGE_PWRSTAT); 2860 val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1; 2861 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2862 /* Request PME if WOL is requested. */ 2863 pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap + 2864 PCIR_POWER_STATUS, 2); 2865 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2866 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2867 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2868 pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS, 2869 pmstat, 2); 2870} 2871 2872static void 2873vge_clrwol(struct vge_softc *sc) 2874{ 2875 uint8_t val; 2876 2877 val = CSR_READ_1(sc, VGE_PWRSTAT); 2878 val &= ~VGE_STICKHW_SWPTAG; 2879 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2880 /* Disable WOL and clear power state indicator. */ 2881 val = CSR_READ_1(sc, VGE_PWRSTAT); 2882 val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1); 2883 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2884 2885 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2886 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2887 2888 /* Clear WOL on pattern match. */ 2889 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2890 /* Disable WOL on magic/unicast packet. */ 2891 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2892 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2893 VGE_WOLCFG_PMEOVR); 2894 /* Clear WOL status on pattern match. */ 2895 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2896 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2897} 2898