/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 200616 2009-12-16 19:49:23Z yongari $");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers.
 * If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */
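/*
 * A sketch of the 48-bit split described above, assuming the usual
 * definitions of the VGE_ADDR_LO()/VGE_ADDR_HI() macros used below
 * (see if_vgevar.h):
 *
 *	lo = busaddr & 0xFFFFFFFF;	descriptor vge_addrlo
 *	hi = (busaddr >> 32) & 0xFFFF;	low 16 bits of vge_addrhi
 *
 * Only 48 bits are expressible per descriptor, which is why
 * vge_dma_alloc() below falls back to a 32-bit DMA tag whenever the
 * rings would not share one such 'segment.'
 */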
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* Tunables */
static int msi_disable = 0;
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);
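/*
 * For example, MSI can be disabled at boot with a loader.conf line
 * (a usage sketch; the tunable name comes from the TUNABLE_INT above):
 *
 *	hw.vge.msi_disable="1"
 */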
/*
 * The SQE error counter of MIB seems to report bogus value.
 * Vendor's workaround does not seem to work on PCIe based
 * controllers. Disable it until we find better workaround.
 */
#undef VGE_ENABLE_SQEERR

/*
 * Various supported device vendors/types and their names.
 */
static struct vge_type vge_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_61XX,
	    "VIA Networking Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int	vge_attach(device_t);
static int	vge_detach(device_t);
static int	vge_probe(device_t);
static int	vge_resume(device_t);
static int	vge_shutdown(device_t);
static int	vge_suspend(device_t);

static void	vge_cam_clear(struct vge_softc *);
static int	vge_cam_set(struct vge_softc *, uint8_t *);
static void	vge_discard_rxbuf(struct vge_softc *, int);
static int	vge_dma_alloc(struct vge_softc *);
static void	vge_dma_free(struct vge_softc *);
static void	vge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
#ifdef VGE_EEPROM
static void	vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
#endif
static int	vge_encap(struct vge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
		vge_fixup_rx(struct mbuf *);
#endif
static void	vge_freebufs(struct vge_softc *);
static void	vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	vge_ifmedia_upd(struct ifnet *);
static void	vge_init(void *);
static void	vge_init_locked(struct vge_softc *);
static void	vge_intr(void *);
static int	vge_ioctl(struct ifnet *, u_long, caddr_t);
static void	vge_link_statchg(void *);
static int	vge_miibus_readreg(device_t, int, int);
static void	vge_miibus_statchg(device_t);
static int	vge_miibus_writereg(device_t, int, int, int);
static void	vge_miipoll_start(struct vge_softc *);
static void	vge_miipoll_stop(struct vge_softc *);
static int	vge_newbuf(struct vge_softc *, int);
static void	vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int);
static void	vge_reset(struct vge_softc *);
static int	vge_rx_list_init(struct vge_softc *);
static int	vge_rxeof(struct vge_softc *, int);
static void	vge_rxfilter(struct vge_softc *);
static void	vge_setvlan(struct vge_softc *);
static void	vge_start(struct ifnet *);
static void	vge_start_locked(struct ifnet *);
static void	vge_stats_clear(struct vge_softc *);
static void	vge_stats_update(struct vge_softc *);
static void	vge_stop(struct vge_softc *);
static void	vge_sysctl_node(struct vge_softc *);
static int	vge_tx_list_init(struct vge_softc *);
static void	vge_txeof(struct vge_softc *);
static void	vge_watchdog(void *);

static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	{ 0, 0 }
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
#endif

/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
#ifdef VGE_EEPROM
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to start MII autopoll\n");
}

static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i;
	uint16_t rval = 0;

	sc = device_get_softc(dev);

	if (phy != sc->vge_phyaddr)
		return (0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	if (phy != sc->vge_phyaddr)
		return (0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}
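/*
 * A worked example of the mask-bit arithmetic above (illustrative
 * only): CAM entry 10 lives in mask byte 10/8 = 1, and its enable
 * bit is 1 << (10 & 7) = 0x04, so vge_cam_set() would perform the
 * equivalent of:
 *
 *	CSR_SETBIT_1(sc, VGE_CAM0 + 1, 0x04);
 */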
static void
vge_setvlan(struct vge_softc *sc)
{
	struct ifnet *ifp;
	uint8_t cfg;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;
	cfg = CSR_READ_1(sc, VGE_RXCFG);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		cfg |= VGE_VTAG_OPT2;
	else
		cfg &= ~VGE_VTAG_OPT2;
	CSR_WRITE_1(sc, VGE_RXCFG, cfg);
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_rxfilter(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h, hashes[2];
	uint8_t rxcfg;
	int error = 0;

	VGE_LOCK_ASSERT(sc);

	/* First, zot all the multicast entries. */
	hashes[0] = 0;
	hashes[1] = 0;

	rxcfg = CSR_READ_1(sc, VGE_RXCTL);
	rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
	    VGE_RXCTL_RX_PROMISC);
	/*
	 * Always allow VLAN oversized frames and frames for
	 * this host.
	 */
	rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;

	ifp = sc->vge_ifp;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= VGE_RXCTL_RX_BCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= VGE_RXCTL_RX_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			hashes[0] = 0xFFFFFFFF;
			hashes[1] = 0xFFFFFFFF;
		}
		goto done;
	}

	vge_cam_clear(sc);
	/* Now program new ones */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}
	if_maddr_runlock(ifp);

done:
	if (hashes[0] != 0 || hashes[1] != 0)
		rxcfg |= VGE_RXCTL_RX_MCAST;
	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
}
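/*
 * Sketch of the hash computation above: the top 6 bits of the
 * big-endian CRC32 of the MAC address select one of 64 hash bits,
 * split across the two 32-bit MAR registers. For example, a CRC of
 * 0xF0000000 gives h = 60, which sets bit 60 - 32 = 28 of hashes[1]
 * (written to VGE_MAR1). Illustrative arithmetic, not driver code.
 */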
static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(device_t dev)
{
	struct vge_type *t;

	t = vge_devs;

	while (t->vge_name != NULL) {
		if ((pci_get_vendor(dev) == t->vge_vid) &&
		    (pci_get_device(dev) == t->vge_did)) {
			device_set_desc(dev, t->vge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Map a single buffer address.
 */

struct vge_dmamap_arg {
	bus_addr_t	vge_busaddr;
};

static void
vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vge_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vge_dmamap_arg *)arg;
	ctx->vge_busaddr = segs[0].ds_addr;
}
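/*
 * vge_dmamap_cb() is the usual single-segment bus_dmamap_load()
 * callback idiom: the caller passes a struct vge_dmamap_arg, loads
 * the map, and reads the bus address back out, e.g. (a sketch of the
 * pattern used throughout vge_dma_alloc() below):
 *
 *	struct vge_dmamap_arg ctx;
 *
 *	ctx.vge_busaddr = 0;
 *	error = bus_dmamap_load(tag, map, vaddr, size,
 *	    vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 *	paddr = ctx.vge_busaddr;
 */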
static int
vge_dma_alloc(struct vge_softc *sc)
{
	struct vge_dmamap_arg ctx;
	struct vge_txdesc *txd;
	struct vge_rxdesc *rxd;
	bus_addr_t lowaddr, tx_ring_end, rx_ring_end;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;

again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_ring_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
	    VGE_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VGE_TX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VGE_TX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */
	    VGE_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VGE_RX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VGE_RX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate Rx ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag,
	    (void **)&sc->vge_rdata.vge_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vge_cdata.vge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}

	ctx.vge_busaddr = 0;
	error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring,
	    VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.vge_busaddr == 0) {
		device_printf(sc->vge_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag,
	    (void **)&sc->vge_rdata.vge_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vge_cdata.vge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}

	ctx.vge_busaddr = 0;
	error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring,
	    VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.vge_busaddr == 0) {
		device_printf(sc->vge_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr;

	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ;
	rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ;
	if ((VGE_ADDR_HI(tx_ring_end) !=
	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) ||
	    (VGE_ADDR_HI(rx_ring_end) !=
	    VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) ||
	    VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) {
		device_printf(sc->vge_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA address mode.\n");
		vge_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
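	/*
	 * The check above enforces the 'segment' requirement from the
	 * top-of-file comment: both rings, start to end, must share the
	 * same VGE_ADDR_HI() value (the same 4GB window), since the chip
	 * gets only one high-address register per queue.  If, say, the
	 * Tx ring were loaded at 0x1FFFFF000 it would straddle the 4GB
	 * line, so the driver frees everything, clamps the tag to 32-bit
	 * addresses, and reallocates.
	 */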
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    VGE_BUF_DMA_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_buffer_tag);
	if (error != 0) {
		device_printf(sc->vge_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * VGE_MAXTXSEGS,	/* maxsize */
	    VGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_tx_tag);
	if (error != 0) {
		device_printf(sc->vge_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */
	    VGE_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_cdata.vge_rx_tag);
	if (error != 0) {
		device_printf(sc->vge_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		txd = &sc->vge_cdata.vge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vge_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
	    &sc->vge_cdata.vge_rx_sparemap)) != 0) {
		device_printf(sc->vge_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		rxd = &sc->vge_cdata.vge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vge_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vge_dma_free(struct vge_softc *sc)
{
	struct vge_txdesc *txd;
	struct vge_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->vge_cdata.vge_tx_ring_tag != NULL) {
		if (sc->vge_cdata.vge_tx_ring_map)
			bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag,
			    sc->vge_cdata.vge_tx_ring_map);
		if (sc->vge_cdata.vge_tx_ring_map &&
		    sc->vge_rdata.vge_tx_ring)
			bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag,
			    sc->vge_rdata.vge_tx_ring,
			    sc->vge_cdata.vge_tx_ring_map);
		sc->vge_rdata.vge_tx_ring = NULL;
		sc->vge_cdata.vge_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag);
		sc->vge_cdata.vge_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->vge_cdata.vge_rx_ring_tag != NULL) {
		if (sc->vge_cdata.vge_rx_ring_map)
			bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag,
			    sc->vge_cdata.vge_rx_ring_map);
		if (sc->vge_cdata.vge_rx_ring_map &&
		    sc->vge_rdata.vge_rx_ring)
			bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag,
			    sc->vge_rdata.vge_rx_ring,
			    sc->vge_cdata.vge_rx_ring_map);
		sc->vge_rdata.vge_rx_ring = NULL;
		sc->vge_cdata.vge_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag);
		sc->vge_cdata.vge_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->vge_cdata.vge_tx_tag != NULL) {
		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
			txd = &sc->vge_cdata.vge_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag);
		sc->vge_cdata.vge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->vge_cdata.vge_rx_tag != NULL) {
		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
			rxd = &sc->vge_cdata.vge_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vge_cdata.vge_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag,
			    sc->vge_cdata.vge_rx_sparemap);
			sc->vge_cdata.vge_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag);
		sc->vge_cdata.vge_rx_tag = NULL;
	}

	if (sc->vge_cdata.vge_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag);
		sc->vge_cdata.vge_buffer_tag = NULL;
	}
	if (sc->vge_cdata.vge_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag);
		sc->vge_cdata.vge_ring_tag = NULL;
	}
}
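/*
 * Note that the teardown above mirrors allocation in reverse: maps
 * are unloaded and destroyed before their tags, and the per-queue
 * tags are destroyed before the parent tags they were derived from.
 * Every pointer is NULLed as it goes, which is what makes
 * vge_dma_free() safe to call from any partially-completed
 * vge_dma_alloc() error path (and from the 4GB retry above).
 */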
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	struct ifnet *ifp;
	int error = 0, cap, i, msic, rid;

	sc = device_get_softc(dev);
	sc->vge_dev = dev;

	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(1);
	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->vge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) == 0) {
		sc->vge_flags |= VGE_FLAG_PCIE;
		sc->vge_expcap = cap;
	}
	rid = 0;
	msic = pci_msi_count(dev);
	if (msi_disable == 0 && msic > 0) {
		msic = 1;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				sc->vge_flags |= VGE_FLAG_MSI;
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				rid = 1;
			} else
				pci_release_msi(dev);
		}
	}

	/* Allocate interrupt */
	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE);
	if (sc->vge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);
	/* Reload EEPROM. */
	CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		device_printf(dev, "EEPROM reload timed out\n");
	/*
	 * Clear PACPI as EEPROM reload will set the bit. Otherwise
	 * MAC will receive magic packet which in turn confuses
	 * controller.
	 */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);
	/*
	 * Save configured PHY address.
	 * It seems the PHY address of PCIe controllers just
	 * reflects media jump strapping status so we assume the
	 * internal PHY address of PCIe controller is at 1.
	 */
	if ((sc->vge_flags & VGE_FLAG_PCIE) != 0)
		sc->vge_phyaddr = 1;
	else
		sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) &
		    VGE_MIICFG_PHYADDR;
	vge_sysctl_node(sc);
	error = vge_dma_alloc(sc);
	if (error)
		goto fail;

	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->vge_miibus,
	    vge_ifmedia_upd, vge_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_start = vge_start;
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
	    IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	ifp->if_init = vge_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, vge_intr, sc, &sc->vge_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vge_detach(dev);

	return (error);
}
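/*
 * As a usage aside: since the driver also builds as a module, it can
 * be loaded at boot with an if_vge_load="YES" line in loader.conf
 * (the standard convention for NIC modules; see vge(4)).
 */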
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		VGE_LOCK(sc);
		vge_stop(sc);
		VGE_UNLOCK(sc);
		callout_drain(&sc->vge_watchdog);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
	if (sc->vge_flags & VGE_FLAG_MSI)
		pci_release_msi(dev);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(1), sc->vge_res);
	if (ifp)
		if_free(ifp);

	vge_dma_free(sc);
	mtx_destroy(&sc->vge_mtx);

	return (0);
}

static void
vge_discard_rxbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	int i;

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
		for (i = VGE_RXCHUNK; i > 0; i--) {
			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
			rxd = rxd->rxd_prev;
		}
		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
	}
}

static int
vge_newbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int i, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * This is part of an evil trick to deal with strict-alignment
	 * architectures. The VIA chip requires RX buffers to be aligned
	 * on 32-bit boundaries, but that will hose strict-alignment
	 * architectures. To get around this, we leave some empty space
	 * at the start of each buffer and for non-strict-alignment hosts,
	 * we copy the buffer back two bytes to achieve word alignment.
	 * This is slightly more efficient than allocating a new buffer,
	 * copying the contents, and discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, VGE_RX_BUF_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
	    sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
	sc->vge_cdata.vge_rx_sparemap = map;
	bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;
	rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
	    (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
		for (i = VGE_RXCHUNK; i > 0; i--) {
			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
			rxd = rxd->rxd_prev;
		}
		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
	}

	return (0);
}
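/*
 * To make the chunked OWN-bit handoff above concrete: with
 * VGE_RXCHUNK == 4, descriptors 0..2 are filled but left un-owned;
 * only when descriptor 3 is filled does the loop walk back through
 * rxd_prev, flipping OWN on 3, 2, 1, 0 in one shot, and the count is
 * added to vge_rx_commit so vge_rxeof() can credit the residue
 * counter.  (Illustrative; VGE_RXCHUNK is defined in if_vgevar.h.)
 */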
static int
vge_tx_list_init(struct vge_softc *sc)
{
	struct vge_ring_data *rd;
	struct vge_txdesc *txd;
	int i;

	VGE_LOCK_ASSERT(sc);

	sc->vge_cdata.vge_tx_prodidx = 0;
	sc->vge_cdata.vge_tx_considx = 0;
	sc->vge_cdata.vge_tx_cnt = 0;

	rd = &sc->vge_rdata;
	bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ);
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		txd = &sc->vge_cdata.vge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->vge_tx_ring[i];
	}

	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static int
vge_rx_list_init(struct vge_softc *sc)
{
	struct vge_ring_data *rd;
	struct vge_rxdesc *rxd;
	int i;

	VGE_LOCK_ASSERT(sc);

	sc->vge_cdata.vge_rx_prodidx = 0;
	sc->vge_cdata.vge_head = NULL;
	sc->vge_cdata.vge_tail = NULL;
	sc->vge_cdata.vge_rx_commit = 0;

	rd = &sc->vge_rdata;
	bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ);
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		rxd = &sc->vge_cdata.vge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->vge_rx_ring[i];
		if (i == 0)
			rxd->rxd_prev =
			    &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1];
		else
			rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1];
		if (vge_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->vge_cdata.vge_rx_commit = 0;

	return (0);
}

static void
vge_freebufs(struct vge_softc *sc)
{
	struct vge_txdesc *txd;
	struct vge_rxdesc *rxd;
	struct ifnet *ifp;
	int i;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;
	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		rxd = &sc->vge_cdata.vge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		txd = &sc->vge_cdata.vge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			ifp->if_oerrors++;
		}
	}
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
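/*
 * vge_fixup_rx() above is the 'copy back two bytes' half of the
 * trick described in vge_newbuf(): the payload arrives 32-bit
 * aligned (as the chip requires), and the 16-bit copy slides the
 * whole frame back by ETHER_ALIGN (2) bytes so that the 14-byte
 * ethernet header leaves the IP header 32-bit aligned for
 * strict-alignment CPUs.  Roughly:
 *
 *	before: m_data % 4 == 0, IP header at m_data + 14 (misaligned)
 *	after:  m_data % 4 == 2, IP header at m_data + 14 (aligned)
 */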
/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static int
vge_rxeof(struct vge_softc *sc, int count)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int prod, prog, total_len;
	struct vge_rxdesc *rxd;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	prod = sc->vge_cdata.vge_rx_prodidx;
	for (prog = 0; count > 0 &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	    VGE_RX_DESC_INC(prod)) {
		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
		rxstat = le32toh(cur_rx->vge_sts);
		if ((rxstat & VGE_RDSTS_OWN) != 0)
			break;
		count--;
		prog++;
		rxctl = le32toh(cur_rx->vge_ctl);
		total_len = VGE_RXBYTES(rxstat);
		rxd = &sc->vge_cdata.vge_rxdesc[prod];
		m = rxd->rx_m;

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if ((rxstat & VGE_RXPKT_SOF) != 0) {
			if (vge_newbuf(sc, prod) != 0) {
				ifp->if_iqdrops++;
				VGE_CHAIN_RESET(sc);
				vge_discard_rxbuf(sc, prod);
				continue;
			}
			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
			if (sc->vge_cdata.vge_head == NULL) {
				sc->vge_cdata.vge_head = m;
				sc->vge_cdata.vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_cdata.vge_tail->m_next = m;
				sc->vge_cdata.vge_tail = m;
			}
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 * We also want to receive bad-checksummed frames and
		 * frames with bad length.
		 */
		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
		    (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
		    VGE_RDSTS_CSUMERR)) == 0) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			VGE_CHAIN_RESET(sc);
			vge_discard_rxbuf(sc, prod);
			continue;
		}

		if (vge_newbuf(sc, prod) != 0) {
			ifp->if_iqdrops++;
			VGE_CHAIN_RESET(sc);
			vge_discard_rxbuf(sc, prod);
			continue;
		}

		/* Chain received mbufs. */
		if (sc->vge_cdata.vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_cdata.vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_cdata.vge_tail->m_next = m;
			}
			m = sc->vge_cdata.vge_head;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else {
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
		}

#ifndef __NO_STRICT_ALIGNMENT
		vge_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
		    (rxctl & VGE_RDCTL_FRAG) == 0) {
			/* Check IP header checksum */
			if ((rxctl & VGE_RDCTL_IPPKT) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if ((rxstat & VGE_RDSTS_VTAG) != 0) {
			/*
			 * The 32-bit rxctl register is stored in little-endian.
			 * However, the 16-bit vlan tag is stored in big-endian,
			 * so we have to byte swap it.
			 */
			m->m_pkthdr.ether_vtag =
			    bswap16(rxctl & VGE_RDCTL_VLANID);
			m->m_flags |= M_VLANTAG;
		}

		VGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VGE_LOCK(sc);
		sc->vge_cdata.vge_head = NULL;
		sc->vge_cdata.vge_tail = NULL;
	}

	if (prog > 0) {
		sc->vge_cdata.vge_rx_prodidx = prod;
		bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
		    sc->vge_cdata.vge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Update residue counter. */
		if (sc->vge_cdata.vge_rx_commit != 0) {
			CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT,
			    sc->vge_cdata.vge_rx_commit);
			sc->vge_cdata.vge_rx_commit = 0;
		}
	}
	return (prog);
}
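/*
 * The residue counter written above is how the chip learns that
 * descriptors were returned: vge_newbuf()/vge_discard_rxbuf()
 * accumulate OWN-bit handoffs in vge_rx_commit (in multiples of
 * VGE_RXCHUNK), and vge_rxeof() credits the total back via a single
 * VGE_RXDESC_RESIDUECNT write per invocation rather than one per
 * descriptor, keeping register writes off the fast path.
 */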
static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct vge_tx_desc *cur_tx;
	struct vge_txdesc *txd;
	uint32_t txstat;
	int cons, prod;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	if (sc->vge_cdata.vge_tx_cnt == 0)
		return;

	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cons = sc->vge_cdata.vge_tx_considx;
	prod = sc->vge_cdata.vge_tx_prodidx;
	for (; cons != prod; VGE_TX_DESC_INC(cons)) {
		cur_tx = &sc->vge_rdata.vge_tx_ring[cons];
		txstat = le32toh(cur_tx->vge_sts);
		if ((txstat & VGE_TDSTS_OWN) != 0)
			break;
		sc->vge_cdata.vge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->vge_cdata.vge_txdesc[cons];
		bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap);

		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		txd->tx_desc->vge_frag[0].vge_addrhi = 0;
	}
	bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
	    sc->vge_cdata.vge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->vge_cdata.vge_tx_considx = cons;
	if (sc->vge_cdata.vge_tx_cnt == 0)
		sc->vge_timer = 0;
	else {
		/*
		 * If not all descriptors have been reaped yet,
		 * reload the timer so that we will eventually get another
		 * interrupt that will cause us to re-enter this routine.
		 * This is done in case the transmitter has gone idle.
		 */
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
	}
}

static void
vge_link_statchg(void *xsc)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	sc = xsc;
	ifp = sc->vge_ifp;
	VGE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->vge_miibus);

	mii_pollstat(mii);
	if ((sc->vge_flags & VGE_FLAG_LINK) != 0) {
		if (!(mii->mii_media_status & IFM_ACTIVE)) {
			sc->vge_flags &= ~VGE_FLAG_LINK;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_DOWN);
		}
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_flags |= VGE_FLAG_LINK;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_UP);
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				vge_start_locked(ifp);
		}
	}
}

#ifdef DEVICE_POLLING
static int
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	VGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		goto done;

	rx_npkts = vge_rxeof(sc, count);
	vge_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vge_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		uint32_t status;
		status = CSR_READ_4(sc, VGE_ISR);
		if (status == 0xFFFFFFFF)
			goto done;
		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */
		if (status & VGE_ISR_TXDMA_STALL ||
		    status & VGE_ISR_RXDMA_STALL) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vge_init_locked(sc);
		}

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, count);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}
	}
done:
	VGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
vge_intr(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	VGE_LOCK(sc);

	ifp = sc->vge_ifp;
	if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 ||
	    (ifp->if_flags & IFF_UP) == 0) {
		VGE_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		VGE_UNLOCK(sc);
		return;
	}
#endif

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {

		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc, VGE_RX_DESC_CNT);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, VGE_RX_DESC_CNT);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vge_init_locked(sc);
		}

		if (status & VGE_ISR_LINKSTS)
			vge_link_statchg(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vge_start_locked(ifp);

	VGE_UNLOCK(sc);
}

static int
vge_encap(struct vge_softc *sc, struct mbuf **m_head)
{
	struct vge_txdesc *txd;
	struct vge_tx_frag *frag;
	struct mbuf *m;
	bus_dma_segment_t txsegs[VGE_MAXTXSEGS];
	int error, i, nsegs, padlen;
	uint32_t cflags;

	VGE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	/* Argh. This chip does not autopad short frames. */
	if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		m = *m_head;
		padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		if (M_TRAILINGSPACE(m) < padlen) {
			m = m_defrag(m, M_DONTWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}

	txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx];

	error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
		cflags |= VGE_TDCTL_IPCSUM;
	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
		cflags |= VGE_TDCTL_TCPCSUM;
	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
		cflags |= VGE_TDCTL_UDPCSUM;

	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0)
		cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG;
	txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16);
	/*
	 * XXX
	 * Velocity family seems to support TSO but no information
	 * for MSS configuration is available. Also the number of
	 * fragments supported by a descriptor is too small to hold
	 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,
	 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build
	 * longer chain of buffers but no additional information is
	 * available.
	 *
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why. This also means we can't use the last fragment
	 * field of Tx descriptor.
	 */
	txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) |
	    VGE_TD_LS_NORM);
	for (i = 0; i < nsegs; i++) {
		frag = &txd->tx_desc->vge_frag[i];
		frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr));
		frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) |
		    (VGE_BUFLEN(txsegs[i].ds_len) << 16));
	}

	sc->vge_cdata.vge_tx_cnt++;
	VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx);

	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC);
	txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN);
	txd->tx_m = m;

	return (0);
}
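/*
 * Ordering note on vge_encap(): the fragment addresses and lengths
 * are all written, and the buffer map synced PREWRITE, before the
 * final htole32(VGE_TDSTS_OWN) store flips ownership to the chip,
 * so the hardware can never observe a half-built descriptor.  The
 * actual 'kick' (VGE_TXQCSR_WAK0) is deferred to vge_start_locked()
 * so that a burst of frames costs a single register write.
 */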
/*
 * Main transmit routine.
 */

static void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	vge_start_locked(ifp);
	VGE_UNLOCK(sc);
}

static void
vge_start_locked(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct vge_txdesc *txd;
	struct mbuf *m_head;
	int enq, idx;

	sc = ifp->if_softc;

	VGE_LOCK_ASSERT(sc);

	if ((sc->vge_flags & VGE_FLAG_LINK) == 0 ||
	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	idx = sc->vge_cdata.vge_tx_prodidx;
	VGE_TX_DESC_DEC(idx);
	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		txd = &sc->vge_cdata.vge_txdesc[idx];
		txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q);
		VGE_TX_DESC_INC(idx);

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag,
		    sc->vge_cdata.vge_tx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Issue a transmit command. */
		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
		/*
		 * Use the countdown timer for interrupt moderation.
		 * 'TX done' interrupts are disabled. Instead, we reset the
		 * countdown timer, which will begin counting until it hits
		 * the value in the SSTIMER register, and then trigger an
		 * interrupt. Each time we set the TIMER0_ENABLE bit, the
		 * timer count is reloaded. Only when the transmitter
		 * is idle will the timer hit 0 and an interrupt fire.
		 */
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->vge_timer = 5;
	}
}
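/*
 * In effect, with TIMER0 at microsecond resolution and VGE_SSTIMER
 * loaded with 400 in vge_init_locked() below, a busy transmitter is
 * reaped via VGE_ISR_TIMER0 roughly every 400 usecs instead of once
 * per frame, and an idle one gets a final interrupt when the timer
 * expires.  (Illustrative numbers taken from the init code below.)
 */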
static void
vge_init(void *xsc)
{
	struct vge_softc *sc = xsc;

	VGE_LOCK(sc);
	vge_init_locked(sc);
	VGE_UNLOCK(sc);
}

static void
vge_init_locked(struct vge_softc *sc)
{
	struct ifnet *ifp = sc->vge_ifp;
	struct mii_data *mii;
	int error, i;

	VGE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->vge_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */
	error = vge_rx_list_init(sc);
	if (error != 0) {
		device_printf(sc->vge_dev, "no memory for Rx buffers.\n");
		return;
	}
	vge_tx_list_init(sc);
	/* Clear MAC statistics. */
	vge_stats_clear(sc);
	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);

	/*
	 * Set receive FIFO threshold.  Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);

	/* Set DMA burst length. */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm. */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution. */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */
	CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue. */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Init the CAM filter. */
	vge_cam_clear(sc);

	/* Set up receiver filter. */
	vge_rxfilter(sc);
	vge_setvlan(sc);

	/* Enable flow control. */
	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired). */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);	/* clear the STOP bit */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure the one-shot timer for microsecond
	 * resolution and load it for 400 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive.  Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */
#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif
	{
		/*
		 * Enable interrupts.
		 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	sc->vge_flags &= ~VGE_FLAG_LINK;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
}
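
#ifdef VGE_EXAMPLE_SKETCH
/*
 * Illustrative sketch only (guarded out): the 'notyet' block above
 * reaches the holdoff and RX suppression registers through register
 * pages multiplexed behind VGE_CAMCTL_PAGESEL; selecting a page is
 * always the same clear-then-set sequence.
 */
static void
vge_example_select_page(struct vge_softc *sc, uint8_t page)
{

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, page);
}
#endif
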
/*
 * Set media options.
 */
static int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	mii = device_get_softc(sc->vge_miibus);
	error = mii_mediachg(mii);
	VGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vge_miibus);

	VGE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VGE_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	VGE_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
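
/*
 * For reference, the forced-mode settings programmed by
 * vge_miibus_statchg() below work out to:
 *
 *	media			MACFORCE	FDXFORCE
 *	autoselect		clear		clear
 *	1000baseT		set		clear
 *	10/100 full duplex	set		set
 *	10/100 half duplex	set		clear
 */
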
static void
vge_miibus_statchg(device_t dev)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register.  If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit.  This applies only to
	 * 10Mbps and 100Mbps speeds.  In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */
	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		device_printf(dev, "unknown media type: %x\n",
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}

static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		VGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vge_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vge_rxfilter(sc);
			else
				vge_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vge_stop(sc);
		sc->vge_if_flags = ifp->if_flags;
		VGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VGE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			vge_rxfilter(sc);
		VGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vge_poll, ifp);
				if (error)
					return (error);
				VGE_LOCK(sc);
				/* Disable interrupts. */
				CSR_WRITE_4(sc, VGE_IMR, 0);
				CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
				ifp->if_capenable |= IFCAP_POLLING;
				VGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VGE_LOCK(sc);
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VGE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		VGE_LOCK(sc);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			vge_setvlan(sc);
		}
		VGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
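
#ifdef VGE_EXAMPLE_SKETCH
/*
 * Illustrative sketch only (guarded out): the SIOCSIFCAP mask idiom
 * used above.  XORing the requested and current capability sets
 * leaves a bit set only for capabilities the caller wants flipped,
 * so each supported capability can be toggled independently.
 */
static int
vge_example_cap_toggled(struct ifnet *ifp, struct ifreq *ifr, int cap)
{
	int mask;

	mask = ifr->ifr_reqcap ^ ifp->if_capenable;
	return ((mask & cap) != 0 &&
	    (ifp->if_capabilities & cap) != 0);
}
#endif
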
static void
vge_watchdog(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	VGE_LOCK_ASSERT(sc);
	vge_stats_update(sc);
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
	/* vge_timer == 0 means no transmission is pending. */
	if (sc->vge_timer == 0 || --sc->vge_timer > 0)
		return;

	ifp = sc->vge_ifp;
	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc, VGE_RX_DESC_CNT);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vge_init_locked(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	sc->vge_timer = 0;
	callout_stop(&sc->vge_watchdog);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	vge_stats_update(sc);
	VGE_CHAIN_RESET(sc);
	vge_txeof(sc);
	vge_freebufs(sc);
}
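
/*
 * A note on the watchdog timing: vge_start_locked() arms
 * sc->vge_timer with 5 and vge_watchdog() reschedules itself every
 * hz ticks, so a transmit that has not completed within roughly
 * five seconds forces the reinitialization path above.
 */
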
/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc;

	sc = device_get_softc(dev);

	VGE_LOCK(sc);
	vge_stop(sc);

	sc->vge_flags |= VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vge_ifp;

	/* Reenable busmastering. */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	/* Reinitialize the interface if necessary. */
	VGE_LOCK(sc);
	if (ifp->if_flags & IFF_UP) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vge_init_locked(sc);
	}
	sc->vge_flags &= ~VGE_FLAG_SUSPENDED;
	VGE_UNLOCK(sc);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vge_shutdown(device_t dev)
{
	struct vge_softc *sc;

	sc = device_get_softc(dev);

	VGE_LOCK(sc);
	vge_stop(sc);
	VGE_UNLOCK(sc);

	return (0);
}

#define	VGE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vge_sysctl_node(struct vge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vge_hw_stats *stats;

	stats = &sc->vge_stats;
	ctx = device_get_sysctl_ctx(sc->vge_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev));
	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "VGE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
	    &stats->rx_frames, "Frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_good_frames, "Good frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
	    &stats->rx_runts_errs, "Too short frames with errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 byte to maximum sized frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
	    &stats->rx_pkts_1519_max_errs,
	    "1519 byte to maximum sized frames with errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
	    &stats->rx_jumbos, "Jumbo frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
	    &stats->rx_crcerrs, "CRC errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
	    &stats->rx_nobufs, "Frames with no buffer event");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
	    &stats->rx_symerrs, "Frames with symbol errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with mismatched length");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_good_frames, "Good frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 byte frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
	    &stats->tx_jumbos, "Jumbo frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "colls",
	    &stats->tx_colls, "Collisions");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_latecolls, "Late collisions");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause, "Pause frames");
#ifdef VGE_ENABLE_SQEERR
	VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs",
	    &stats->tx_sqeerrs, "SQE errors");
#endif
	/* Clear MAC statistics. */
	vge_stats_clear(sc);
}

#undef	VGE_SYSCTL_STAT_ADD32
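
/*
 * The nodes created by vge_sysctl_node() surface the MAC counters
 * under the device's sysctl tree, so for unit 0 the values show up
 * as, e.g., dev.vge.0.stats.rx.good_frames and
 * dev.vge.0.stats.tx.colls.
 */
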
static void
vge_stats_clear(struct vge_softc *sc)
{
	int i;

	VGE_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE);
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vge_dev, "MIB clear timed out!\n");
	CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) &
	    ~VGE_MIBCSR_FREEZE);
}

static void
vge_stats_update(struct vge_softc *sc)
{
	struct vge_hw_stats *stats;
	struct ifnet *ifp;
	uint32_t mib[VGE_MIB_CNT], val;
	int i;

	VGE_LOCK_ASSERT(sc);

	stats = &sc->vge_stats;
	ifp = sc->vge_ifp;

	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH);
	for (i = VGE_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->vge_dev, "MIB counter dump timed out!\n");
		vge_stats_clear(sc);
		return;
	}

	bzero(mib, sizeof(mib));
reset_idx:
	/* Set MIB read index to 0. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
	for (i = 0; i < VGE_MIB_CNT; i++) {
		val = CSR_READ_4(sc, VGE_MIBDATA);
		if (i != VGE_MIB_DATA_IDX(val)) {
			/* Reading interrupted. */
			goto reset_idx;
		}
		mib[i] = val & VGE_MIB_DATA_MASK;
	}

	/* Rx stats. */
	stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
	stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
	stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
	stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
	stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
	stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
	stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
	stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
	stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
	stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
	stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
	stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
	stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
	stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
	stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
	stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
	stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
	stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
	stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
	stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];

	/* Tx stats. */
	stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
	stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
	stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
	stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
	stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
	stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
	stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
	stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
	stats->tx_colls += mib[VGE_MIB_TX_COLLS];
	stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
	stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
	stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];

	/* Update counters in ifnet. */
	ifp->if_opackets += mib[VGE_MIB_TX_GOOD_FRAMES];

	ifp->if_collisions += mib[VGE_MIB_TX_COLLS] +
	    mib[VGE_MIB_TX_LATECOLLS];

	ifp->if_oerrors += mib[VGE_MIB_TX_COLLS] +
	    mib[VGE_MIB_TX_LATECOLLS];

	ifp->if_ipackets += mib[VGE_MIB_RX_GOOD_FRAMES];

	ifp->if_ierrors += mib[VGE_MIB_RX_FIFO_OVERRUNS] +
	    mib[VGE_MIB_RX_RUNTS] +
	    mib[VGE_MIB_RX_RUNTS_ERRS] +
	    mib[VGE_MIB_RX_CRCERRS] +
	    mib[VGE_MIB_RX_ALIGNERRS] +
	    mib[VGE_MIB_RX_NOBUFS] +
	    mib[VGE_MIB_RX_SYMERRS] +
	    mib[VGE_MIB_RX_LENERRS];
}
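
#ifdef VGE_EXAMPLE_SKETCH
/*
 * Illustrative sketch only (guarded out) of the MIB dump protocol
 * implemented in vge_stats_update() above: each VGE_MIBDATA read
 * returns a counter value in the low bits plus that counter's index,
 * so a mismatch between the loop index and VGE_MIB_DATA_IDX(val)
 * means the hardware restarted the sequence and the whole dump must
 * be retried from VGE_MIBCSR_RINI.
 */
static void
vge_example_read_mib(struct vge_softc *sc, uint32_t mib[VGE_MIB_CNT])
{
	uint32_t val;
	int i;

again:
	/* Reset the hardware read index to counter 0. */
	CSR_WRITE_1(sc, VGE_MIBCSR,
	    CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI);
	for (i = 0; i < VGE_MIB_CNT; i++) {
		val = CSR_READ_4(sc, VGE_MIBDATA);
		/* An index mismatch means the dump was interrupted. */
		if (i != VGE_MIB_DATA_IDX(val))
			goto again;
		mib[i] = val & VGE_MIB_DATA_MASK;
	}
}
#endif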