if_vge.c revision 200531
1/*- 2 * Copyright (c) 2004 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 200531 2009-12-14 19:44:54Z yongari $"); 35 36/* 37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44/* 45 * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that 46 * combines a tri-speed ethernet MAC and PHY, with the following 47 * features: 48 * 49 * o Jumbo frame support up to 16K 50 * o Transmit and receive flow control 51 * o IPv4 checksum offload 52 * o VLAN tag insertion and stripping 53 * o TCP large send 54 * o 64-bit multicast hash table filter 55 * o 64 entry CAM filter 56 * o 16K RX FIFO and 48K TX FIFO memory 57 * o Interrupt moderation 58 * 59 * The VT6122 supports up to four transmit DMA queues. The descriptors 60 * in the transmit ring can address up to 7 data fragments; frames which 61 * span more than 7 data buffers must be coalesced, but in general the 62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 63 * long. The receive descriptors address only a single buffer. 64 * 65 * There are two peculiar design issues with the VT6122. One is that 66 * receive data buffers must be aligned on a 32-bit boundary. This is 67 * not a problem where the VT6122 is used as a LOM device in x86-based 68 * systems, but on architectures that generate unaligned access traps, we 69 * have to do some copying. 70 * 71 * The other issue has to do with the way 64-bit addresses are handled. 72 * The DMA descriptors only allow you to specify 48 bits of addressing 73 * information. The remaining 16 bits are specified using one of the 74 * I/O registers. If you only have a 32-bit system, then this isn't 75 * an issue, but if you have a 64-bit system and more than 4GB of 76 * memory, you must have to make sure your network data buffers reside 77 * in the same 48-bit 'segment.' 78 * 79 * Special thanks to Ryan Fu at VIA Networking for providing documentation 80 * and sample NICs for testing. 81 */ 82 83#ifdef HAVE_KERNEL_OPTION_HEADERS 84#include "opt_device_polling.h" 85#endif 86 87#include <sys/param.h> 88#include <sys/endian.h> 89#include <sys/systm.h> 90#include <sys/sockio.h> 91#include <sys/mbuf.h> 92#include <sys/malloc.h> 93#include <sys/module.h> 94#include <sys/kernel.h> 95#include <sys/socket.h> 96 97#include <net/if.h> 98#include <net/if_arp.h> 99#include <net/ethernet.h> 100#include <net/if_dl.h> 101#include <net/if_media.h> 102#include <net/if_types.h> 103#include <net/if_vlan_var.h> 104 105#include <net/bpf.h> 106 107#include <machine/bus.h> 108#include <machine/resource.h> 109#include <sys/bus.h> 110#include <sys/rman.h> 111 112#include <dev/mii/mii.h> 113#include <dev/mii/miivar.h> 114 115#include <dev/pci/pcireg.h> 116#include <dev/pci/pcivar.h> 117 118MODULE_DEPEND(vge, pci, 1, 1, 1); 119MODULE_DEPEND(vge, ether, 1, 1, 1); 120MODULE_DEPEND(vge, miibus, 1, 1, 1); 121 122/* "device miibus" required. See GENERIC if you get errors here. */ 123#include "miibus_if.h" 124 125#include <dev/vge/if_vgereg.h> 126#include <dev/vge/if_vgevar.h> 127 128#define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 129 130/* 131 * Various supported device vendors/types and their names. 132 */ 133static struct vge_type vge_devs[] = { 134 { VIA_VENDORID, VIA_DEVICEID_61XX, 135 "VIA Networking Gigabit Ethernet" }, 136 { 0, 0, NULL } 137}; 138 139static int vge_probe (device_t); 140static int vge_attach (device_t); 141static int vge_detach (device_t); 142 143static int vge_encap (struct vge_softc *, struct mbuf **); 144 145static void vge_dmamap_cb (void *, bus_dma_segment_t *, int, int); 146static int vge_dma_alloc (struct vge_softc *); 147static void vge_dma_free (struct vge_softc *); 148static void vge_discard_rxbuf (struct vge_softc *, int); 149static int vge_newbuf (struct vge_softc *, int); 150static int vge_rx_list_init (struct vge_softc *); 151static int vge_tx_list_init (struct vge_softc *); 152static void vge_freebufs (struct vge_softc *); 153#ifndef __NO_STRICT_ALIGNMENT 154static __inline void vge_fixup_rx 155 (struct mbuf *); 156#endif 157static int vge_rxeof (struct vge_softc *, int); 158static void vge_txeof (struct vge_softc *); 159static void vge_intr (void *); 160static void vge_tick (void *); 161static void vge_start (struct ifnet *); 162static void vge_start_locked (struct ifnet *); 163static int vge_ioctl (struct ifnet *, u_long, caddr_t); 164static void vge_init (void *); 165static void vge_init_locked (struct vge_softc *); 166static void vge_stop (struct vge_softc *); 167static void vge_watchdog (void *); 168static int vge_suspend (device_t); 169static int vge_resume (device_t); 170static int vge_shutdown (device_t); 171static int vge_ifmedia_upd (struct ifnet *); 172static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *); 173 174#ifdef VGE_EEPROM 175static void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *); 176#endif 177static void vge_read_eeprom (struct vge_softc *, caddr_t, int, int, int); 178 179static void vge_miipoll_start (struct vge_softc *); 180static void vge_miipoll_stop (struct vge_softc *); 181static int vge_miibus_readreg (device_t, int, int); 182static int vge_miibus_writereg (device_t, int, int, int); 183static void vge_miibus_statchg (device_t); 184 185static void vge_cam_clear (struct vge_softc *); 186static int vge_cam_set (struct vge_softc *, uint8_t *); 187static void vge_setmulti (struct vge_softc *); 188static void vge_reset (struct vge_softc *); 189 190static device_method_t vge_methods[] = { 191 /* Device interface */ 192 DEVMETHOD(device_probe, vge_probe), 193 DEVMETHOD(device_attach, vge_attach), 194 DEVMETHOD(device_detach, vge_detach), 195 DEVMETHOD(device_suspend, vge_suspend), 196 DEVMETHOD(device_resume, vge_resume), 197 DEVMETHOD(device_shutdown, vge_shutdown), 198 199 /* bus interface */ 200 DEVMETHOD(bus_print_child, bus_generic_print_child), 201 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 202 203 /* MII interface */ 204 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 205 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 206 DEVMETHOD(miibus_statchg, vge_miibus_statchg), 207 208 { 0, 0 } 209}; 210 211static driver_t vge_driver = { 212 "vge", 213 vge_methods, 214 sizeof(struct vge_softc) 215}; 216 217static devclass_t vge_devclass; 218 219DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); 220DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0); 221 222#ifdef VGE_EEPROM 223/* 224 * Read a word of data stored in the EEPROM at address 'addr.' 225 */ 226static void 227vge_eeprom_getword(struct vge_softc *sc, int addr, u_int16_t *dest) 228{ 229 int i; 230 u_int16_t word = 0; 231 232 /* 233 * Enter EEPROM embedded programming mode. In order to 234 * access the EEPROM at all, we first have to set the 235 * EELOAD bit in the CHIPCFG2 register. 236 */ 237 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 238 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 239 240 /* Select the address of the word we want to read */ 241 CSR_WRITE_1(sc, VGE_EEADDR, addr); 242 243 /* Issue read command */ 244 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 245 246 /* Wait for the done bit to be set. */ 247 for (i = 0; i < VGE_TIMEOUT; i++) { 248 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 249 break; 250 } 251 252 if (i == VGE_TIMEOUT) { 253 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 254 *dest = 0; 255 return; 256 } 257 258 /* Read the result */ 259 word = CSR_READ_2(sc, VGE_EERDDAT); 260 261 /* Turn off EEPROM access mode. */ 262 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 263 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 264 265 *dest = word; 266 267 return; 268} 269#endif 270 271/* 272 * Read a sequence of words from the EEPROM. 273 */ 274static void 275vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap) 276{ 277 int i; 278#ifdef VGE_EEPROM 279 u_int16_t word = 0, *ptr; 280 281 for (i = 0; i < cnt; i++) { 282 vge_eeprom_getword(sc, off + i, &word); 283 ptr = (u_int16_t *)(dest + (i * 2)); 284 if (swap) 285 *ptr = ntohs(word); 286 else 287 *ptr = word; 288 } 289#else 290 for (i = 0; i < ETHER_ADDR_LEN; i++) 291 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 292#endif 293} 294 295static void 296vge_miipoll_stop(struct vge_softc *sc) 297{ 298 int i; 299 300 CSR_WRITE_1(sc, VGE_MIICMD, 0); 301 302 for (i = 0; i < VGE_TIMEOUT; i++) { 303 DELAY(1); 304 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 305 break; 306 } 307 308 if (i == VGE_TIMEOUT) 309 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 310 311 return; 312} 313 314static void 315vge_miipoll_start(struct vge_softc *sc) 316{ 317 int i; 318 319 /* First, make sure we're idle. */ 320 321 CSR_WRITE_1(sc, VGE_MIICMD, 0); 322 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 323 324 for (i = 0; i < VGE_TIMEOUT; i++) { 325 DELAY(1); 326 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 327 break; 328 } 329 330 if (i == VGE_TIMEOUT) { 331 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 332 return; 333 } 334 335 /* Now enable auto poll mode. */ 336 337 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 338 339 /* And make sure it started. */ 340 341 for (i = 0; i < VGE_TIMEOUT; i++) { 342 DELAY(1); 343 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 344 break; 345 } 346 347 if (i == VGE_TIMEOUT) 348 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 349 350 return; 351} 352 353static int 354vge_miibus_readreg(device_t dev, int phy, int reg) 355{ 356 struct vge_softc *sc; 357 int i; 358 u_int16_t rval = 0; 359 360 sc = device_get_softc(dev); 361 362 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) 363 return(0); 364 365 vge_miipoll_stop(sc); 366 367 /* Specify the register we want to read. */ 368 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 369 370 /* Issue read command. */ 371 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 372 373 /* Wait for the read command bit to self-clear. */ 374 for (i = 0; i < VGE_TIMEOUT; i++) { 375 DELAY(1); 376 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) 377 break; 378 } 379 380 if (i == VGE_TIMEOUT) 381 device_printf(sc->vge_dev, "MII read timed out\n"); 382 else 383 rval = CSR_READ_2(sc, VGE_MIIDATA); 384 385 vge_miipoll_start(sc); 386 387 return (rval); 388} 389 390static int 391vge_miibus_writereg(device_t dev, int phy, int reg, int data) 392{ 393 struct vge_softc *sc; 394 int i, rval = 0; 395 396 sc = device_get_softc(dev); 397 398 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) 399 return(0); 400 401 vge_miipoll_stop(sc); 402 403 /* Specify the register we want to write. */ 404 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 405 406 /* Specify the data we want to write. */ 407 CSR_WRITE_2(sc, VGE_MIIDATA, data); 408 409 /* Issue write command. */ 410 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); 411 412 /* Wait for the write command bit to self-clear. */ 413 for (i = 0; i < VGE_TIMEOUT; i++) { 414 DELAY(1); 415 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) 416 break; 417 } 418 419 if (i == VGE_TIMEOUT) { 420 device_printf(sc->vge_dev, "MII write timed out\n"); 421 rval = EIO; 422 } 423 424 vge_miipoll_start(sc); 425 426 return (rval); 427} 428 429static void 430vge_cam_clear(struct vge_softc *sc) 431{ 432 int i; 433 434 /* 435 * Turn off all the mask bits. This tells the chip 436 * that none of the entries in the CAM filter are valid. 437 * desired entries will be enabled as we fill the filter in. 438 */ 439 440 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 441 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 442 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); 443 for (i = 0; i < 8; i++) 444 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 445 446 /* Clear the VLAN filter too. */ 447 448 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0); 449 for (i = 0; i < 8; i++) 450 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 451 452 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 453 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 454 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 455 456 sc->vge_camidx = 0; 457 458 return; 459} 460 461static int 462vge_cam_set(struct vge_softc *sc, uint8_t *addr) 463{ 464 int i, error = 0; 465 466 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 467 return(ENOSPC); 468 469 /* Select the CAM data page. */ 470 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 471 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); 472 473 /* Set the filter entry we want to update and enable writing. */ 474 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx); 475 476 /* Write the address to the CAM registers */ 477 for (i = 0; i < ETHER_ADDR_LEN; i++) 478 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); 479 480 /* Issue a write command. */ 481 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); 482 483 /* Wake for it to clear. */ 484 for (i = 0; i < VGE_TIMEOUT; i++) { 485 DELAY(1); 486 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) 487 break; 488 } 489 490 if (i == VGE_TIMEOUT) { 491 device_printf(sc->vge_dev, "setting CAM filter failed\n"); 492 error = EIO; 493 goto fail; 494 } 495 496 /* Select the CAM mask page. */ 497 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 498 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 499 500 /* Set the mask bit that enables this filter. */ 501 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8), 502 1<<(sc->vge_camidx & 7)); 503 504 sc->vge_camidx++; 505 506fail: 507 /* Turn off access to CAM. */ 508 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 509 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 510 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 511 512 return (error); 513} 514 515/* 516 * Program the multicast filter. We use the 64-entry CAM filter 517 * for perfect filtering. If there's more than 64 multicast addresses, 518 * we use the hash filter instead. 519 */ 520static void 521vge_setmulti(struct vge_softc *sc) 522{ 523 struct ifnet *ifp; 524 int error = 0/*, h = 0*/; 525 struct ifmultiaddr *ifma; 526 u_int32_t h, hashes[2] = { 0, 0 }; 527 528 VGE_LOCK_ASSERT(sc); 529 530 ifp = sc->vge_ifp; 531 532 /* First, zot all the multicast entries. */ 533 vge_cam_clear(sc); 534 CSR_WRITE_4(sc, VGE_MAR0, 0); 535 CSR_WRITE_4(sc, VGE_MAR1, 0); 536 537 /* 538 * If the user wants allmulti or promisc mode, enable reception 539 * of all multicast frames. 540 */ 541 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 542 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF); 543 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF); 544 return; 545 } 546 547 /* Now program new ones */ 548 if_maddr_rlock(ifp); 549 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 550 if (ifma->ifma_addr->sa_family != AF_LINK) 551 continue; 552 error = vge_cam_set(sc, 553 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 554 if (error) 555 break; 556 } 557 558 /* If there were too many addresses, use the hash filter. */ 559 if (error) { 560 vge_cam_clear(sc); 561 562 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 563 if (ifma->ifma_addr->sa_family != AF_LINK) 564 continue; 565 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 566 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 567 if (h < 32) 568 hashes[0] |= (1 << h); 569 else 570 hashes[1] |= (1 << (h - 32)); 571 } 572 573 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 574 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 575 } 576 if_maddr_runlock(ifp); 577 578 return; 579} 580 581static void 582vge_reset(struct vge_softc *sc) 583{ 584 int i; 585 586 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 587 588 for (i = 0; i < VGE_TIMEOUT; i++) { 589 DELAY(5); 590 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 591 break; 592 } 593 594 if (i == VGE_TIMEOUT) { 595 device_printf(sc->vge_dev, "soft reset timed out"); 596 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 597 DELAY(2000); 598 } 599 600 DELAY(5000); 601 602 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 603 604 for (i = 0; i < VGE_TIMEOUT; i++) { 605 DELAY(5); 606 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 607 break; 608 } 609 610 if (i == VGE_TIMEOUT) { 611 device_printf(sc->vge_dev, "EEPROM reload timed out\n"); 612 return; 613 } 614 615 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 616 617 return; 618} 619 620/* 621 * Probe for a VIA gigabit chip. Check the PCI vendor and device 622 * IDs against our list and return a device name if we find a match. 623 */ 624static int 625vge_probe(device_t dev) 626{ 627 struct vge_type *t; 628 629 t = vge_devs; 630 631 while (t->vge_name != NULL) { 632 if ((pci_get_vendor(dev) == t->vge_vid) && 633 (pci_get_device(dev) == t->vge_did)) { 634 device_set_desc(dev, t->vge_name); 635 return (BUS_PROBE_DEFAULT); 636 } 637 t++; 638 } 639 640 return (ENXIO); 641} 642 643/* 644 * Map a single buffer address. 645 */ 646 647struct vge_dmamap_arg { 648 bus_addr_t vge_busaddr; 649}; 650 651static void 652vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 653{ 654 struct vge_dmamap_arg *ctx; 655 656 if (error != 0) 657 return; 658 659 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 660 661 ctx = (struct vge_dmamap_arg *)arg; 662 ctx->vge_busaddr = segs[0].ds_addr; 663} 664 665static int 666vge_dma_alloc(struct vge_softc *sc) 667{ 668 struct vge_dmamap_arg ctx; 669 struct vge_txdesc *txd; 670 struct vge_rxdesc *rxd; 671 bus_addr_t lowaddr, tx_ring_end, rx_ring_end; 672 int error, i; 673 674 lowaddr = BUS_SPACE_MAXADDR; 675 676again: 677 /* Create parent ring tag. */ 678 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 679 1, 0, /* algnmnt, boundary */ 680 lowaddr, /* lowaddr */ 681 BUS_SPACE_MAXADDR, /* highaddr */ 682 NULL, NULL, /* filter, filterarg */ 683 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 684 0, /* nsegments */ 685 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 686 0, /* flags */ 687 NULL, NULL, /* lockfunc, lockarg */ 688 &sc->vge_cdata.vge_ring_tag); 689 if (error != 0) { 690 device_printf(sc->vge_dev, 691 "could not create parent DMA tag.\n"); 692 goto fail; 693 } 694 695 /* Create tag for Tx ring. */ 696 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 697 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 698 BUS_SPACE_MAXADDR, /* lowaddr */ 699 BUS_SPACE_MAXADDR, /* highaddr */ 700 NULL, NULL, /* filter, filterarg */ 701 VGE_TX_LIST_SZ, /* maxsize */ 702 1, /* nsegments */ 703 VGE_TX_LIST_SZ, /* maxsegsize */ 704 0, /* flags */ 705 NULL, NULL, /* lockfunc, lockarg */ 706 &sc->vge_cdata.vge_tx_ring_tag); 707 if (error != 0) { 708 device_printf(sc->vge_dev, 709 "could not allocate Tx ring DMA tag.\n"); 710 goto fail; 711 } 712 713 /* Create tag for Rx ring. */ 714 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 715 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 716 BUS_SPACE_MAXADDR, /* lowaddr */ 717 BUS_SPACE_MAXADDR, /* highaddr */ 718 NULL, NULL, /* filter, filterarg */ 719 VGE_RX_LIST_SZ, /* maxsize */ 720 1, /* nsegments */ 721 VGE_RX_LIST_SZ, /* maxsegsize */ 722 0, /* flags */ 723 NULL, NULL, /* lockfunc, lockarg */ 724 &sc->vge_cdata.vge_rx_ring_tag); 725 if (error != 0) { 726 device_printf(sc->vge_dev, 727 "could not allocate Rx ring DMA tag.\n"); 728 goto fail; 729 } 730 731 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 732 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag, 733 (void **)&sc->vge_rdata.vge_tx_ring, 734 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 735 &sc->vge_cdata.vge_tx_ring_map); 736 if (error != 0) { 737 device_printf(sc->vge_dev, 738 "could not allocate DMA'able memory for Tx ring.\n"); 739 goto fail; 740 } 741 742 ctx.vge_busaddr = 0; 743 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag, 744 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring, 745 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 746 if (error != 0 || ctx.vge_busaddr == 0) { 747 device_printf(sc->vge_dev, 748 "could not load DMA'able memory for Tx ring.\n"); 749 goto fail; 750 } 751 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr; 752 753 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 754 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag, 755 (void **)&sc->vge_rdata.vge_rx_ring, 756 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 757 &sc->vge_cdata.vge_rx_ring_map); 758 if (error != 0) { 759 device_printf(sc->vge_dev, 760 "could not allocate DMA'able memory for Rx ring.\n"); 761 goto fail; 762 } 763 764 ctx.vge_busaddr = 0; 765 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag, 766 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring, 767 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 768 if (error != 0 || ctx.vge_busaddr == 0) { 769 device_printf(sc->vge_dev, 770 "could not load DMA'able memory for Rx ring.\n"); 771 goto fail; 772 } 773 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr; 774 775 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 776 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ; 777 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ; 778 if ((VGE_ADDR_HI(tx_ring_end) != 779 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) || 780 (VGE_ADDR_HI(rx_ring_end) != 781 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) || 782 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) { 783 device_printf(sc->vge_dev, "4GB boundary crossed, " 784 "switching to 32bit DMA address mode.\n"); 785 vge_dma_free(sc); 786 /* Limit DMA address space to 32bit and try again. */ 787 lowaddr = BUS_SPACE_MAXADDR_32BIT; 788 goto again; 789 } 790 791 /* Create parent buffer tag. */ 792 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 793 1, 0, /* algnmnt, boundary */ 794 VGE_BUF_DMA_MAXADDR, /* lowaddr */ 795 BUS_SPACE_MAXADDR, /* highaddr */ 796 NULL, NULL, /* filter, filterarg */ 797 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 798 0, /* nsegments */ 799 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 800 0, /* flags */ 801 NULL, NULL, /* lockfunc, lockarg */ 802 &sc->vge_cdata.vge_buffer_tag); 803 if (error != 0) { 804 device_printf(sc->vge_dev, 805 "could not create parent buffer DMA tag.\n"); 806 goto fail; 807 } 808 809 /* Create tag for Tx buffers. */ 810 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 811 1, 0, /* algnmnt, boundary */ 812 BUS_SPACE_MAXADDR, /* lowaddr */ 813 BUS_SPACE_MAXADDR, /* highaddr */ 814 NULL, NULL, /* filter, filterarg */ 815 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */ 816 VGE_MAXTXSEGS, /* nsegments */ 817 MCLBYTES, /* maxsegsize */ 818 0, /* flags */ 819 NULL, NULL, /* lockfunc, lockarg */ 820 &sc->vge_cdata.vge_tx_tag); 821 if (error != 0) { 822 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n"); 823 goto fail; 824 } 825 826 /* Create tag for Rx buffers. */ 827 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 828 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 829 BUS_SPACE_MAXADDR, /* lowaddr */ 830 BUS_SPACE_MAXADDR, /* highaddr */ 831 NULL, NULL, /* filter, filterarg */ 832 MCLBYTES, /* maxsize */ 833 1, /* nsegments */ 834 MCLBYTES, /* maxsegsize */ 835 0, /* flags */ 836 NULL, NULL, /* lockfunc, lockarg */ 837 &sc->vge_cdata.vge_rx_tag); 838 if (error != 0) { 839 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n"); 840 goto fail; 841 } 842 843 /* Create DMA maps for Tx buffers. */ 844 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 845 txd = &sc->vge_cdata.vge_txdesc[i]; 846 txd->tx_m = NULL; 847 txd->tx_dmamap = NULL; 848 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0, 849 &txd->tx_dmamap); 850 if (error != 0) { 851 device_printf(sc->vge_dev, 852 "could not create Tx dmamap.\n"); 853 goto fail; 854 } 855 } 856 /* Create DMA maps for Rx buffers. */ 857 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 858 &sc->vge_cdata.vge_rx_sparemap)) != 0) { 859 device_printf(sc->vge_dev, 860 "could not create spare Rx dmamap.\n"); 861 goto fail; 862 } 863 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 864 rxd = &sc->vge_cdata.vge_rxdesc[i]; 865 rxd->rx_m = NULL; 866 rxd->rx_dmamap = NULL; 867 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 868 &rxd->rx_dmamap); 869 if (error != 0) { 870 device_printf(sc->vge_dev, 871 "could not create Rx dmamap.\n"); 872 goto fail; 873 } 874 } 875 876fail: 877 return (error); 878} 879 880static void 881vge_dma_free(struct vge_softc *sc) 882{ 883 struct vge_txdesc *txd; 884 struct vge_rxdesc *rxd; 885 int i; 886 887 /* Tx ring. */ 888 if (sc->vge_cdata.vge_tx_ring_tag != NULL) { 889 if (sc->vge_cdata.vge_tx_ring_map) 890 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag, 891 sc->vge_cdata.vge_tx_ring_map); 892 if (sc->vge_cdata.vge_tx_ring_map && 893 sc->vge_rdata.vge_tx_ring) 894 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag, 895 sc->vge_rdata.vge_tx_ring, 896 sc->vge_cdata.vge_tx_ring_map); 897 sc->vge_rdata.vge_tx_ring = NULL; 898 sc->vge_cdata.vge_tx_ring_map = NULL; 899 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag); 900 sc->vge_cdata.vge_tx_ring_tag = NULL; 901 } 902 /* Rx ring. */ 903 if (sc->vge_cdata.vge_rx_ring_tag != NULL) { 904 if (sc->vge_cdata.vge_rx_ring_map) 905 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag, 906 sc->vge_cdata.vge_rx_ring_map); 907 if (sc->vge_cdata.vge_rx_ring_map && 908 sc->vge_rdata.vge_rx_ring) 909 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag, 910 sc->vge_rdata.vge_rx_ring, 911 sc->vge_cdata.vge_rx_ring_map); 912 sc->vge_rdata.vge_rx_ring = NULL; 913 sc->vge_cdata.vge_rx_ring_map = NULL; 914 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag); 915 sc->vge_cdata.vge_rx_ring_tag = NULL; 916 } 917 /* Tx buffers. */ 918 if (sc->vge_cdata.vge_tx_tag != NULL) { 919 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 920 txd = &sc->vge_cdata.vge_txdesc[i]; 921 if (txd->tx_dmamap != NULL) { 922 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag, 923 txd->tx_dmamap); 924 txd->tx_dmamap = NULL; 925 } 926 } 927 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag); 928 sc->vge_cdata.vge_tx_tag = NULL; 929 } 930 /* Rx buffers. */ 931 if (sc->vge_cdata.vge_rx_tag != NULL) { 932 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 933 rxd = &sc->vge_cdata.vge_rxdesc[i]; 934 if (rxd->rx_dmamap != NULL) { 935 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 936 rxd->rx_dmamap); 937 rxd->rx_dmamap = NULL; 938 } 939 } 940 if (sc->vge_cdata.vge_rx_sparemap != NULL) { 941 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 942 sc->vge_cdata.vge_rx_sparemap); 943 sc->vge_cdata.vge_rx_sparemap = NULL; 944 } 945 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag); 946 sc->vge_cdata.vge_rx_tag = NULL; 947 } 948 949 if (sc->vge_cdata.vge_buffer_tag != NULL) { 950 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag); 951 sc->vge_cdata.vge_buffer_tag = NULL; 952 } 953 if (sc->vge_cdata.vge_ring_tag != NULL) { 954 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag); 955 sc->vge_cdata.vge_ring_tag = NULL; 956 } 957} 958 959/* 960 * Attach the interface. Allocate softc structures, do ifmedia 961 * setup and ethernet/BPF attach. 962 */ 963static int 964vge_attach(device_t dev) 965{ 966 u_char eaddr[ETHER_ADDR_LEN]; 967 struct vge_softc *sc; 968 struct ifnet *ifp; 969 int error = 0, rid; 970 971 sc = device_get_softc(dev); 972 sc->vge_dev = dev; 973 974 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 975 MTX_DEF); 976 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0); 977 978 /* 979 * Map control/status registers. 980 */ 981 pci_enable_busmaster(dev); 982 983 rid = PCIR_BAR(1); 984 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 985 RF_ACTIVE); 986 987 if (sc->vge_res == NULL) { 988 device_printf(dev, "couldn't map ports/memory\n"); 989 error = ENXIO; 990 goto fail; 991 } 992 993 /* Allocate interrupt */ 994 rid = 0; 995 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 996 RF_SHAREABLE | RF_ACTIVE); 997 998 if (sc->vge_irq == NULL) { 999 device_printf(dev, "couldn't map interrupt\n"); 1000 error = ENXIO; 1001 goto fail; 1002 } 1003 1004 /* Reset the adapter. */ 1005 vge_reset(sc); 1006 1007 /* 1008 * Get station address from the EEPROM. 1009 */ 1010 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); 1011 1012 error = vge_dma_alloc(sc); 1013 if (error) 1014 goto fail; 1015 1016 ifp = sc->vge_ifp = if_alloc(IFT_ETHER); 1017 if (ifp == NULL) { 1018 device_printf(dev, "can not if_alloc()\n"); 1019 error = ENOSPC; 1020 goto fail; 1021 } 1022 1023 /* Do MII setup */ 1024 if (mii_phy_probe(dev, &sc->vge_miibus, 1025 vge_ifmedia_upd, vge_ifmedia_sts)) { 1026 device_printf(dev, "MII without any phy!\n"); 1027 error = ENXIO; 1028 goto fail; 1029 } 1030 1031 ifp->if_softc = sc; 1032 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1033 ifp->if_mtu = ETHERMTU; 1034 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1035 ifp->if_ioctl = vge_ioctl; 1036 ifp->if_capabilities = IFCAP_VLAN_MTU; 1037 ifp->if_start = vge_start; 1038 ifp->if_hwassist = VGE_CSUM_FEATURES; 1039 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; 1040 ifp->if_capenable = ifp->if_capabilities; 1041#ifdef DEVICE_POLLING 1042 ifp->if_capabilities |= IFCAP_POLLING; 1043#endif 1044 ifp->if_init = vge_init; 1045 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN); 1046 ifp->if_snd.ifq_drv_maxlen = VGE_IFQ_MAXLEN; 1047 IFQ_SET_READY(&ifp->if_snd); 1048 1049 /* 1050 * Call MI attach routine. 1051 */ 1052 ether_ifattach(ifp, eaddr); 1053 1054 /* Hook interrupt last to avoid having to lock softc */ 1055 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, 1056 NULL, vge_intr, sc, &sc->vge_intrhand); 1057 1058 if (error) { 1059 device_printf(dev, "couldn't set up irq\n"); 1060 ether_ifdetach(ifp); 1061 goto fail; 1062 } 1063 1064fail: 1065 if (error) 1066 vge_detach(dev); 1067 1068 return (error); 1069} 1070 1071/* 1072 * Shutdown hardware and free up resources. This can be called any 1073 * time after the mutex has been initialized. It is called in both 1074 * the error case in attach and the normal detach case so it needs 1075 * to be careful about only freeing resources that have actually been 1076 * allocated. 1077 */ 1078static int 1079vge_detach(device_t dev) 1080{ 1081 struct vge_softc *sc; 1082 struct ifnet *ifp; 1083 1084 sc = device_get_softc(dev); 1085 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized")); 1086 ifp = sc->vge_ifp; 1087 1088#ifdef DEVICE_POLLING 1089 if (ifp->if_capenable & IFCAP_POLLING) 1090 ether_poll_deregister(ifp); 1091#endif 1092 1093 /* These should only be active if attach succeeded */ 1094 if (device_is_attached(dev)) { 1095 ether_ifdetach(ifp); 1096 VGE_LOCK(sc); 1097 vge_stop(sc); 1098 VGE_UNLOCK(sc); 1099 callout_drain(&sc->vge_watchdog); 1100 } 1101 if (sc->vge_miibus) 1102 device_delete_child(dev, sc->vge_miibus); 1103 bus_generic_detach(dev); 1104 1105 if (sc->vge_intrhand) 1106 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand); 1107 if (sc->vge_irq) 1108 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq); 1109 if (sc->vge_res) 1110 bus_release_resource(dev, SYS_RES_MEMORY, 1111 PCIR_BAR(1), sc->vge_res); 1112 if (ifp) 1113 if_free(ifp); 1114 1115 vge_dma_free(sc); 1116 mtx_destroy(&sc->vge_mtx); 1117 1118 return (0); 1119} 1120 1121static void 1122vge_discard_rxbuf(struct vge_softc *sc, int prod) 1123{ 1124 struct vge_rxdesc *rxd; 1125 int i; 1126 1127 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1128 rxd->rx_desc->vge_sts = 0; 1129 rxd->rx_desc->vge_ctl = 0; 1130 1131 /* 1132 * Note: the manual fails to document the fact that for 1133 * proper opration, the driver needs to replentish the RX 1134 * DMA ring 4 descriptors at a time (rather than one at a 1135 * time, like most chips). We can allocate the new buffers 1136 * but we should not set the OWN bits until we're ready 1137 * to hand back 4 of them in one shot. 1138 */ 1139 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1140 for (i = VGE_RXCHUNK; i > 0; i--) { 1141 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1142 rxd = rxd->rxd_prev; 1143 } 1144 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1145 } 1146} 1147 1148static int 1149vge_newbuf(struct vge_softc *sc, int prod) 1150{ 1151 struct vge_rxdesc *rxd; 1152 struct mbuf *m; 1153 bus_dma_segment_t segs[1]; 1154 bus_dmamap_t map; 1155 int i, nsegs; 1156 1157 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1158 if (m == NULL) 1159 return (ENOBUFS); 1160 /* 1161 * This is part of an evil trick to deal with strict-alignment 1162 * architectures. The VIA chip requires RX buffers to be aligned 1163 * on 32-bit boundaries, but that will hose strict-alignment 1164 * architectures. To get around this, we leave some empty space 1165 * at the start of each buffer and for non-strict-alignment hosts, 1166 * we copy the buffer back two bytes to achieve word alignment. 1167 * This is slightly more efficient than allocating a new buffer, 1168 * copying the contents, and discarding the old buffer. 1169 */ 1170 m->m_len = m->m_pkthdr.len = MCLBYTES; 1171 m_adj(m, VGE_RX_BUF_ALIGN); 1172 1173 if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag, 1174 sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1175 m_freem(m); 1176 return (ENOBUFS); 1177 } 1178 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1179 1180 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1181 if (rxd->rx_m != NULL) { 1182 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1183 BUS_DMASYNC_POSTREAD); 1184 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap); 1185 } 1186 map = rxd->rx_dmamap; 1187 rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap; 1188 sc->vge_cdata.vge_rx_sparemap = map; 1189 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1190 BUS_DMASYNC_PREREAD); 1191 rxd->rx_m = m; 1192 1193 rxd->rx_desc->vge_sts = 0; 1194 rxd->rx_desc->vge_ctl = 0; 1195 rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 1196 rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) | 1197 (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I); 1198 1199 /* 1200 * Note: the manual fails to document the fact that for 1201 * proper operation, the driver needs to replenish the RX 1202 * DMA ring 4 descriptors at a time (rather than one at a 1203 * time, like most chips). We can allocate the new buffers 1204 * but we should not set the OWN bits until we're ready 1205 * to hand back 4 of them in one shot. 1206 */ 1207 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1208 for (i = VGE_RXCHUNK; i > 0; i--) { 1209 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1210 rxd = rxd->rxd_prev; 1211 } 1212 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1213 } 1214 1215 return (0); 1216} 1217 1218static int 1219vge_tx_list_init(struct vge_softc *sc) 1220{ 1221 struct vge_ring_data *rd; 1222 struct vge_txdesc *txd; 1223 int i; 1224 1225 VGE_LOCK_ASSERT(sc); 1226 1227 sc->vge_cdata.vge_tx_prodidx = 0; 1228 sc->vge_cdata.vge_tx_considx = 0; 1229 sc->vge_cdata.vge_tx_cnt = 0; 1230 1231 rd = &sc->vge_rdata; 1232 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ); 1233 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1234 txd = &sc->vge_cdata.vge_txdesc[i]; 1235 txd->tx_m = NULL; 1236 txd->tx_desc = &rd->vge_tx_ring[i]; 1237 } 1238 1239 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1240 sc->vge_cdata.vge_tx_ring_map, 1241 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1242 1243 return (0); 1244} 1245 1246static int 1247vge_rx_list_init(struct vge_softc *sc) 1248{ 1249 struct vge_ring_data *rd; 1250 struct vge_rxdesc *rxd; 1251 int i; 1252 1253 VGE_LOCK_ASSERT(sc); 1254 1255 sc->vge_cdata.vge_rx_prodidx = 0; 1256 sc->vge_cdata.vge_head = NULL; 1257 sc->vge_cdata.vge_tail = NULL; 1258 sc->vge_cdata.vge_rx_commit = 0; 1259 1260 rd = &sc->vge_rdata; 1261 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ); 1262 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1263 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1264 rxd->rx_m = NULL; 1265 rxd->rx_desc = &rd->vge_rx_ring[i]; 1266 if (i == 0) 1267 rxd->rxd_prev = 1268 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1]; 1269 else 1270 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1]; 1271 if (vge_newbuf(sc, i) != 0) 1272 return (ENOBUFS); 1273 } 1274 1275 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1276 sc->vge_cdata.vge_rx_ring_map, 1277 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1278 1279 sc->vge_cdata.vge_rx_commit = 0; 1280 1281 return (0); 1282} 1283 1284static void 1285vge_freebufs(struct vge_softc *sc) 1286{ 1287 struct vge_txdesc *txd; 1288 struct vge_rxdesc *rxd; 1289 struct ifnet *ifp; 1290 int i; 1291 1292 VGE_LOCK_ASSERT(sc); 1293 1294 ifp = sc->vge_ifp; 1295 /* 1296 * Free RX and TX mbufs still in the queues. 1297 */ 1298 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1299 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1300 if (rxd->rx_m != NULL) { 1301 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, 1302 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 1303 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, 1304 rxd->rx_dmamap); 1305 m_freem(rxd->rx_m); 1306 rxd->rx_m = NULL; 1307 } 1308 } 1309 1310 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1311 txd = &sc->vge_cdata.vge_txdesc[i]; 1312 if (txd->tx_m != NULL) { 1313 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, 1314 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1315 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, 1316 txd->tx_dmamap); 1317 m_freem(txd->tx_m); 1318 txd->tx_m = NULL; 1319 ifp->if_oerrors++; 1320 } 1321 } 1322} 1323 1324#ifndef __NO_STRICT_ALIGNMENT 1325static __inline void 1326vge_fixup_rx(struct mbuf *m) 1327{ 1328 int i; 1329 uint16_t *src, *dst; 1330 1331 src = mtod(m, uint16_t *); 1332 dst = src - 1; 1333 1334 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1335 *dst++ = *src++; 1336 1337 m->m_data -= ETHER_ALIGN; 1338} 1339#endif 1340 1341/* 1342 * RX handler. We support the reception of jumbo frames that have 1343 * been fragmented across multiple 2K mbuf cluster buffers. 1344 */ 1345static int 1346vge_rxeof(struct vge_softc *sc, int count) 1347{ 1348 struct mbuf *m; 1349 struct ifnet *ifp; 1350 int prod, prog, total_len; 1351 struct vge_rxdesc *rxd; 1352 struct vge_rx_desc *cur_rx; 1353 uint32_t rxstat, rxctl; 1354 1355 VGE_LOCK_ASSERT(sc); 1356 1357 ifp = sc->vge_ifp; 1358 1359 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1360 sc->vge_cdata.vge_rx_ring_map, 1361 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1362 1363 prod = sc->vge_cdata.vge_rx_prodidx; 1364 for (prog = 0; count > 0 && 1365 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; 1366 VGE_RX_DESC_INC(prod)) { 1367 cur_rx = &sc->vge_rdata.vge_rx_ring[prod]; 1368 rxstat = le32toh(cur_rx->vge_sts); 1369 if ((rxstat & VGE_RDSTS_OWN) != 0) 1370 break; 1371 count--; 1372 prog++; 1373 rxctl = le32toh(cur_rx->vge_ctl); 1374 total_len = VGE_RXBYTES(rxstat); 1375 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1376 m = rxd->rx_m; 1377 1378 /* 1379 * If the 'start of frame' bit is set, this indicates 1380 * either the first fragment in a multi-fragment receive, 1381 * or an intermediate fragment. Either way, we want to 1382 * accumulate the buffers. 1383 */ 1384 if ((rxstat & VGE_RXPKT_SOF) != 0) { 1385 if (vge_newbuf(sc, prod) != 0) { 1386 ifp->if_iqdrops++; 1387 VGE_CHAIN_RESET(sc); 1388 vge_discard_rxbuf(sc, prod); 1389 continue; 1390 } 1391 m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN; 1392 if (sc->vge_cdata.vge_head == NULL) { 1393 sc->vge_cdata.vge_head = m; 1394 sc->vge_cdata.vge_tail = m; 1395 } else { 1396 m->m_flags &= ~M_PKTHDR; 1397 sc->vge_cdata.vge_tail->m_next = m; 1398 sc->vge_cdata.vge_tail = m; 1399 } 1400 continue; 1401 } 1402 1403 /* 1404 * Bad/error frames will have the RXOK bit cleared. 1405 * However, there's one error case we want to allow: 1406 * if a VLAN tagged frame arrives and the chip can't 1407 * match it against the CAM filter, it considers this 1408 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 1409 * We don't want to drop the frame though: our VLAN 1410 * filtering is done in software. 1411 * We also want to receive bad-checksummed frames and 1412 * and frames with bad-length. 1413 */ 1414 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1415 (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR | 1416 VGE_RDSTS_CSUMERR)) == 0) { 1417 ifp->if_ierrors++; 1418 /* 1419 * If this is part of a multi-fragment packet, 1420 * discard all the pieces. 1421 */ 1422 VGE_CHAIN_RESET(sc); 1423 vge_discard_rxbuf(sc, prod); 1424 continue; 1425 } 1426 1427 if (vge_newbuf(sc, prod) != 0) { 1428 ifp->if_iqdrops++; 1429 VGE_CHAIN_RESET(sc); 1430 vge_discard_rxbuf(sc, prod); 1431 continue; 1432 } 1433 1434 /* Chain received mbufs. */ 1435 if (sc->vge_cdata.vge_head != NULL) { 1436 m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN); 1437 /* 1438 * Special case: if there's 4 bytes or less 1439 * in this buffer, the mbuf can be discarded: 1440 * the last 4 bytes is the CRC, which we don't 1441 * care about anyway. 1442 */ 1443 if (m->m_len <= ETHER_CRC_LEN) { 1444 sc->vge_cdata.vge_tail->m_len -= 1445 (ETHER_CRC_LEN - m->m_len); 1446 m_freem(m); 1447 } else { 1448 m->m_len -= ETHER_CRC_LEN; 1449 m->m_flags &= ~M_PKTHDR; 1450 sc->vge_cdata.vge_tail->m_next = m; 1451 } 1452 m = sc->vge_cdata.vge_head; 1453 m->m_flags |= M_PKTHDR; 1454 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1455 } else { 1456 m->m_flags |= M_PKTHDR; 1457 m->m_pkthdr.len = m->m_len = 1458 (total_len - ETHER_CRC_LEN); 1459 } 1460 1461#ifndef __NO_STRICT_ALIGNMENT 1462 vge_fixup_rx(m); 1463#endif 1464 m->m_pkthdr.rcvif = ifp; 1465 1466 /* Do RX checksumming if enabled */ 1467 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1468 (rxctl & VGE_RDCTL_FRAG) == 0) { 1469 /* Check IP header checksum */ 1470 if ((rxctl & VGE_RDCTL_IPPKT) != 0) 1471 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1472 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0) 1473 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1474 1475 /* Check TCP/UDP checksum */ 1476 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) && 1477 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1478 m->m_pkthdr.csum_flags |= 1479 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1480 m->m_pkthdr.csum_data = 0xffff; 1481 } 1482 } 1483 1484 if ((rxstat & VGE_RDSTS_VTAG) != 0) { 1485 /* 1486 * The 32-bit rxctl register is stored in little-endian. 1487 * However, the 16-bit vlan tag is stored in big-endian, 1488 * so we have to byte swap it. 1489 */ 1490 m->m_pkthdr.ether_vtag = 1491 bswap16(rxctl & VGE_RDCTL_VLANID); 1492 m->m_flags |= M_VLANTAG; 1493 } 1494 1495 VGE_UNLOCK(sc); 1496 (*ifp->if_input)(ifp, m); 1497 VGE_LOCK(sc); 1498 sc->vge_cdata.vge_head = NULL; 1499 sc->vge_cdata.vge_tail = NULL; 1500 } 1501 1502 if (prog > 0) { 1503 sc->vge_cdata.vge_rx_prodidx = prod; 1504 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1505 sc->vge_cdata.vge_rx_ring_map, 1506 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1507 /* Update residue counter. */ 1508 if (sc->vge_cdata.vge_rx_commit != 0) { 1509 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, 1510 sc->vge_cdata.vge_rx_commit); 1511 sc->vge_cdata.vge_rx_commit = 0; 1512 } 1513 } 1514 return (prog); 1515} 1516 1517static void 1518vge_txeof(struct vge_softc *sc) 1519{ 1520 struct ifnet *ifp; 1521 struct vge_tx_desc *cur_tx; 1522 struct vge_txdesc *txd; 1523 uint32_t txstat; 1524 int cons, prod; 1525 1526 VGE_LOCK_ASSERT(sc); 1527 1528 ifp = sc->vge_ifp; 1529 1530 if (sc->vge_cdata.vge_tx_cnt == 0) 1531 return; 1532 1533 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1534 sc->vge_cdata.vge_tx_ring_map, 1535 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1536 1537 /* 1538 * Go through our tx list and free mbufs for those 1539 * frames that have been transmitted. 1540 */ 1541 cons = sc->vge_cdata.vge_tx_considx; 1542 prod = sc->vge_cdata.vge_tx_prodidx; 1543 for (; cons != prod; VGE_TX_DESC_INC(cons)) { 1544 cur_tx = &sc->vge_rdata.vge_tx_ring[cons]; 1545 txstat = le32toh(cur_tx->vge_sts); 1546 if ((txstat & VGE_TDSTS_OWN) != 0) 1547 break; 1548 sc->vge_cdata.vge_tx_cnt--; 1549 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1550 1551 txd = &sc->vge_cdata.vge_txdesc[cons]; 1552 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1553 BUS_DMASYNC_POSTWRITE); 1554 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap); 1555 1556 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1557 __func__)); 1558 m_freem(txd->tx_m); 1559 txd->tx_m = NULL; 1560 txd->tx_desc->vge_frag[0].vge_addrhi = 0; 1561 } 1562 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1563 sc->vge_cdata.vge_tx_ring_map, 1564 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1565 sc->vge_cdata.vge_tx_considx = cons; 1566 if (sc->vge_cdata.vge_tx_cnt == 0) 1567 sc->vge_timer = 0; 1568 else { 1569 /* 1570 * If not all descriptors have been released reaped yet, 1571 * reload the timer so that we will eventually get another 1572 * interrupt that will cause us to re-enter this routine. 1573 * This is done in case the transmitter has gone idle. 1574 */ 1575 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1576 } 1577} 1578 1579static void 1580vge_tick(void *xsc) 1581{ 1582 struct vge_softc *sc; 1583 struct ifnet *ifp; 1584 struct mii_data *mii; 1585 1586 sc = xsc; 1587 ifp = sc->vge_ifp; 1588 VGE_LOCK_ASSERT(sc); 1589 mii = device_get_softc(sc->vge_miibus); 1590 1591 mii_tick(mii); 1592 if (sc->vge_link) { 1593 if (!(mii->mii_media_status & IFM_ACTIVE)) { 1594 sc->vge_link = 0; 1595 if_link_state_change(sc->vge_ifp, 1596 LINK_STATE_DOWN); 1597 } 1598 } else { 1599 if (mii->mii_media_status & IFM_ACTIVE && 1600 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1601 sc->vge_link = 1; 1602 if_link_state_change(sc->vge_ifp, 1603 LINK_STATE_UP); 1604 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1605 vge_start_locked(ifp); 1606 } 1607 } 1608 1609 return; 1610} 1611 1612#ifdef DEVICE_POLLING 1613static int 1614vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) 1615{ 1616 struct vge_softc *sc = ifp->if_softc; 1617 int rx_npkts = 0; 1618 1619 VGE_LOCK(sc); 1620 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1621 goto done; 1622 1623 rx_npkts = vge_rxeof(sc, count); 1624 vge_txeof(sc); 1625 1626 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1627 vge_start_locked(ifp); 1628 1629 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1630 u_int32_t status; 1631 status = CSR_READ_4(sc, VGE_ISR); 1632 if (status == 0xFFFFFFFF) 1633 goto done; 1634 if (status) 1635 CSR_WRITE_4(sc, VGE_ISR, status); 1636 1637 /* 1638 * XXX check behaviour on receiver stalls. 1639 */ 1640 1641 if (status & VGE_ISR_TXDMA_STALL || 1642 status & VGE_ISR_RXDMA_STALL) { 1643 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1644 vge_init_locked(sc); 1645 } 1646 1647 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1648 vge_rxeof(sc, count); 1649 ifp->if_ierrors++; 1650 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1651 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1652 } 1653 } 1654done: 1655 VGE_UNLOCK(sc); 1656 return (rx_npkts); 1657} 1658#endif /* DEVICE_POLLING */ 1659 1660static void 1661vge_intr(void *arg) 1662{ 1663 struct vge_softc *sc; 1664 struct ifnet *ifp; 1665 u_int32_t status; 1666 1667 sc = arg; 1668 1669 if (sc->suspended) { 1670 return; 1671 } 1672 1673 VGE_LOCK(sc); 1674 ifp = sc->vge_ifp; 1675 1676 if (!(ifp->if_flags & IFF_UP)) { 1677 VGE_UNLOCK(sc); 1678 return; 1679 } 1680 1681#ifdef DEVICE_POLLING 1682 if (ifp->if_capenable & IFCAP_POLLING) { 1683 VGE_UNLOCK(sc); 1684 return; 1685 } 1686#endif 1687 1688 /* Disable interrupts */ 1689 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1690 1691 for (;;) { 1692 1693 status = CSR_READ_4(sc, VGE_ISR); 1694 /* If the card has gone away the read returns 0xffff. */ 1695 if (status == 0xFFFFFFFF) 1696 break; 1697 1698 if (status) 1699 CSR_WRITE_4(sc, VGE_ISR, status); 1700 1701 if ((status & VGE_INTRS) == 0) 1702 break; 1703 1704 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1705 vge_rxeof(sc, VGE_RX_DESC_CNT); 1706 1707 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1708 vge_rxeof(sc, VGE_RX_DESC_CNT); 1709 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1710 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1711 } 1712 1713 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0)) 1714 vge_txeof(sc); 1715 1716 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1717 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1718 vge_init_locked(sc); 1719 } 1720 1721 if (status & VGE_ISR_LINKSTS) 1722 vge_tick(sc); 1723 } 1724 1725 /* Re-enable interrupts */ 1726 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1727 1728 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1729 vge_start_locked(ifp); 1730 1731 VGE_UNLOCK(sc); 1732 1733 return; 1734} 1735 1736static int 1737vge_encap(struct vge_softc *sc, struct mbuf **m_head) 1738{ 1739 struct vge_txdesc *txd; 1740 struct vge_tx_frag *frag; 1741 struct mbuf *m; 1742 bus_dma_segment_t txsegs[VGE_MAXTXSEGS]; 1743 int error, i, nsegs, padlen; 1744 uint32_t cflags; 1745 1746 VGE_LOCK_ASSERT(sc); 1747 1748 M_ASSERTPKTHDR((*m_head)); 1749 1750 /* Argh. This chip does not autopad short frames. */ 1751 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1752 m = *m_head; 1753 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len; 1754 if (M_WRITABLE(m) == 0) { 1755 /* Get a writable copy. */ 1756 m = m_dup(*m_head, M_DONTWAIT); 1757 m_freem(*m_head); 1758 if (m == NULL) { 1759 *m_head = NULL; 1760 return (ENOBUFS); 1761 } 1762 *m_head = m; 1763 } 1764 if (M_TRAILINGSPACE(m) < padlen) { 1765 m = m_defrag(m, M_DONTWAIT); 1766 if (m == NULL) { 1767 m_freem(*m_head); 1768 *m_head = NULL; 1769 return (ENOBUFS); 1770 } 1771 } 1772 /* 1773 * Manually pad short frames, and zero the pad space 1774 * to avoid leaking data. 1775 */ 1776 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1777 m->m_pkthdr.len += padlen; 1778 m->m_len = m->m_pkthdr.len; 1779 *m_head = m; 1780 } 1781 1782 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx]; 1783 1784 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1785 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1786 if (error == EFBIG) { 1787 m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS); 1788 if (m == NULL) { 1789 m_freem(*m_head); 1790 *m_head = NULL; 1791 return (ENOMEM); 1792 } 1793 *m_head = m; 1794 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1795 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1796 if (error != 0) { 1797 m_freem(*m_head); 1798 *m_head = NULL; 1799 return (error); 1800 } 1801 } else if (error != 0) 1802 return (error); 1803 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1804 BUS_DMASYNC_PREWRITE); 1805 1806 m = *m_head; 1807 cflags = 0; 1808 1809 /* Configure checksum offload. */ 1810 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1811 cflags |= VGE_TDCTL_IPCSUM; 1812 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1813 cflags |= VGE_TDCTL_TCPCSUM; 1814 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1815 cflags |= VGE_TDCTL_UDPCSUM; 1816 1817 /* Configure VLAN. */ 1818 if ((m->m_flags & M_VLANTAG) != 0) 1819 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG; 1820 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16); 1821 /* 1822 * XXX 1823 * Velocity family seems to support TSO but no information 1824 * for MSS configuration is available. Also the number of 1825 * fragments supported by a descriptor is too small to hold 1826 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF, 1827 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build 1828 * longer chain of buffers but no additional information is 1829 * available. 1830 * 1831 * When telling the chip how many segments there are, we 1832 * must use nsegs + 1 instead of just nsegs. Darned if I 1833 * know why. This also means we can't use the last fragment 1834 * field of Tx descriptor. 1835 */ 1836 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) | 1837 VGE_TD_LS_NORM); 1838 for (i = 0; i < nsegs; i++) { 1839 frag = &txd->tx_desc->vge_frag[i]; 1840 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr)); 1841 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) | 1842 (VGE_BUFLEN(txsegs[i].ds_len) << 16)); 1843 } 1844 1845 sc->vge_cdata.vge_tx_cnt++; 1846 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx); 1847 1848 /* 1849 * Finally request interrupt and give the first descriptor 1850 * ownership to hardware. 1851 */ 1852 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC); 1853 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN); 1854 txd->tx_m = m; 1855 1856 return (0); 1857} 1858 1859/* 1860 * Main transmit routine. 1861 */ 1862 1863static void 1864vge_start(struct ifnet *ifp) 1865{ 1866 struct vge_softc *sc; 1867 1868 sc = ifp->if_softc; 1869 VGE_LOCK(sc); 1870 vge_start_locked(ifp); 1871 VGE_UNLOCK(sc); 1872} 1873 1874 1875static void 1876vge_start_locked(struct ifnet *ifp) 1877{ 1878 struct vge_softc *sc; 1879 struct vge_txdesc *txd; 1880 struct mbuf *m_head; 1881 int enq, idx; 1882 1883 sc = ifp->if_softc; 1884 1885 VGE_LOCK_ASSERT(sc); 1886 1887 if (sc->vge_link == 0 || 1888 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1889 IFF_DRV_RUNNING) 1890 return; 1891 1892 idx = sc->vge_cdata.vge_tx_prodidx; 1893 VGE_TX_DESC_DEC(idx); 1894 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1895 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) { 1896 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1897 if (m_head == NULL) 1898 break; 1899 /* 1900 * Pack the data into the transmit ring. If we 1901 * don't have room, set the OACTIVE flag and wait 1902 * for the NIC to drain the ring. 1903 */ 1904 if (vge_encap(sc, &m_head)) { 1905 if (m_head == NULL) 1906 break; 1907 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1908 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1909 break; 1910 } 1911 1912 txd = &sc->vge_cdata.vge_txdesc[idx]; 1913 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q); 1914 VGE_TX_DESC_INC(idx); 1915 1916 enq++; 1917 /* 1918 * If there's a BPF listener, bounce a copy of this frame 1919 * to him. 1920 */ 1921 ETHER_BPF_MTAP(ifp, m_head); 1922 } 1923 1924 if (enq > 0) { 1925 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1926 sc->vge_cdata.vge_tx_ring_map, 1927 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1928 /* Issue a transmit command. */ 1929 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 1930 /* 1931 * Use the countdown timer for interrupt moderation. 1932 * 'TX done' interrupts are disabled. Instead, we reset the 1933 * countdown timer, which will begin counting until it hits 1934 * the value in the SSTIMER register, and then trigger an 1935 * interrupt. Each time we set the TIMER0_ENABLE bit, the 1936 * the timer count is reloaded. Only when the transmitter 1937 * is idle will the timer hit 0 and an interrupt fire. 1938 */ 1939 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1940 1941 /* 1942 * Set a timeout in case the chip goes out to lunch. 1943 */ 1944 sc->vge_timer = 5; 1945 } 1946} 1947 1948static void 1949vge_init(void *xsc) 1950{ 1951 struct vge_softc *sc = xsc; 1952 1953 VGE_LOCK(sc); 1954 vge_init_locked(sc); 1955 VGE_UNLOCK(sc); 1956} 1957 1958static void 1959vge_init_locked(struct vge_softc *sc) 1960{ 1961 struct ifnet *ifp = sc->vge_ifp; 1962 struct mii_data *mii; 1963 int error, i; 1964 1965 VGE_LOCK_ASSERT(sc); 1966 mii = device_get_softc(sc->vge_miibus); 1967 1968 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1969 return; 1970 1971 /* 1972 * Cancel pending I/O and free all RX/TX buffers. 1973 */ 1974 vge_stop(sc); 1975 vge_reset(sc); 1976 1977 /* 1978 * Initialize the RX and TX descriptors and mbufs. 1979 */ 1980 1981 error = vge_rx_list_init(sc); 1982 if (error != 0) { 1983 device_printf(sc->vge_dev, "no memory for Rx buffers.\n"); 1984 return; 1985 } 1986 vge_tx_list_init(sc); 1987 1988 /* Set our station address */ 1989 for (i = 0; i < ETHER_ADDR_LEN; i++) 1990 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]); 1991 1992 /* 1993 * Set receive FIFO threshold. Also allow transmission and 1994 * reception of VLAN tagged frames. 1995 */ 1996 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT); 1997 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2); 1998 1999 /* Set DMA burst length */ 2000 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 2001 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 2002 2003 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); 2004 2005 /* Set collision backoff algorithm */ 2006 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| 2007 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); 2008 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 2009 2010 /* Disable LPSEL field in priority resolution */ 2011 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 2012 2013 /* 2014 * Load the addresses of the DMA queues into the chip. 2015 * Note that we only use one transmit queue. 2016 */ 2017 2018 CSR_WRITE_4(sc, VGE_TXDESC_HIADDR, 2019 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)); 2020 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 2021 VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr)); 2022 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 2023 2024 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 2025 VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr)); 2026 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 2027 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 2028 2029 /* Enable and wake up the RX descriptor queue */ 2030 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 2031 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 2032 2033 /* Enable the TX descriptor queue */ 2034 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 2035 2036 /* Set up the receive filter -- allow large frames for VLANs. */ 2037 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT); 2038 2039 /* If we want promiscuous mode, set the allframes bit. */ 2040 if (ifp->if_flags & IFF_PROMISC) { 2041 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 2042 } 2043 2044 /* Set capture broadcast bit to capture broadcast frames. */ 2045 if (ifp->if_flags & IFF_BROADCAST) { 2046 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); 2047 } 2048 2049 /* Set multicast bit to capture multicast frames. */ 2050 if (ifp->if_flags & IFF_MULTICAST) { 2051 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); 2052 } 2053 2054 /* Init the cam filter. */ 2055 vge_cam_clear(sc); 2056 2057 /* Init the multicast filter. */ 2058 vge_setmulti(sc); 2059 2060 /* Enable flow control */ 2061 2062 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 2063 2064 /* Enable jumbo frame reception (if desired) */ 2065 2066 /* Start the MAC. */ 2067 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 2068 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 2069 CSR_WRITE_1(sc, VGE_CRS0, 2070 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); 2071 2072 /* 2073 * Configure one-shot timer for microsecond 2074 * resolution and load it for 500 usecs. 2075 */ 2076 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES); 2077 CSR_WRITE_2(sc, VGE_SSTIMER, 400); 2078 2079 /* 2080 * Configure interrupt moderation for receive. Enable 2081 * the holdoff counter and load it, and set the RX 2082 * suppression count to the number of descriptors we 2083 * want to allow before triggering an interrupt. 2084 * The holdoff timer is in units of 20 usecs. 2085 */ 2086 2087#ifdef notyet 2088 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE); 2089 /* Select the interrupt holdoff timer page. */ 2090 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 2091 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 2092 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */ 2093 2094 /* Enable use of the holdoff timer. */ 2095 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); 2096 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD); 2097 2098 /* Select the RX suppression threshold page. */ 2099 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 2100 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); 2101 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */ 2102 2103 /* Restore the page select bits. */ 2104 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 2105 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 2106#endif 2107 2108#ifdef DEVICE_POLLING 2109 /* 2110 * Disable interrupts if we are polling. 2111 */ 2112 if (ifp->if_capenable & IFCAP_POLLING) { 2113 CSR_WRITE_4(sc, VGE_IMR, 0); 2114 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2115 } else /* otherwise ... */ 2116#endif 2117 { 2118 /* 2119 * Enable interrupts. 2120 */ 2121 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2122 CSR_WRITE_4(sc, VGE_ISR, 0); 2123 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2124 } 2125 2126 mii_mediachg(mii); 2127 2128 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2129 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2130 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2131 2132 sc->vge_link = 0; 2133} 2134 2135/* 2136 * Set media options. 2137 */ 2138static int 2139vge_ifmedia_upd(struct ifnet *ifp) 2140{ 2141 struct vge_softc *sc; 2142 struct mii_data *mii; 2143 2144 sc = ifp->if_softc; 2145 VGE_LOCK(sc); 2146 mii = device_get_softc(sc->vge_miibus); 2147 mii_mediachg(mii); 2148 VGE_UNLOCK(sc); 2149 2150 return (0); 2151} 2152 2153/* 2154 * Report current media status. 2155 */ 2156static void 2157vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2158{ 2159 struct vge_softc *sc; 2160 struct mii_data *mii; 2161 2162 sc = ifp->if_softc; 2163 mii = device_get_softc(sc->vge_miibus); 2164 2165 VGE_LOCK(sc); 2166 mii_pollstat(mii); 2167 VGE_UNLOCK(sc); 2168 ifmr->ifm_active = mii->mii_media_active; 2169 ifmr->ifm_status = mii->mii_media_status; 2170 2171 return; 2172} 2173 2174static void 2175vge_miibus_statchg(device_t dev) 2176{ 2177 struct vge_softc *sc; 2178 struct mii_data *mii; 2179 struct ifmedia_entry *ife; 2180 2181 sc = device_get_softc(dev); 2182 mii = device_get_softc(sc->vge_miibus); 2183 ife = mii->mii_media.ifm_cur; 2184 2185 /* 2186 * If the user manually selects a media mode, we need to turn 2187 * on the forced MAC mode bit in the DIAGCTL register. If the 2188 * user happens to choose a full duplex mode, we also need to 2189 * set the 'force full duplex' bit. This applies only to 2190 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 2191 * mode is disabled, and in 1000baseT mode, full duplex is 2192 * always implied, so we turn on the forced mode bit but leave 2193 * the FDX bit cleared. 2194 */ 2195 2196 switch (IFM_SUBTYPE(ife->ifm_media)) { 2197 case IFM_AUTO: 2198 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2199 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2200 break; 2201 case IFM_1000_T: 2202 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2203 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2204 break; 2205 case IFM_100_TX: 2206 case IFM_10_T: 2207 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2208 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 2209 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2210 } else { 2211 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2212 } 2213 break; 2214 default: 2215 device_printf(dev, "unknown media type: %x\n", 2216 IFM_SUBTYPE(ife->ifm_media)); 2217 break; 2218 } 2219 2220 return; 2221} 2222 2223static int 2224vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2225{ 2226 struct vge_softc *sc = ifp->if_softc; 2227 struct ifreq *ifr = (struct ifreq *) data; 2228 struct mii_data *mii; 2229 int error = 0; 2230 2231 switch (command) { 2232 case SIOCSIFMTU: 2233 if (ifr->ifr_mtu > VGE_JUMBO_MTU) 2234 error = EINVAL; 2235 ifp->if_mtu = ifr->ifr_mtu; 2236 break; 2237 case SIOCSIFFLAGS: 2238 VGE_LOCK(sc); 2239 if (ifp->if_flags & IFF_UP) { 2240 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2241 ifp->if_flags & IFF_PROMISC && 2242 !(sc->vge_if_flags & IFF_PROMISC)) { 2243 CSR_SETBIT_1(sc, VGE_RXCTL, 2244 VGE_RXCTL_RX_PROMISC); 2245 vge_setmulti(sc); 2246 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2247 !(ifp->if_flags & IFF_PROMISC) && 2248 sc->vge_if_flags & IFF_PROMISC) { 2249 CSR_CLRBIT_1(sc, VGE_RXCTL, 2250 VGE_RXCTL_RX_PROMISC); 2251 vge_setmulti(sc); 2252 } else 2253 vge_init_locked(sc); 2254 } else { 2255 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2256 vge_stop(sc); 2257 } 2258 sc->vge_if_flags = ifp->if_flags; 2259 VGE_UNLOCK(sc); 2260 break; 2261 case SIOCADDMULTI: 2262 case SIOCDELMULTI: 2263 VGE_LOCK(sc); 2264 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2265 vge_setmulti(sc); 2266 VGE_UNLOCK(sc); 2267 break; 2268 case SIOCGIFMEDIA: 2269 case SIOCSIFMEDIA: 2270 mii = device_get_softc(sc->vge_miibus); 2271 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2272 break; 2273 case SIOCSIFCAP: 2274 { 2275 int mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2276#ifdef DEVICE_POLLING 2277 if (mask & IFCAP_POLLING) { 2278 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2279 error = ether_poll_register(vge_poll, ifp); 2280 if (error) 2281 return(error); 2282 VGE_LOCK(sc); 2283 /* Disable interrupts */ 2284 CSR_WRITE_4(sc, VGE_IMR, 0); 2285 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2286 ifp->if_capenable |= IFCAP_POLLING; 2287 VGE_UNLOCK(sc); 2288 } else { 2289 error = ether_poll_deregister(ifp); 2290 /* Enable interrupts. */ 2291 VGE_LOCK(sc); 2292 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2293 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2294 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2295 ifp->if_capenable &= ~IFCAP_POLLING; 2296 VGE_UNLOCK(sc); 2297 } 2298 } 2299#endif /* DEVICE_POLLING */ 2300 VGE_LOCK(sc); 2301 if ((mask & IFCAP_TXCSUM) != 0 && 2302 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2303 ifp->if_capenable ^= IFCAP_TXCSUM; 2304 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2305 ifp->if_hwassist |= VGE_CSUM_FEATURES; 2306 else 2307 ifp->if_hwassist &= ~VGE_CSUM_FEATURES; 2308 } 2309 if ((mask & IFCAP_RXCSUM) != 0 && 2310 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 2311 ifp->if_capenable ^= IFCAP_RXCSUM; 2312 VGE_UNLOCK(sc); 2313 } 2314 break; 2315 default: 2316 error = ether_ioctl(ifp, command, data); 2317 break; 2318 } 2319 2320 return (error); 2321} 2322 2323static void 2324vge_watchdog(void *arg) 2325{ 2326 struct vge_softc *sc; 2327 struct ifnet *ifp; 2328 2329 sc = arg; 2330 VGE_LOCK_ASSERT(sc); 2331 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2332 if (sc->vge_timer == 0 || --sc->vge_timer > 0) 2333 return; 2334 2335 ifp = sc->vge_ifp; 2336 if_printf(ifp, "watchdog timeout\n"); 2337 ifp->if_oerrors++; 2338 2339 vge_txeof(sc); 2340 vge_rxeof(sc, VGE_RX_DESC_CNT); 2341 2342 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2343 vge_init_locked(sc); 2344 2345 return; 2346} 2347 2348/* 2349 * Stop the adapter and free any mbufs allocated to the 2350 * RX and TX lists. 2351 */ 2352static void 2353vge_stop(struct vge_softc *sc) 2354{ 2355 struct ifnet *ifp; 2356 2357 VGE_LOCK_ASSERT(sc); 2358 ifp = sc->vge_ifp; 2359 sc->vge_timer = 0; 2360 callout_stop(&sc->vge_watchdog); 2361 2362 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2363 2364 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2365 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2366 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2367 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2368 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2369 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2370 2371 VGE_CHAIN_RESET(sc); 2372 vge_txeof(sc); 2373 vge_freebufs(sc); 2374} 2375 2376/* 2377 * Device suspend routine. Stop the interface and save some PCI 2378 * settings in case the BIOS doesn't restore them properly on 2379 * resume. 2380 */ 2381static int 2382vge_suspend(device_t dev) 2383{ 2384 struct vge_softc *sc; 2385 2386 sc = device_get_softc(dev); 2387 2388 VGE_LOCK(sc); 2389 vge_stop(sc); 2390 2391 sc->suspended = 1; 2392 VGE_UNLOCK(sc); 2393 2394 return (0); 2395} 2396 2397/* 2398 * Device resume routine. Restore some PCI settings in case the BIOS 2399 * doesn't, re-enable busmastering, and restart the interface if 2400 * appropriate. 2401 */ 2402static int 2403vge_resume(device_t dev) 2404{ 2405 struct vge_softc *sc; 2406 struct ifnet *ifp; 2407 2408 sc = device_get_softc(dev); 2409 ifp = sc->vge_ifp; 2410 2411 /* reenable busmastering */ 2412 pci_enable_busmaster(dev); 2413 pci_enable_io(dev, SYS_RES_MEMORY); 2414 2415 /* reinitialize interface if necessary */ 2416 VGE_LOCK(sc); 2417 if (ifp->if_flags & IFF_UP) { 2418 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2419 vge_init_locked(sc); 2420 } 2421 sc->suspended = 0; 2422 VGE_UNLOCK(sc); 2423 2424 return (0); 2425} 2426 2427/* 2428 * Stop all chip I/O so that the kernel's probe routines don't 2429 * get confused by errant DMAs when rebooting. 2430 */ 2431static int 2432vge_shutdown(device_t dev) 2433{ 2434 struct vge_softc *sc; 2435 2436 sc = device_get_softc(dev); 2437 2438 VGE_LOCK(sc); 2439 vge_stop(sc); 2440 VGE_UNLOCK(sc); 2441 2442 return (0); 2443} 2444