/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

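/*
 * Worked example of the 48-bit addressing scheme described above,
 * assuming the usual VGE_ADDR_LO()/VGE_ADDR_HI() split from
 * if_vgevar.h: for a bus address of 0x0000012345678000,
 * VGE_ADDR_LO() yields 0x45678000 for a descriptor's 32-bit address
 * field and VGE_ADDR_HI() yields 0x0123 for its 16-bit high-address
 * field.
 */
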
/*
 * Various supported device vendors/types and their names.
 */
static struct vge_type vge_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_61XX,
		"VIA Networking Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int vge_probe		(device_t);
static int vge_attach		(device_t);
static int vge_detach		(device_t);

static int vge_encap		(struct vge_softc *, struct mbuf *, int);

static void vge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void vge_dma_map_rx_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static void vge_dma_map_tx_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int vge_allocmem		(device_t, struct vge_softc *);
static int vge_newbuf		(struct vge_softc *, int, struct mbuf *);
static int vge_rx_list_init	(struct vge_softc *);
static int vge_tx_list_init	(struct vge_softc *);
#ifdef VGE_FIXUP_RX
static __inline void vge_fixup_rx
				(struct mbuf *);
#endif
static void vge_rxeof		(struct vge_softc *);
static void vge_txeof		(struct vge_softc *);
static void vge_intr		(void *);
static void vge_tick		(void *);
static void vge_tx_task		(void *, int);
static void vge_start		(struct ifnet *);
static int vge_ioctl		(struct ifnet *, u_long, caddr_t);
static void vge_init		(void *);
static void vge_stop		(struct vge_softc *);
static void vge_watchdog	(struct ifnet *);
static int vge_suspend		(device_t);
static int vge_resume		(device_t);
static void vge_shutdown	(device_t);
static int vge_ifmedia_upd	(struct ifnet *);
static void vge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
static void vge_eeprom_getword	(struct vge_softc *, int, u_int16_t *);
#endif
static void vge_read_eeprom	(struct vge_softc *, caddr_t, int, int, int);

static void vge_miipoll_start	(struct vge_softc *);
static void vge_miipoll_stop	(struct vge_softc *);
static int vge_miibus_readreg	(device_t, int, int);
static int vge_miibus_writereg	(device_t, int, int, int);
static void vge_miibus_statchg	(device_t);

static void vge_cam_clear	(struct vge_softc *);
static int vge_cam_set		(struct vge_softc *, uint8_t *);
#if __FreeBSD_version < 502113
static uint32_t vge_mchash	(uint8_t *);
#endif
static void vge_setmulti	(struct vge_softc *);
static void vge_reset		(struct vge_softc *);

#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14

static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	{ 0, 0 }
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(vge, cardbus, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(sc, addr, dest)
	struct vge_softc	*sc;
	int			addr;
	u_int16_t		*dest;
{
	register int		i;
	u_int16_t		word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;

	return;
}
#endif

/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(sc, dest, off, cnt, swap)
	struct vge_softc	*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
#ifdef VGE_EEPROM
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

static void
vge_miipoll_stop(sc)
	struct vge_softc	*sc;
{
	int			i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");

	return;
}

static void
vge_miipoll_start(sc)
	struct vge_softc	*sc;
{
	int			i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to start MII autopoll\n");

	return;
}

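/*
 * Read a PHY register. The manual MII access registers are shared
 * with the chip's autopoll state machine, so the access is bracketed
 * by vge_miipoll_stop()/vge_miipoll_start().
 */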
static int
vge_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct vge_softc	*sc;
	int			i;
	u_int16_t		rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);

	return (rval);
}

static int
vge_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct vge_softc	*sc;
	int			i, rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);

	return (rval);
}

static void
vge_cam_clear(sc)
	struct vge_softc	*sc;
{
	int			i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;

	return;
}

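/*
 * Program a CAM filter entry. The CAM is accessed through an eight
 * byte window (VGE_CAM0..VGE_CAM7): the PAGESEL bits in VGE_CAMCTL
 * select whether the window refers to the CAM data, the CAM mask
 * bits or the multicast hash registers, and VGE_CAMADDR selects
 * which filter entry the window addresses.
 */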
static int
vge_cam_set(sc, addr)
	struct vge_softc	*sc;
	uint8_t			*addr;
{
	int			i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return(ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/*
	 * Set the mask bit that enables this filter: each of the
	 * eight mask registers covers eight CAM entries, so entry
	 * N maps to bit (N & 7) of register VGE_CAM0 + (N / 8).
	 */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}

#if __FreeBSD_version < 502113
static uint32_t
vge_mchash(addr)
	uint8_t			*addr;
{
	uint32_t		crc, carry;
	int			idx, bit;
	uint8_t			data;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
			crc <<= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	return(crc);
}
#endif

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_setmulti(sc)
	struct vge_softc	*sc;
{
	struct ifnet		*ifp;
	int			error = 0/*, h = 0*/;
	struct ifmultiaddr	*ifma;
	u_int32_t		h, hashes[2] = { 0, 0 };

	ifp = sc->vge_ifp;

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
#if __FreeBSD_version < 502113
			h = vge_mchash(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr)) >> 26;
#else
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
#endif
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
	IF_ADDR_UNLOCK(ifp);

	return;
}

static void
vge_reset(sc)
	struct vge_softc	*sc;
{
	register int		i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM reload timed out\n");
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

	return;
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(dev)
	device_t		dev;
{
	struct vge_type		*t;
	struct vge_softc	*sc;

	t = vge_devs;
	sc = device_get_softc(dev);

	while (t->vge_name != NULL) {
		if ((pci_get_vendor(dev) == t->vge_vid) &&
		    (pci_get_device(dev) == t->vge_did)) {
			device_set_desc(dev, t->vge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

static void
vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	bus_size_t		mapsize;
	int			error;
{

	struct vge_dmaload_arg	*ctx;
	struct vge_rx_desc	*d = NULL;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */

	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */

	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		device_printf(ctx->sc->vge_dev,
		    "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;

	return;
}

static void
vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	bus_size_t		mapsize;
	int			error;
{
	struct vge_dmaload_arg	*ctx;
	struct vge_tx_desc	*d = NULL;
	struct vge_tx_frag	*f;
	int			i = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */

	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */

	if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
		ctx->vge_maxsegs = 0;
		return;
	}

	for (i = 0; i < nseg; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames */

	if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    ctx->vge_m0->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
	d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;

	if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;

	ctx->vge_maxsegs = nseg;

	return;
}

/*
 * Map a single buffer address.
 */

static void
vge_dma_map_addr(arg, segs, nseg, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	int			error;
{
	bus_addr_t		*addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;

	return;
}

static int
vge_allocmem(dev, sc)
	device_t		dev;
	struct vge_softc	*sc;
{
	int			error;
	int			nseg;
	int			i;

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_mtag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
	    (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->vge_ldata.vge_tx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the TX ring. */

	error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list,
	    VGE_TX_LIST_SZ, vge_dma_map_addr,
	    &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT);

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
		    &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
	    (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->vge_ldata.vge_rx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the RX ring. */

	error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list,
	    VGE_RX_LIST_SZ, vge_dma_map_addr,
	    &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT);

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
		    &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(dev)
	device_t		dev;
{
	u_char			eaddr[ETHER_ADDR_LEN];
	struct vge_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->vge_dev = dev;

	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = VGE_PCI_LOMEM;
	sc->vge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->vge_res == NULL) {
		printf("vge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->vge_btag = rman_get_bustag(sc->vge_res);
	sc->vge_bhandle = rman_get_bushandle(sc->vge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->vge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->vge_irq == NULL) {
		printf("vge%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);

	sc->vge_unit = unit;

#if __FreeBSD_version < 502113
	printf("vge%d: Ethernet address: %6D\n", unit, eaddr, ":");
#endif

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define VGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MAXBSIZE, VGE_NSEG_NEW,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->vge_parent_tag);
	if (error)
		goto fail;

	error = vge_allocmem(dev, sc);

	if (error)
		goto fail;

	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		printf("vge%d: can not if_alloc()\n", sc->vge_unit);
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->vge_miibus,
	    vge_ifmedia_upd, vge_ifmedia_sts)) {
		printf("vge%d: MII without any phy!\n", sc->vge_unit);
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_start = vge_start;
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
#ifdef DEVICE_POLLING
#ifdef IFCAP_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
#endif
	ifp->if_watchdog = vge_watchdog;
	ifp->if_init = vge_init;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = VGE_IFQ_MAXLEN;
	ifp->if_capenable = ifp->if_capabilities;

	TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    vge_intr, sc, &sc->vge_intrhand);

	if (error) {
		printf("vge%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vge_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(dev)
	device_t		dev;
{
	struct vge_softc	*sc;
	struct ifnet		*ifp;
	int			i;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		vge_stop(sc);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to vge_ioctl(),
		 * which will try to call vge_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the vge_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    VGE_PCI_LOMEM, sc->vge_res);

	/* Unload and free the RX DMA ring memory and map */

	if (sc->vge_ldata.vge_rx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
		    sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
		    sc->vge_ldata.vge_rx_list,
		    sc->vge_ldata.vge_rx_list_map);
		bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */

	if (sc->vge_ldata.vge_tx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
		    sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
		    sc->vge_ldata.vge_tx_list,
		    sc->vge_ldata.vge_tx_list_map);
		bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */

	if (sc->vge_ldata.vge_mtag) {
		for (i = 0; i < VGE_TX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_tx_dmamap[i]);
		for (i = 0; i < VGE_RX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_rx_dmamap[i]);
		bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	}

	if (sc->vge_parent_tag)
		bus_dma_tag_destroy(sc->vge_parent_tag);

	mtx_destroy(&sc->vge_mtx);

	return (0);
}

static int
vge_newbuf(sc, idx, m)
	struct vge_softc	*sc;
	int			idx;
	struct mbuf		*m;
{
	struct vge_dmaload_arg	arg;
	struct mbuf		*n = NULL;
	int			i, error;

	if (m == NULL) {
		n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return (ENOBUFS);
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_maxsegs = 1;
	arg.vge_flags = 0;

	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc,
	    &arg, BUS_DMA_NOWAIT);
	if (error || arg.vge_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */

#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

static int
vge_tx_list_init(sc)
	struct vge_softc	*sc;
{
	bzero((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(sc)
	struct vge_softc	*sc;
{
	int			i;

	bzero((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_rx_mbuf,
	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return (0);
}

#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(m)
	struct mbuf		*m;
{
	int			i;
	uint16_t		*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;

	return;
}
#endif

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(sc)
	struct vge_softc	*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			i, total_len;
	int			lim = 0;
	struct vge_rx_desc	*cur_rx;
	u_int32_t		rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {

#ifdef DEVICE_POLLING
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_rx_dmamap[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL)
				sc->vge_head = sc->vge_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
		    && !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_capenable & IFCAP_RXCSUM) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (rxctl & VGE_RDCTL_IPCSUMOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxstat & VGE_RDSTS_VTAG)
			VLAN_INPUT_TAG(ifp, m,
			    ntohs((rxctl & VGE_RDCTL_VLANID)), continue);

		VGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VGE_LOCK(sc);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;

	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);

	return;
}

static void
vge_txeof(sc)
	struct vge_softc	*sc;
{
	struct ifnet		*ifp;
	u_int32_t		txstat;
	int			idx;

	ifp = sc->vge_ifp;
	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {

		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
| 35 36/* 37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44/* 45 * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that 46 * combines a tri-speed ethernet MAC and PHY, with the following 47 * features: 48 * 49 * o Jumbo frame support up to 16K 50 * o Transmit and receive flow control 51 * o IPv4 checksum offload 52 * o VLAN tag insertion and stripping 53 * o TCP large send 54 * o 64-bit multicast hash table filter 55 * o 64 entry CAM filter 56 * o 16K RX FIFO and 48K TX FIFO memory 57 * o Interrupt moderation 58 * 59 * The VT6122 supports up to four transmit DMA queues. The descriptors 60 * in the transmit ring can address up to 7 data fragments; frames which 61 * span more than 7 data buffers must be coalesced, but in general the 62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 63 * long. The receive descriptors address only a single buffer. 64 * 65 * There are two peculiar design issues with the VT6122. One is that 66 * receive data buffers must be aligned on a 32-bit boundary. This is 67 * not a problem where the VT6122 is used as a LOM device in x86-based 68 * systems, but on architectures that generate unaligned access traps, we 69 * have to do some copying. 70 * 71 * The other issue has to do with the way 64-bit addresses are handled. 72 * The DMA descriptors only allow you to specify 48 bits of addressing 73 * information. The remaining 16 bits are specified using one of the 74 * I/O registers. If you only have a 32-bit system, then this isn't 75 * an issue, but if you have a 64-bit system and more than 4GB of 76 * memory, you must have to make sure your network data buffers reside 77 * in the same 48-bit 'segment.' 78 * 79 * Special thanks to Ryan Fu at VIA Networking for providing documentation 80 * and sample NICs for testing. 81 */ 82 83#include <sys/param.h> 84#include <sys/endian.h> 85#include <sys/systm.h> 86#include <sys/sockio.h> 87#include <sys/mbuf.h> 88#include <sys/malloc.h> 89#include <sys/module.h> 90#include <sys/kernel.h> 91#include <sys/socket.h> 92#include <sys/taskqueue.h> 93 94#include <net/if.h> 95#include <net/if_arp.h> 96#include <net/ethernet.h> 97#include <net/if_dl.h> 98#include <net/if_media.h> 99#include <net/if_types.h> 100#include <net/if_vlan_var.h> 101 102#include <net/bpf.h> 103 104#include <machine/bus.h> 105#include <machine/resource.h> 106#include <sys/bus.h> 107#include <sys/rman.h> 108 109#include <dev/mii/mii.h> 110#include <dev/mii/miivar.h> 111 112#include <dev/pci/pcireg.h> 113#include <dev/pci/pcivar.h> 114 115MODULE_DEPEND(vge, pci, 1, 1, 1); 116MODULE_DEPEND(vge, ether, 1, 1, 1); 117MODULE_DEPEND(vge, miibus, 1, 1, 1); 118 119/* "controller miibus0" required. See GENERIC if you get errors here. */ 120#include "miibus_if.h" 121 122#include <dev/vge/if_vgereg.h> 123#include <dev/vge/if_vgevar.h> 124 125#define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 126 127/* 128 * Various supported device vendors/types and their names. 
129 */ 130static struct vge_type vge_devs[] = { 131 { VIA_VENDORID, VIA_DEVICEID_61XX, 132 "VIA Networking Gigabit Ethernet" }, 133 { 0, 0, NULL } 134}; 135 136static int vge_probe (device_t); 137static int vge_attach (device_t); 138static int vge_detach (device_t); 139 140static int vge_encap (struct vge_softc *, struct mbuf *, int); 141 142static void vge_dma_map_addr (void *, bus_dma_segment_t *, int, int); 143static void vge_dma_map_rx_desc (void *, bus_dma_segment_t *, int, 144 bus_size_t, int); 145static void vge_dma_map_tx_desc (void *, bus_dma_segment_t *, int, 146 bus_size_t, int); 147static int vge_allocmem (device_t, struct vge_softc *); 148static int vge_newbuf (struct vge_softc *, int, struct mbuf *); 149static int vge_rx_list_init (struct vge_softc *); 150static int vge_tx_list_init (struct vge_softc *); 151#ifdef VGE_FIXUP_RX 152static __inline void vge_fixup_rx 153 (struct mbuf *); 154#endif 155static void vge_rxeof (struct vge_softc *); 156static void vge_txeof (struct vge_softc *); 157static void vge_intr (void *); 158static void vge_tick (void *); 159static void vge_tx_task (void *, int); 160static void vge_start (struct ifnet *); 161static int vge_ioctl (struct ifnet *, u_long, caddr_t); 162static void vge_init (void *); 163static void vge_stop (struct vge_softc *); 164static void vge_watchdog (struct ifnet *); 165static int vge_suspend (device_t); 166static int vge_resume (device_t); 167static void vge_shutdown (device_t); 168static int vge_ifmedia_upd (struct ifnet *); 169static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *); 170 171#ifdef VGE_EEPROM 172static void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *); 173#endif 174static void vge_read_eeprom (struct vge_softc *, caddr_t, int, int, int); 175 176static void vge_miipoll_start (struct vge_softc *); 177static void vge_miipoll_stop (struct vge_softc *); 178static int vge_miibus_readreg (device_t, int, int); 179static int vge_miibus_writereg (device_t, int, int, int); 180static void vge_miibus_statchg (device_t); 181 182static void vge_cam_clear (struct vge_softc *); 183static int vge_cam_set (struct vge_softc *, uint8_t *); 184#if __FreeBSD_version < 502113 185static uint32_t vge_mchash (uint8_t *); 186#endif 187static void vge_setmulti (struct vge_softc *); 188static void vge_reset (struct vge_softc *); 189 190#define VGE_PCI_LOIO 0x10 191#define VGE_PCI_LOMEM 0x14 192 193static device_method_t vge_methods[] = { 194 /* Device interface */ 195 DEVMETHOD(device_probe, vge_probe), 196 DEVMETHOD(device_attach, vge_attach), 197 DEVMETHOD(device_detach, vge_detach), 198 DEVMETHOD(device_suspend, vge_suspend), 199 DEVMETHOD(device_resume, vge_resume), 200 DEVMETHOD(device_shutdown, vge_shutdown), 201 202 /* bus interface */ 203 DEVMETHOD(bus_print_child, bus_generic_print_child), 204 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 205 206 /* MII interface */ 207 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 208 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 209 DEVMETHOD(miibus_statchg, vge_miibus_statchg), 210 211 { 0, 0 } 212}; 213 214static driver_t vge_driver = { 215 "vge", 216 vge_methods, 217 sizeof(struct vge_softc) 218}; 219 220static devclass_t vge_devclass; 221 222DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); 223DRIVER_MODULE(vge, cardbus, vge_driver, vge_devclass, 0, 0); 224DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0); 225 226#ifdef VGE_EEPROM 227/* 228 * Read a word of data stored in the EEPROM at address 'addr.' 
229 */ 230static void 231vge_eeprom_getword(sc, addr, dest) 232 struct vge_softc *sc; 233 int addr; 234 u_int16_t *dest; 235{ 236 register int i; 237 u_int16_t word = 0; 238 239 /* 240 * Enter EEPROM embedded programming mode. In order to 241 * access the EEPROM at all, we first have to set the 242 * EELOAD bit in the CHIPCFG2 register. 243 */ 244 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 245 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 246 247 /* Select the address of the word we want to read */ 248 CSR_WRITE_1(sc, VGE_EEADDR, addr); 249 250 /* Issue read command */ 251 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 252 253 /* Wait for the done bit to be set. */ 254 for (i = 0; i < VGE_TIMEOUT; i++) { 255 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 256 break; 257 } 258 259 if (i == VGE_TIMEOUT) { 260 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 261 *dest = 0; 262 return; 263 } 264 265 /* Read the result */ 266 word = CSR_READ_2(sc, VGE_EERDDAT); 267 268 /* Turn off EEPROM access mode. */ 269 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 270 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 271 272 *dest = word; 273 274 return; 275} 276#endif 277 278/* 279 * Read a sequence of words from the EEPROM. 280 */ 281static void 282vge_read_eeprom(sc, dest, off, cnt, swap) 283 struct vge_softc *sc; 284 caddr_t dest; 285 int off; 286 int cnt; 287 int swap; 288{ 289 int i; 290#ifdef VGE_EEPROM 291 u_int16_t word = 0, *ptr; 292 293 for (i = 0; i < cnt; i++) { 294 vge_eeprom_getword(sc, off + i, &word); 295 ptr = (u_int16_t *)(dest + (i * 2)); 296 if (swap) 297 *ptr = ntohs(word); 298 else 299 *ptr = word; 300 } 301#else 302 for (i = 0; i < ETHER_ADDR_LEN; i++) 303 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 304#endif 305} 306 307static void 308vge_miipoll_stop(sc) 309 struct vge_softc *sc; 310{ 311 int i; 312 313 CSR_WRITE_1(sc, VGE_MIICMD, 0); 314 315 for (i = 0; i < VGE_TIMEOUT; i++) { 316 DELAY(1); 317 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 318 break; 319 } 320 321 if (i == VGE_TIMEOUT) 322 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 323 324 return; 325} 326 327static void 328vge_miipoll_start(sc) 329 struct vge_softc *sc; 330{ 331 int i; 332 333 /* First, make sure we're idle. */ 334 335 CSR_WRITE_1(sc, VGE_MIICMD, 0); 336 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 337 338 for (i = 0; i < VGE_TIMEOUT; i++) { 339 DELAY(1); 340 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 341 break; 342 } 343 344 if (i == VGE_TIMEOUT) { 345 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 346 return; 347 } 348 349 /* Now enable auto poll mode. */ 350 351 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 352 353 /* And make sure it started. */ 354 355 for (i = 0; i < VGE_TIMEOUT; i++) { 356 DELAY(1); 357 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 358 break; 359 } 360 361 if (i == VGE_TIMEOUT) 362 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 363 364 return; 365} 366 367static int 368vge_miibus_readreg(dev, phy, reg) 369 device_t dev; 370 int phy, reg; 371{ 372 struct vge_softc *sc; 373 int i; 374 u_int16_t rval = 0; 375 376 sc = device_get_softc(dev); 377 378 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) 379 return(0); 380 381 VGE_LOCK(sc); 382 vge_miipoll_stop(sc); 383 384 /* Specify the register we want to read. */ 385 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 386 387 /* Issue read command. 
*/ 388 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 389 390 /* Wait for the read command bit to self-clear. */ 391 for (i = 0; i < VGE_TIMEOUT; i++) { 392 DELAY(1); 393 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) 394 break; 395 } 396 397 if (i == VGE_TIMEOUT) 398 device_printf(sc->vge_dev, "MII read timed out\n"); 399 else 400 rval = CSR_READ_2(sc, VGE_MIIDATA); 401 402 vge_miipoll_start(sc); 403 VGE_UNLOCK(sc); 404 405 return (rval); 406} 407 408static int 409vge_miibus_writereg(dev, phy, reg, data) 410 device_t dev; 411 int phy, reg, data; 412{ 413 struct vge_softc *sc; 414 int i, rval = 0; 415 416 sc = device_get_softc(dev); 417 418 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) 419 return(0); 420 421 VGE_LOCK(sc); 422 vge_miipoll_stop(sc); 423 424 /* Specify the register we want to write. */ 425 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 426 427 /* Specify the data we want to write. */ 428 CSR_WRITE_2(sc, VGE_MIIDATA, data); 429 430 /* Issue write command. */ 431 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); 432 433 /* Wait for the write command bit to self-clear. */ 434 for (i = 0; i < VGE_TIMEOUT; i++) { 435 DELAY(1); 436 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) 437 break; 438 } 439 440 if (i == VGE_TIMEOUT) { 441 device_printf(sc->vge_dev, "MII write timed out\n"); 442 rval = EIO; 443 } 444 445 vge_miipoll_start(sc); 446 VGE_UNLOCK(sc); 447 448 return (rval); 449} 450 451static void 452vge_cam_clear(sc) 453 struct vge_softc *sc; 454{ 455 int i; 456 457 /* 458 * Turn off all the mask bits. This tells the chip 459 * that none of the entries in the CAM filter are valid. 460 * desired entries will be enabled as we fill the filter in. 461 */ 462 463 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 464 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 465 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); 466 for (i = 0; i < 8; i++) 467 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 468 469 /* Clear the VLAN filter too. */ 470 471 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0); 472 for (i = 0; i < 8; i++) 473 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 474 475 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 476 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 477 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 478 479 sc->vge_camidx = 0; 480 481 return; 482} 483 484static int 485vge_cam_set(sc, addr) 486 struct vge_softc *sc; 487 uint8_t *addr; 488{ 489 int i, error = 0; 490 491 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 492 return(ENOSPC); 493 494 /* Select the CAM data page. */ 495 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 496 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); 497 498 /* Set the filter entry we want to update and enable writing. */ 499 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx); 500 501 /* Write the address to the CAM registers */ 502 for (i = 0; i < ETHER_ADDR_LEN; i++) 503 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); 504 505 /* Issue a write command. */ 506 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); 507 508 /* Wake for it to clear. */ 509 for (i = 0; i < VGE_TIMEOUT; i++) { 510 DELAY(1); 511 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) 512 break; 513 } 514 515 if (i == VGE_TIMEOUT) { 516 device_printf(sc->vge_dev, "setting CAM filter failed\n"); 517 error = EIO; 518 goto fail; 519 } 520 521 /* Select the CAM mask page. */ 522 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 523 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 524 525 /* Set the mask bit that enables this filter. 
*/ 526 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8), 527 1<<(sc->vge_camidx & 7)); 528 529 sc->vge_camidx++; 530 531fail: 532 /* Turn off access to CAM. */ 533 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 534 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 535 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 536 537 return (error); 538} 539 540#if __FreeBSD_version < 502113 541static uint32_t 542vge_mchash(addr) 543 uint8_t *addr; 544{ 545 uint32_t crc, carry; 546 int idx, bit; 547 uint8_t data; 548 549 /* Compute CRC for the address value. */ 550 crc = 0xFFFFFFFF; /* initial value */ 551 552 for (idx = 0; idx < 6; idx++) { 553 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) { 554 carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01); 555 crc <<= 1; 556 if (carry) 557 crc = (crc ^ 0x04c11db6) | carry; 558 } 559 } 560 561 return(crc); 562} 563#endif 564 565/* 566 * Program the multicast filter. We use the 64-entry CAM filter 567 * for perfect filtering. If there's more than 64 multicast addresses, 568 * we use the hash filter insted. 569 */ 570static void 571vge_setmulti(sc) 572 struct vge_softc *sc; 573{ 574 struct ifnet *ifp; 575 int error = 0/*, h = 0*/; 576 struct ifmultiaddr *ifma; 577 u_int32_t h, hashes[2] = { 0, 0 }; 578 579 ifp = sc->vge_ifp; 580 581 /* First, zot all the multicast entries. */ 582 vge_cam_clear(sc); 583 CSR_WRITE_4(sc, VGE_MAR0, 0); 584 CSR_WRITE_4(sc, VGE_MAR1, 0); 585 586 /* 587 * If the user wants allmulti or promisc mode, enable reception 588 * of all multicast frames. 589 */ 590 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 591 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF); 592 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF); 593 return; 594 } 595 596 /* Now program new ones */ 597 IF_ADDR_LOCK(ifp); 598 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 599 if (ifma->ifma_addr->sa_family != AF_LINK) 600 continue; 601 error = vge_cam_set(sc, 602 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 603 if (error) 604 break; 605 } 606 607 /* If there were too many addresses, use the hash filter. 
*/ 608 if (error) { 609 vge_cam_clear(sc); 610 611 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 612 if (ifma->ifma_addr->sa_family != AF_LINK) 613 continue; 614#if __FreeBSD_version < 502113 615 h = vge_mchash(LLADDR((struct sockaddr_dl *) 616 ifma->ifma_addr)) >> 26; 617#else 618 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 619 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 620#endif 621 if (h < 32) 622 hashes[0] |= (1 << h); 623 else 624 hashes[1] |= (1 << (h - 32)); 625 } 626 627 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 628 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 629 } 630 IF_ADDR_UNLOCK(ifp); 631 632 return; 633} 634 635static void 636vge_reset(sc) 637 struct vge_softc *sc; 638{ 639 register int i; 640 641 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 642 643 for (i = 0; i < VGE_TIMEOUT; i++) { 644 DELAY(5); 645 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 646 break; 647 } 648 649 if (i == VGE_TIMEOUT) { 650 device_printf(sc->vge_dev, "soft reset timed out"); 651 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 652 DELAY(2000); 653 } 654 655 DELAY(5000); 656 657 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 658 659 for (i = 0; i < VGE_TIMEOUT; i++) { 660 DELAY(5); 661 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 662 break; 663 } 664 665 if (i == VGE_TIMEOUT) { 666 device_printf(sc->vge_dev, "EEPROM reload timed out\n"); 667 return; 668 } 669 670 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 671 672 return; 673} 674 675/* 676 * Probe for a VIA gigabit chip. Check the PCI vendor and device 677 * IDs against our list and return a device name if we find a match. 678 */ 679static int 680vge_probe(dev) 681 device_t dev; 682{ 683 struct vge_type *t; 684 struct vge_softc *sc; 685 686 t = vge_devs; 687 sc = device_get_softc(dev); 688 689 while (t->vge_name != NULL) { 690 if ((pci_get_vendor(dev) == t->vge_vid) && 691 (pci_get_device(dev) == t->vge_did)) { 692 device_set_desc(dev, t->vge_name); 693 return (BUS_PROBE_DEFAULT); 694 } 695 t++; 696 } 697 698 return (ENXIO); 699} 700 701static void 702vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error) 703 void *arg; 704 bus_dma_segment_t *segs; 705 int nseg; 706 bus_size_t mapsize; 707 int error; 708{ 709 710 struct vge_dmaload_arg *ctx; 711 struct vge_rx_desc *d = NULL; 712 713 if (error) 714 return; 715 716 ctx = arg; 717 718 /* Signal error to caller if there's too many segments */ 719 if (nseg > ctx->vge_maxsegs) { 720 ctx->vge_maxsegs = 0; 721 return; 722 } 723 724 /* 725 * Map the segment array into descriptors. 726 */ 727 728 d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx]; 729 730 /* If this descriptor is still owned by the chip, bail. 

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(dev)
	device_t		dev;
{
	struct vge_type		*t;
	struct vge_softc	*sc;

	t = vge_devs;
	sc = device_get_softc(dev);

	while (t->vge_name != NULL) {
		if ((pci_get_vendor(dev) == t->vge_vid) &&
		    (pci_get_device(dev) == t->vge_did)) {
			device_set_desc(dev, t->vge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

static void
vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	bus_size_t		mapsize;
	int			error;
{
	struct vge_dmaload_arg	*ctx;
	struct vge_rx_desc	*d = NULL;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */
	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		device_printf(ctx->sc->vge_dev,
		    "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;

	return;
}
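
/*
 * bus_dmamap_load() callbacks have no return value, so the mapping
 * callbacks above and below can only report failure through their
 * argument structure: clearing vge_maxsegs to 0 is the convention
 * that callers such as vge_newbuf() check for after the load.
 */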
static void
vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	bus_size_t		mapsize;
	int			error;
{
	struct vge_dmaload_arg	*ctx;
	struct vge_tx_desc	*d = NULL;
	struct vge_tx_frag	*f;
	int			i = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */
	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
		ctx->vge_maxsegs = 0;
		return;
	}

	for (i = 0; i < nseg; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames */
	if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    ctx->vge_m0->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nseg + 1 instead of just nseg. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
	d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;

	if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;

	ctx->vge_maxsegs = nseg;

	return;
}

/*
 * Map a single buffer address.
 */
static void
vge_dma_map_addr(arg, segs, nseg, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	int			error;
{
	bus_addr_t		*addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;

	return;
}
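
/*
 * Worked example of the manual padding above, assuming the usual
 * VGE_MIN_FRAMELEN of 60 bytes (ETHER_MIN_LEN minus the CRC): a
 * 42-byte ARP request gets one extra 18-byte fragment appended.
 * Note the pad fragment simply points back at the start of the
 * first segment, so the padding bytes are a copy of the frame's
 * own header rather than zeros.
 */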
static int
vge_allocmem(dev, sc)
	device_t		dev;
	struct vge_softc	*sc;
{
	int			error;
	int			nseg;
	int			i;

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_mtag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the TX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
	    (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->vge_ldata.vge_tx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the TX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list,
	    VGE_TX_LIST_SZ, vge_dma_map_addr,
	    &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "could not load TX ring dma map\n");
		return (ENOMEM);
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
		    &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the RX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
	    (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->vge_ldata.vge_rx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the RX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list,
	    VGE_RX_LIST_SZ, vge_dma_map_addr,
	    &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "could not load RX ring dma map\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
		    &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	return (0);
}
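
/*
 * To summarize the DMA layout built above: vge_parent_tag (created in
 * vge_attach() below) parents three child tags -- one for the mbuf
 * maps shared by both rings, and one each for the TX and RX
 * descriptor lists, whose loaded bus addresses are saved in
 * vge_tx_list_addr and vge_rx_list_addr for programming into the
 * chip in vge_init().
 */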
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(dev)
	device_t		dev;
{
	u_char			eaddr[ETHER_ADDR_LEN];
	struct vge_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->vge_dev = dev;

	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = VGE_PCI_LOMEM;
	sc->vge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->vge_res == NULL) {
		printf("vge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->vge_btag = rman_get_bustag(sc->vge_res);
	sc->vge_bhandle = rman_get_bushandle(sc->vge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->vge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->vge_irq == NULL) {
		printf("vge%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);

	sc->vge_unit = unit;

#if __FreeBSD_version < 502113
	printf("vge%d: Ethernet address: %6D\n", unit, eaddr, ":");
#endif

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define VGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MAXBSIZE, VGE_NSEG_NEW,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->vge_parent_tag);
	if (error)
		goto fail;

	error = vge_allocmem(dev, sc);

	if (error)
		goto fail;

	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		printf("vge%d: cannot if_alloc()\n", sc->vge_unit);
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->vge_miibus,
	    vge_ifmedia_upd, vge_ifmedia_sts)) {
		printf("vge%d: MII without any phy!\n", sc->vge_unit);
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_start = vge_start;
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
#ifdef DEVICE_POLLING
#ifdef IFCAP_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
#endif
	ifp->if_watchdog = vge_watchdog;
	ifp->if_init = vge_init;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = VGE_IFQ_MAXLEN;
	ifp->if_capenable = ifp->if_capabilities;

	TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    vge_intr, sc, &sc->vge_intrhand);

	if (error) {
		printf("vge%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vge_detach(dev);

	return (error);
}
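
/*
 * Note that the fail: path above calls vge_detach() even when attach
 * only got partway through its allocations, which is why vge_detach()
 * below must free only the resources that actually exist.
 */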
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(dev)
	device_t		dev;
{
	struct vge_softc	*sc;
	struct ifnet		*ifp;
	int			i;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		vge_stop(sc);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to vge_ioctl(),
		 * which will try to call vge_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the vge_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    VGE_PCI_LOMEM, sc->vge_res);

	/* Unload and free the RX DMA ring memory and map */
	if (sc->vge_ldata.vge_rx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
		    sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
		    sc->vge_ldata.vge_rx_list,
		    sc->vge_ldata.vge_rx_list_map);
		bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */
	if (sc->vge_ldata.vge_tx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
		    sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
		    sc->vge_ldata.vge_tx_list,
		    sc->vge_ldata.vge_tx_list_map);
		bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */
	if (sc->vge_ldata.vge_mtag) {
		for (i = 0; i < VGE_TX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_tx_dmamap[i]);
		for (i = 0; i < VGE_RX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_rx_dmamap[i]);
		bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	}

	if (sc->vge_parent_tag)
		bus_dma_tag_destroy(sc->vge_parent_tag);

	mtx_destroy(&sc->vge_mtx);

	return (0);
}

static int
vge_newbuf(sc, idx, m)
	struct vge_softc	*sc;
	int			idx;
	struct mbuf		*m;
{
	struct vge_dmaload_arg	arg;
	struct mbuf		*n = NULL;
	int			i, error;

	if (m == NULL) {
		n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return (ENOBUFS);
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_maxsegs = 1;
	arg.vge_flags = 0;

	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc,
	    &arg, BUS_DMA_NOWAIT);
	if (error || arg.vge_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		/*
		 * This backwards walk never has to wrap around the
		 * ring: it relies on VGE_RX_DESC_CNT being a multiple
		 * of VGE_RXCHUNK, so the chunk boundaries always fall
		 * at indices 3, 7, 11, and so on.
		 */
		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

static int
vge_tx_list_init(sc)
	struct vge_softc	*sc;
{
	bzero((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(sc)
	struct vge_softc	*sc;
{
	int			i;

	bzero((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_rx_mbuf,
	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return (0);
}

#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(m)
	struct mbuf		*m;
{
	int			i;
	uint16_t		*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;

	return;
}
#endif
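
/*
 * To illustrate the trick: vge_newbuf() advances m_data by
 * VGE_ETHER_ALIGN (presumably 4 bytes, preserving the chip's 32-bit
 * alignment rule), so a received frame starts 4 bytes into the
 * cluster with its IP header misaligned. vge_fixup_rx() then slides
 * the whole frame back one 16-bit word, leaving m_data 2 bytes into
 * the cluster: the 14-byte Ethernet header ends up oddly aligned,
 * but the IP header that follows lands on a 4-byte boundary, which
 * is what strict-alignment machines care about.
 */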

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(sc)
	struct vge_softc	*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			i, total_len;
	int			lim = 0;
	struct vge_rx_desc	*cur_rx;
	u_int32_t		rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {

#ifdef DEVICE_POLLING
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */
		bus_dmamap_sync(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_rx_dmamap[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL)
				sc->vge_head = sc->vge_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
		    && !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_capenable & IFCAP_RXCSUM) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (rxctl & VGE_RDCTL_IPCSUMOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxstat & VGE_RDSTS_VTAG)
			VLAN_INPUT_TAG(ifp, m,
			    ntohs((rxctl & VGE_RDCTL_VLANID)), continue);

		VGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VGE_LOCK(sc);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);

	return;
}

static void
vge_txeof(sc)
	struct vge_softc	*sc;
{
	struct ifnet		*ifp;
	u_int32_t		txstat;
	int			idx;

	ifp = sc->vge_ifp;
	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {

		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */
	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
|
			break;
		}

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx) {
		VGE_UNLOCK(sc);
		return;
	}

	/* Flush the TX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter is
	 * idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	VGE_UNLOCK(sc);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}
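
/*
 * The net effect of the countdown scheme above: during a burst of
 * transmissions the timer keeps being re-armed and never expires, so
 * completed descriptors are only reaped in batches by vge_txeof()
 * once the transmitter goes idle (or when another interrupt source
 * fires first), trading a little completion latency for far fewer
 * interrupts.
 */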
2012 */ 2013 2014 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 2015 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr)); 2016 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 2017 2018 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 2019 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr)); 2020 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 2021 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 2022 2023 /* Enable and wake up the RX descriptor queue */ 2024 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 2025 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 2026 2027 /* Enable the TX descriptor queue */ 2028 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 2029 2030 /* Set up the receive filter -- allow large frames for VLANs. */ 2031 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT); 2032 2033 /* If we want promiscuous mode, set the allframes bit. */ 2034 if (ifp->if_flags & IFF_PROMISC) { 2035 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 2036 } 2037 2038 /* Set capture broadcast bit to capture broadcast frames. */ 2039 if (ifp->if_flags & IFF_BROADCAST) { 2040 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); 2041 } 2042 2043 /* Set multicast bit to capture multicast frames. */ 2044 if (ifp->if_flags & IFF_MULTICAST) { 2045 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); 2046 } 2047 2048 /* Init the cam filter. */ 2049 vge_cam_clear(sc); 2050 2051 /* Init the multicast filter. */ 2052 vge_setmulti(sc); 2053 2054 /* Enable flow control */ 2055 2056 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 2057 2058 /* Enable jumbo frame reception (if desired) */ 2059 2060 /* Start the MAC. */ 2061 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 2062 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 2063 CSR_WRITE_1(sc, VGE_CRS0, 2064 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); 2065 2066 /* 2067 * Configure one-shot timer for microsecond 2068 * resulution and load it for 500 usecs. 2069 */ 2070 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES); 2071 CSR_WRITE_2(sc, VGE_SSTIMER, 400); 2072 2073 /* 2074 * Configure interrupt moderation for receive. Enable 2075 * the holdoff counter and load it, and set the RX 2076 * suppression count to the number of descriptors we 2077 * want to allow before triggering an interrupt. 2078 * The holdoff timer is in units of 20 usecs. 2079 */ 2080 2081#ifdef notyet 2082 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE); 2083 /* Select the interrupt holdoff timer page. */ 2084 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 2085 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 2086 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */ 2087 2088 /* Enable use of the holdoff timer. */ 2089 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); 2090 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD); 2091 2092 /* Select the RX suppression threshold page. */ 2093 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 2094 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); 2095 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */ 2096 2097 /* Restore the page select bits. */ 2098 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 2099 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 2100#endif 2101 2102#ifdef DEVICE_POLLING 2103 /* 2104 * Disable interrupts if we are polling. 2105 */ 2106 if (ifp->if_flags & IFF_POLLING) { 2107 CSR_WRITE_4(sc, VGE_IMR, 0); 2108 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2109 } else /* otherwise ... */ 2110#endif /* DEVICE_POLLING */ 2111 { 2112 /* 2113 * Enable interrupts. 
2114 */ 2115 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2116 CSR_WRITE_4(sc, VGE_ISR, 0); 2117 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2118 } 2119 2120 mii_mediachg(mii); 2121
|