/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 200522 2009-12-14 18:27:34Z yongari $");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */
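/*
 * Illustrative sketch, not driver code: since a descriptor carries only
 * the low 48 address bits and the top 16 bits come from a single shared
 * chip register, every DMA buffer must have the same upper 16 bits. A
 * hypothetical check for two buffer addresses would be:
 *
 *	if ((a >> 48) != (b >> 48))
 *		panic("buffers straddle a 48-bit DMA segment");
 *
 * This driver sidesteps the issue entirely by restricting its DMA tags
 * to BUS_SPACE_MAXADDR_32BIT, so the upper bits are always zero.
 */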
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static struct vge_type vge_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_61XX,
	    "VIA Networking Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int vge_probe		(device_t);
static int vge_attach		(device_t);
static int vge_detach		(device_t);

static int vge_encap		(struct vge_softc *, struct mbuf *, int);

static void vge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void vge_dma_map_rx_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static void vge_dma_map_tx_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int vge_allocmem		(device_t, struct vge_softc *);
static int vge_newbuf		(struct vge_softc *, int, struct mbuf *);
static int vge_rx_list_init	(struct vge_softc *);
static int vge_tx_list_init	(struct vge_softc *);
#ifdef VGE_FIXUP_RX
static __inline void vge_fixup_rx
				(struct mbuf *);
#endif
static int vge_rxeof		(struct vge_softc *);
static void vge_txeof		(struct vge_softc *);
static void vge_intr		(void *);
static void vge_tick		(void *);
static void vge_start		(struct ifnet *);
static void vge_start_locked	(struct ifnet *);
static int vge_ioctl		(struct ifnet *, u_long, caddr_t);
static void vge_init		(void *);
static void vge_init_locked	(struct vge_softc *);
static void vge_stop		(struct vge_softc *);
static void vge_watchdog	(void *);
static int vge_suspend		(device_t);
static int vge_resume		(device_t);
static int vge_shutdown		(device_t);
static int vge_ifmedia_upd	(struct ifnet *);
static void vge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
static void vge_eeprom_getword	(struct vge_softc *, int, u_int16_t *);
#endif
static void vge_read_eeprom	(struct vge_softc *, caddr_t, int, int, int);

static void vge_miipoll_start	(struct vge_softc *);
static void vge_miipoll_stop	(struct vge_softc *);
static int vge_miibus_readreg	(device_t, int, int);
static int vge_miibus_writereg	(device_t, int, int, int);
static void vge_miibus_statchg	(device_t);

static void vge_cam_clear	(struct vge_softc *);
static int vge_cam_set		(struct vge_softc *, uint8_t *);
static void vge_setmulti	(struct vge_softc *);
static void vge_reset		(struct vge_softc *);

#define VGE_PCI_LOIO		0x10
#define VGE_PCI_LOMEM		0x14

static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	{ 0, 0 }
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(sc, addr, dest)
	struct vge_softc	*sc;
	int			addr;
	u_int16_t		*dest;
{
	int			i;
	u_int16_t		word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;

	return;
}
#endif
/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(sc, dest, off, cnt, swap)
	struct vge_softc	*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
#ifdef VGE_EEPROM
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

static void
vge_miipoll_stop(sc)
	struct vge_softc	*sc;
{
	int			i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");

	return;
}

static void
vge_miipoll_start(sc)
	struct vge_softc	*sc;
{
	int			i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to start MII autopoll\n");

	return;
}

static int
vge_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct vge_softc	*sc;
	int			i;
	u_int16_t		rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return (0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}
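/*
 * A note on the autopoll dance above: the chip normally polls the PHY
 * on its own, and manual MII access is only legal while that autopoll
 * engine is idle. Every direct register access in this file is
 * therefore bracketed the same way vge_miibus_readreg() is:
 *
 *	vge_miipoll_stop(sc);
 *	... program VGE_MIIADDR/VGE_MIIDATA and issue the command ...
 *	vge_miipoll_start(sc);
 *
 * This summarizes the existing code; it is not an additional interface.
 */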
static int
vge_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct vge_softc	*sc;
	int			i, rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return (0);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}

static void
vge_cam_clear(sc)
	struct vge_softc	*sc;
{
	int			i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;

	return;
}

static int
vge_cam_set(sc, addr)
	struct vge_softc	*sc;
	uint8_t			*addr;
{
	int			i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}
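/*
 * For reference, this is the hash computation vge_setmulti() below
 * falls back on when the 64-entry CAM overflows: the top 6 bits of the
 * big-endian CRC32 of each address select one of 64 hash bits, split
 * across the two 32-bit MAR registers.
 *
 *	h = ether_crc32_be(addr, ETHER_ADDR_LEN) >> 26;
 *	if (h < 32)
 *		hashes[0] |= (1 << h);		(goes to VGE_MAR0)
 *	else
 *		hashes[1] |= (1 << (h - 32));	(goes to VGE_MAR1)
 */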
/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_setmulti(sc)
	struct vge_softc	*sc;
{
	struct ifnet		*ifp;
	int			error = 0/*, h = 0*/;
	struct ifmultiaddr	*ifma;
	u_int32_t		h, hashes[2] = { 0, 0 };

	ifp = sc->vge_ifp;

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
	if_maddr_runlock(ifp);

	return;
}

static void
vge_reset(sc)
	struct vge_softc	*sc;
{
	int			i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM reload timed out\n");
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

	return;
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(dev)
	device_t		dev;
{
	struct vge_type		*t;

	t = vge_devs;

	while (t->vge_name != NULL) {
		if ((pci_get_vendor(dev) == t->vge_vid) &&
		    (pci_get_device(dev) == t->vge_did)) {
			device_set_desc(dev, t->vge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

static void
vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	bus_size_t		mapsize;
	int			error;
{

	struct vge_dmaload_arg	*ctx;
	struct vge_rx_desc	*d = NULL;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */

	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */

	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		device_printf(ctx->sc->vge_dev,
		    "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;

	return;
}
static void
vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	bus_size_t		mapsize;
	int			error;
{
	struct vge_dmaload_arg	*ctx;
	struct vge_tx_desc	*d = NULL;
	struct vge_tx_frag	*f;
	int			i = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */

	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */

	if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
		ctx->vge_maxsegs = 0;
		return;
	}

	for (i = 0; i < nseg; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames */

	if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    ctx->vge_m0->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
	d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;

	if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;

	ctx->vge_maxsegs = nseg;

	return;
}
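/*
 * A worked example of the padding quirk handled above, assuming the
 * conventional 60-byte value for VGE_MIN_FRAMELEN (illustrative
 * numbers only): a 42-byte ARP frame gets one extra fragment of
 * 60 - 42 = 18 bytes that simply re-points at the start of the first
 * segment; the chip only cares that the fragment lengths add up to
 * the minimum. The fragment count written into bits 28-31 of vge_ctl
 * is then nseg + 2: one for the pad fragment, one for the chip's
 * unexplained off-by-one.
 */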
/*
 * Map a single buffer address.
 */

static void
vge_dma_map_addr(arg, segs, nseg, error)
	void			*arg;
	bus_dma_segment_t	*segs;
	int			nseg;
	int			error;
{
	bus_addr_t		*addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;

	return;
}

static int
vge_allocmem(dev, sc)
	device_t		dev;
	struct vge_softc	*sc;
{
	int			error;
	int			nseg;
	int			i;

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_mtag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
	    (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->vge_ldata.vge_tx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the TX ring. */

	error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list,
	    VGE_TX_LIST_SZ, vge_dma_map_addr,
	    &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT);

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
		    &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
	    (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->vge_ldata.vge_rx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the RX ring. */

	error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list,
	    VGE_RX_LIST_SZ, vge_dma_map_addr,
	    &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT);

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
		    &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	return (0);
}
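/*
 * For orientation, the DMA resources created above form this
 * hierarchy (a description of the existing code, not new machinery):
 *
 *	vge_parent_tag (32-bit addressable, created in vge_attach)
 *	    vge_mtag        - shared by all per-descriptor RX/TX mbuf maps
 *	    vge_tx_list_tag - TX descriptor ring, one contiguous segment
 *	    vge_rx_list_tag - RX descriptor ring, one contiguous segment
 *
 * The ring tags use VGE_RING_ALIGN to satisfy the chip's descriptor
 * alignment requirement; the mbuf tag uses ETHER_ALIGN.
 */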
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(dev)
	device_t		dev;
{
	u_char			eaddr[ETHER_ADDR_LEN];
	struct vge_softc	*sc;
	struct ifnet		*ifp;
	int			error = 0, rid;

	sc = device_get_softc(dev);
	sc->vge_dev = dev;

	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = VGE_PCI_LOMEM;
	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->vge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define VGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, VGE_NSEG_NEW,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vge_parent_tag);
	if (error)
		goto fail;

	error = vge_allocmem(dev, sc);

	if (error)
		goto fail;

	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->vge_miibus,
	    vge_ifmedia_upd, vge_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_start = vge_start;
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	ifp->if_init = vge_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = VGE_IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, vge_intr, sc, &sc->vge_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vge_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(dev)
	device_t		dev;
{
	struct vge_softc	*sc;
	struct ifnet		*ifp;
	int			i;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		VGE_LOCK(sc);
		vge_stop(sc);
		VGE_UNLOCK(sc);
		callout_drain(&sc->vge_watchdog);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    VGE_PCI_LOMEM, sc->vge_res);
	if (ifp)
		if_free(ifp);

	/* Unload and free the RX DMA ring memory and map */

	if (sc->vge_ldata.vge_rx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
		    sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
		    sc->vge_ldata.vge_rx_list,
		    sc->vge_ldata.vge_rx_list_map);
		bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */

	if (sc->vge_ldata.vge_tx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
		    sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
		    sc->vge_ldata.vge_tx_list,
		    sc->vge_ldata.vge_tx_list_map);
		bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */

	if (sc->vge_ldata.vge_mtag) {
		for (i = 0; i < VGE_TX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_tx_dmamap[i]);
		for (i = 0; i < VGE_RX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_rx_dmamap[i]);
		bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	}

	if (sc->vge_parent_tag)
		bus_dma_tag_destroy(sc->vge_parent_tag);

	mtx_destroy(&sc->vge_mtx);

	return (0);
}

static int
vge_newbuf(sc, idx, m)
	struct vge_softc	*sc;
	int			idx;
	struct mbuf		*m;
{
	struct vge_dmaload_arg	arg;
	struct mbuf		*n = NULL;
	int			i, error;

	if (m == NULL) {
		n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return (ENOBUFS);
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_maxsegs = 1;
	arg.vge_flags = 0;

	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc,
	    &arg, BUS_DMA_NOWAIT);
	if (error || arg.vge_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */

#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx],
	    BUS_DMASYNC_PREREAD);

	return (0);
}
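/*
 * Concretely, with VGE_RXCHUNK of 4 the ownership handoff above works
 * like this (restating the code, not changing it): descriptors 0-2 are
 * filled but left without VGE_RDSTS_OWN; when descriptor 3 is filled,
 * OWN is set on 3, 2, 1 and 0 in one pass and the counter resets. The
 * walk-back loop assumes a chunk never wraps past index 0, which holds
 * because the ring size is a multiple of the chunk size.
 */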
static int
vge_tx_list_init(sc)
	struct vge_softc	*sc;
{
	bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(sc)
	struct vge_softc	*sc;
{
	int			i;

	bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero ((char *)&sc->vge_ldata.vge_rx_mbuf,
	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return (0);
}

#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(m)
	struct mbuf		*m;
{
	int			i;
	uint16_t		*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;

	return;
}
#endif
/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static int
vge_rxeof(sc)
	struct vge_softc	*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			i, total_len;
	int			lim = 0;
	struct vge_rx_desc	*cur_rx;
	u_int32_t		rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {

#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_rx_dmamap[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL)
				sc->vge_head = sc->vge_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
		    && !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there are 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_capenable & IFCAP_RXCSUM) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (rxctl & VGE_RDCTL_IPCSUMOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			/*
			 * The 32-bit rxctl register is stored in little-endian.
			 * However, the 16-bit vlan tag is stored in big-endian,
			 * so we have to byte swap it.
			 */
			m->m_pkthdr.ether_vtag =
			    bswap16(rxctl & VGE_RDCTL_VLANID);
			m->m_flags |= M_VLANTAG;
		}

		VGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VGE_LOCK(sc);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);

	return (lim);
}
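/*
 * Worked example of the reassembly math above, assuming 2048-byte
 * clusters and a VGE_ETHER_ALIGN of 2 (illustrative numbers only):
 * each full fragment then holds 2046 bytes, so a 5000-byte frame
 * (CRC included) arrives as two SOF-flagged fragments of 2046 bytes
 * plus a final fragment of 5000 % 2046 = 908 bytes, from which the
 * 4-byte CRC is trimmed before the chain is handed to if_input().
 */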
static void
vge_txeof(sc)
	struct vge_softc	*sc;
{
	struct ifnet		*ifp;
	u_int32_t		txstat;
	int			idx;

	ifp = sc->vge_ifp;
	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {

		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->vge_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet, reload
	 * the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
	}

	return;
}
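/*
 * Interrupt moderation recap, restating the scheme shared by
 * vge_txeof() above and vge_start_locked() below: per-packet 'TX done'
 * interrupts are not used. Instead the single-shot TIMER0 is rearmed
 * whenever descriptors are queued or still outstanding, and it only
 * expires, raising VGE_ISR_TIMER0, once the transmitter has been idle
 * for the interval programmed into VGE_SSTIMER by vge_init_locked().
 * One timer interrupt thus reaps a whole burst of transmissions.
 */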
static void
vge_tick(xsc)
	void			*xsc;
{
	struct vge_softc	*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	sc = xsc;
	ifp = sc->vge_ifp;
	VGE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->vge_miibus);

	mii_tick(mii);
	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE)) {
			sc->vge_link = 0;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_DOWN);
		}
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_UP);
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				vge_start_locked(ifp);
		}
	}

	return;
}

#ifdef DEVICE_POLLING
static int
vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	VGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		goto done;

	sc->rxcycles = count;
	rx_npkts = vge_rxeof(sc);
	vge_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vge_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int32_t	status;
		status = CSR_READ_4(sc, VGE_ISR);
		if (status == 0xFFFFFFFF)
			goto done;
		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & VGE_ISR_TXDMA_STALL ||
		    status & VGE_ISR_RXDMA_STALL)
			vge_init_locked(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			ifp->if_ierrors++;
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}
	}
done:
	VGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
vge_intr(arg)
	void			*arg;
{
	struct vge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;

	if (sc->suspended) {
		return;
	}

	VGE_LOCK(sc);
	ifp = sc->vge_ifp;

	if (!(ifp->if_flags & IFF_UP)) {
		VGE_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		VGE_UNLOCK(sc);
		return;
	}
#endif

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {

		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away, the read returns 0xFFFFFFFF. */
		if (status == 0xFFFFFFFF)
			break;

		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init_locked(sc);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vge_start_locked(ifp);

	VGE_UNLOCK(sc);

	return;
}

static int
vge_encap(sc, m_head, idx)
	struct vge_softc	*sc;
	struct mbuf		*m_head;
	int			idx;
{
	struct mbuf		*m_new = NULL;
	struct vge_dmaload_arg	arg;
	bus_dmamap_t		map;
	int			error;

	if (sc->vge_ldata.vge_tx_free <= 2)
		return (EFBIG);

	arg.vge_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.vge_flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.vge_flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.vge_flags |= VGE_TDCTL_UDPCSUM;

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_m0 = m_head;
	arg.vge_maxsegs = VGE_TX_FRAGS;

	map = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
	    m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		if_printf(sc->vge_ifp, "can't map mbuf (error %d)\n", error);
		return (ENOBUFS);
	}

	/* Too many segments to map, coalesce into a single mbuf */

	if (error || arg.vge_maxsegs == 0) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return (1);
		else
			m_head = m_new;

		arg.sc = sc;
		arg.vge_m0 = m_head;
		arg.vge_idx = idx;
		arg.vge_maxsegs = 1;

		error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
		    m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
		if (error) {
			if_printf(sc->vge_ifp, "can't map mbuf (error %d)\n",
			    error);
			return (EFBIG);
		}
	}

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */

	if (m_head->m_flags & M_VLANTAG)
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(m_head->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG);

	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);

	return (0);
}
/*
 * Main transmit routine.
 */

static void
vge_start(ifp)
	struct ifnet		*ifp;
{
	struct vge_softc	*sc;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	vge_start_locked(ifp);
	VGE_UNLOCK(sc);
}

static void
vge_start_locked(ifp)
	struct ifnet		*ifp;
{
	struct vge_softc	*sc;
	struct mbuf		*m_head = NULL;
	int			idx, pidx = 0;

	sc = ifp->if_softc;
	VGE_LOCK_ASSERT(sc);

	if (!sc->vge_link || ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		return;

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (vge_encap(sc, m_head, idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx)
		return;

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->vge_timer = 5;

	return;
}
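/*
 * Descriptor chaining detail from the loop above, spelled out (this
 * describes the existing code): VGE_TXDESC_Q is set on the *previous*
 * descriptor's first fragment, telling the chip another descriptor
 * follows it. That is why 'pidx' trails 'idx' by one slot, wrapping to
 * VGE_TX_DESC_CNT - 1 at index 0: a newly queued frame is announced to
 * the hardware only once its predecessor points at it.
 */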
static void
vge_init(xsc)
	void			*xsc;
{
	struct vge_softc	*sc = xsc;

	VGE_LOCK(sc);
	vge_init_locked(sc);
	VGE_UNLOCK(sc);
}

static void
vge_init_locked(struct vge_softc *sc)
{
	struct ifnet *ifp = sc->vge_ifp;
	struct mii_data *mii;
	int i;

	VGE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->vge_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */

	vge_rx_list_init(sc);
	vge_tx_list_init(sc);

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
	}

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
	}

	/* Init the CAM filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */

	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 400 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif
	{
		/*
		 * Enable interrupts.
		 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);

	sc->vge_if_flags = 0;
	sc->vge_link = 0;

	return;
}

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct vge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	mii = device_get_softc(sc->vge_miibus);
	mii_mediachg(mii);
	VGE_UNLOCK(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct vge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vge_miibus);

	VGE_LOCK(sc);
	mii_pollstat(mii);
	VGE_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

static void
vge_miibus_statchg(dev)
	device_t		dev;
{
	struct vge_softc	*sc;
	struct mii_data		*mii;
	struct ifmedia_entry	*ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		device_printf(dev, "unknown media type: %x\n",
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}

	return;
}
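/*
 * Summary of the DIAGCTL bits chosen by vge_miibus_statchg() above
 * (a restatement of the switch, not additional logic):
 *
 *	media selection			MACFORCE	FDXFORCE
 *	autoselect			clear		clear
 *	1000baseT			set		clear (FDX implied)
 *	10/100, full duplex		set		set
 *	10/100, half duplex		set		clear
 */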
static int
vge_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct vge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		VGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->vge_if_flags & IFF_PROMISC) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else
				vge_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		VGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VGE_LOCK(sc);
		vge_setmulti(sc);
		VGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vge_poll, ifp);
				if (error)
					return (error);
				VGE_LOCK(sc);
				/* Disable interrupts */
				CSR_WRITE_4(sc, VGE_IMR, 0);
				CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
				ifp->if_capenable |= IFCAP_POLLING;
				VGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VGE_LOCK(sc);
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VGE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		VGE_LOCK(sc);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		VGE_UNLOCK(sc);
	    }
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vge_watchdog(void *arg)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	VGE_LOCK_ASSERT(sc);
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
	if (sc->vge_timer == 0 || --sc->vge_timer > 0)
		return;

	ifp = sc->vge_ifp;
	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init_locked(sc);

	return;
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(sc)
	struct vge_softc	*sc;
{
	int			i;
	struct ifnet		*ifp;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	sc->vge_timer = 0;
	callout_stop(&sc->vge_watchdog);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}

	return;
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(dev)
	device_t		dev;
{
	struct vge_softc	*sc;

	sc = device_get_softc(dev);

	VGE_LOCK(sc);
	vge_stop(sc);

	sc->suspended = 1;
	VGE_UNLOCK(sc);

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
2336 */ 2337static int 2338vge_resume(dev) 2339 device_t dev; 2340{ 2341 struct vge_softc *sc; 2342 struct ifnet *ifp; 2343 2344 sc = device_get_softc(dev); 2345 ifp = sc->vge_ifp; 2346 2347 /* reenable busmastering */ 2348 pci_enable_busmaster(dev); 2349 pci_enable_io(dev, SYS_RES_MEMORY); 2350 2351 /* reinitialize interface if necessary */ 2352 VGE_LOCK(sc); 2353 if (ifp->if_flags & IFF_UP) 2354 vge_init_locked(sc); 2355 2356 sc->suspended = 0; 2357 VGE_UNLOCK(sc); 2358 2359 return (0); 2360} 2361 2362/* 2363 * Stop all chip I/O so that the kernel's probe routines don't 2364 * get confused by errant DMAs when rebooting. 2365 */ 2366static int 2367vge_shutdown(dev) 2368 device_t dev; 2369{ 2370 struct vge_softc *sc; 2371 2372 sc = device_get_softc(dev); 2373 2374 VGE_LOCK(sc); 2375 vge_stop(sc); 2376 VGE_UNLOCK(sc); 2377 2378 return (0); 2379} 2380