/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 198987 2009-11-06 14:52:37Z jhb $");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers.
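 * (In this driver the split is handled by the VGE_ADDR_LO() and
 * VGE_ADDR_HI() macros used below: the low 32 bits of a DMA address
 * go into a descriptor's vge_addrlo field and bits 32-47 into its
 * 16-bit vge_addrhi field, while the remaining upper bits are
 * implied by the chip-wide register.)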
 * If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static struct vge_type vge_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_61XX,
		"VIA Networking Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int vge_probe		(device_t);
static int vge_attach		(device_t);
static int vge_detach		(device_t);

static int vge_encap		(struct vge_softc *, struct mbuf *, int);

static void vge_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void vge_dma_map_rx_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static void vge_dma_map_tx_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int vge_allocmem		(device_t, struct vge_softc *);
static int vge_newbuf		(struct vge_softc *, int, struct mbuf *);
static int vge_rx_list_init	(struct vge_softc *);
static int vge_tx_list_init	(struct vge_softc *);
#ifdef VGE_FIXUP_RX
static __inline void vge_fixup_rx
				(struct mbuf *);
#endif
static int vge_rxeof		(struct vge_softc *);
static void vge_txeof		(struct vge_softc *);
static void vge_intr		(void *);
static void vge_tick		(void *);
static void vge_tx_task		(void *, int);
static void vge_start		(struct ifnet *);
static int vge_ioctl		(struct ifnet *, u_long, caddr_t);
static void vge_init		(void *);
static void vge_stop		(struct vge_softc *);
static void vge_watchdog	(struct ifnet *);
static int vge_suspend		(device_t);
static int vge_resume		(device_t);
static int vge_shutdown	(device_t);
static int vge_ifmedia_upd	(struct ifnet *);
static void vge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
static void vge_eeprom_getword	(struct vge_softc *, int, u_int16_t *);
#endif
static void vge_read_eeprom	(struct vge_softc *, caddr_t, int, int, int);

static void vge_miipoll_start	(struct vge_softc *);
static void vge_miipoll_stop	(struct vge_softc *);
static int vge_miibus_readreg	(device_t, int, int);
static int vge_miibus_writereg	(device_t, int, int, int);
static void vge_miibus_statchg	(device_t);

static void vge_cam_clear	(struct vge_softc *);
static int vge_cam_set		(struct vge_softc *, uint8_t *);
static void vge_setmulti	(struct vge_softc *);
static void vge_reset		(struct vge_softc *);

#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14

static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	{ 0, 0 }
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0);

#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(sc, addr, dest)
	struct vge_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int16_t word = 0;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;

	return;
}
#endif

/*
 * Read a sequence of words from the EEPROM.
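 * 'off' is the word offset to start reading at and 'cnt' the number
 * of 16-bit words to fetch; when 'swap' is nonzero each word is
 * byte-swapped with ntohs() before being stored at 'dest'.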
 */
static void
vge_read_eeprom(sc, dest, off, cnt, swap)
	struct vge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
	int swap;
{
	int i;
#ifdef VGE_EEPROM
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}

static void
vge_miipoll_stop(sc)
	struct vge_softc *sc;
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");

	return;
}

static void
vge_miipoll_start(sc)
	struct vge_softc *sc;
{
	int i;

	/* First, make sure we're idle. */

	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */

	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to start MII autopoll\n");

	return;
}

static int
vge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct vge_softc *sc;
	int i;
	u_int16_t rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);

	return (rval);
}

static int
vge_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);

	return (rval);
}

static void
vge_cam_clear(sc)
	struct vge_softc *sc;
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;

	return;
}

static int
vge_cam_set(sc, addr)
	struct vge_softc *sc;
	uint8_t *addr;
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return(ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast
 * addresses, we use the hash filter instead.
 */
static void
vge_setmulti(sc)
	struct vge_softc *sc;
{
	struct ifnet *ifp;
	int error = 0/*, h = 0*/;
	struct ifmultiaddr *ifma;
	u_int32_t h, hashes[2] = { 0, 0 };

	ifp = sc->vge_ifp;

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
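	 * Writing all ones to the two 32-bit MAR registers makes
	 * every bucket of the 64-bit multicast hash match, which is
	 * what accepts everything below.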
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
	if_maddr_runlock(ifp);

	return;
}

static void
vge_reset(sc)
	struct vge_softc *sc;
{
	register int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM reload timed out\n");
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

	return;
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(dev)
	device_t dev;
{
	struct vge_type *t;

	t = vge_devs;

	while (t->vge_name != NULL) {
		if ((pci_get_vendor(dev) == t->vge_vid) &&
		    (pci_get_device(dev) == t->vge_did)) {
			device_set_desc(dev, t->vge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

static void
vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{

	struct vge_dmaload_arg *ctx;
	struct vge_rx_desc *d = NULL;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */

	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		device_printf(ctx->sc->vge_dev,
		    "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;

	return;
}

static void
vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct vge_dmaload_arg *ctx;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
	int i = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */

	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */

	if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
		ctx->vge_maxsegs = 0;
		return;
	}

	for (i = 0; i < nseg; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames */

	if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    ctx->vge_m0->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
	d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;

	if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;

	ctx->vge_maxsegs = nseg;

	return;
}

/*
 * Map a single buffer address.
 */

static void
vge_dma_map_addr(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	bus_addr_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;

	return;
}

static int
vge_allocmem(dev, sc)
	device_t dev;
	struct vge_softc *sc;
{
	int error;
	int nseg;
	int i;

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_mtag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
	    (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->vge_ldata.vge_tx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the TX ring. */

	error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list,
	    VGE_TX_LIST_SZ, vge_dma_map_addr,
	    &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT);

	/* Create DMA maps for TX buffers */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
		    &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->vge_ldata.vge_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
	    (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->vge_ldata.vge_rx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the RX ring. */

	error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list,
	    VGE_RX_LIST_SZ, vge_dma_map_addr,
	    &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT);

	/* Create DMA maps for RX buffers */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
		    &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(dev)
	device_t dev;
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	struct ifnet *ifp;
	int unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->vge_dev = dev;

	mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = VGE_PCI_LOMEM;
	sc->vge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->vge_res == NULL) {
		printf("vge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->vge_btag = rman_get_bustag(sc->vge_res);
	sc->vge_bhandle = rman_get_bushandle(sc->vge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->vge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->vge_irq == NULL) {
		printf("vge%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define VGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MAXBSIZE, VGE_NSEG_NEW,	/* maxsize, nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&sc->vge_parent_tag);
	if (error)
		goto fail;

	error = vge_allocmem(dev, sc);

	if (error)
		goto fail;

	ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->vge_miibus,
	    vge_ifmedia_upd, vge_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vge_ioctl;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_start = vge_start;
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	ifp->if_watchdog = vge_watchdog;
	ifp->if_init = vge_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = VGE_IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, vge_intr, sc, &sc->vge_intrhand);

	if (error) {
		printf("vge%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vge_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(dev)
	device_t dev;
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		vge_stop(sc);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to vge_ioctl(),
		 * which will try to call vge_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the vge_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(ifp);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    VGE_PCI_LOMEM, sc->vge_res);
	if (ifp)
		if_free(ifp);

	/* Unload and free the RX DMA ring memory and map */

	if (sc->vge_ldata.vge_rx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
		    sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
		    sc->vge_ldata.vge_rx_list,
		    sc->vge_ldata.vge_rx_list_map);
		bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */

	if (sc->vge_ldata.vge_tx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
		    sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
		    sc->vge_ldata.vge_tx_list,
		    sc->vge_ldata.vge_tx_list_map);
		bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */

	if (sc->vge_ldata.vge_mtag) {
		for (i = 0; i < VGE_TX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_tx_dmamap[i]);
		for (i = 0; i < VGE_RX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_rx_dmamap[i]);
		bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	}

	if (sc->vge_parent_tag)
		bus_dma_tag_destroy(sc->vge_parent_tag);

	mtx_destroy(&sc->vge_mtx);

	return (0);
}

static int
vge_newbuf(sc, idx, m)
	struct vge_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct vge_dmaload_arg arg;
	struct mbuf *n = NULL;
	int i, error;

	if (m == NULL) {
		n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return (ENOBUFS);
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;


#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
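	/*
	 * In practice that means the cluster is loaded for DMA at an
	 * offset of VGE_ETHER_ALIGN bytes just below, and on
	 * strict-alignment hosts vge_fixup_rx() later slides the
	 * received frame back ETHER_ALIGN bytes with a 16-bit word
	 * copy so the payload ends up aligned for the protocol stack.
	 */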
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_maxsegs = 1;
	arg.vge_flags = 0;

	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc,
	    &arg, BUS_DMA_NOWAIT);
	if (error || arg.vge_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return (ENOMEM);
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */

#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

static int
vge_tx_list_init(sc)
	struct vge_softc *sc;
{
	bzero((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

static int
vge_rx_list_init(sc)
	struct vge_softc *sc;
{
	int i;

	bzero((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero((char *)&sc->vge_ldata.vge_rx_mbuf,
	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return (0);
}

#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(m)
	struct mbuf *m;
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;

	return;
}
#endif

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
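 * Fragments are accumulated on the vge_head/vge_tail mbuf chain and
 * handed up to the stack as a single chain once the final fragment
 * (the one without the 'start of frame' bit) arrives.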
 */
static int
vge_rxeof(sc)
	struct vge_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	int lim = 0;
	struct vge_rx_desc *cur_rx;
	u_int32_t rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);
	ifp = sc->vge_ifp;
	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {

#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_rx_dmamap[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL)
				sc->vge_head = sc->vge_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM)
		    && !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_capenable & IFCAP_RXCSUM) {

			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (rxctl & VGE_RDCTL_IPCSUMOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			/*
			 * The 32-bit rxctl register is stored in little-endian.
			 * However, the 16-bit vlan tag is stored in big-endian,
			 * so we have to byte swap it.
			 */
			m->m_pkthdr.ether_vtag =
			    bswap16(rxctl & VGE_RDCTL_VLANID);
			m->m_flags |= M_VLANTAG;
		}

		VGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VGE_LOCK(sc);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;

	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);

	return (lim);
}

static void
vge_txeof(sc)
	struct vge_softc *sc;
{
	struct ifnet *ifp;
	u_int32_t txstat;
	int idx;

	ifp = sc->vge_ifp;
	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {

		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
		    sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */

	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) {
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
	}

	return;
}

static void
vge_tick(xsc)
	void *xsc;
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	sc = xsc;
	ifp = sc->vge_ifp;
	VGE_LOCK(sc);
	mii = device_get_softc(sc->vge_miibus);

	mii_tick(mii);
	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE)) {
			sc->vge_link = 0;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_DOWN);
		}
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if_link_state_change(sc->vge_ifp,
			    LINK_STATE_UP);
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				taskqueue_enqueue(taskqueue_swi,
				    &sc->vge_txtask);
		}
	}

	VGE_UNLOCK(sc);

	return;
}

#ifdef DEVICE_POLLING
static int
vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	VGE_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		goto done;

	sc->rxcycles = count;
	rx_npkts = vge_rxeof(sc);
	vge_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int32_t status;
		status = CSR_READ_4(sc, VGE_ISR);
		if (status == 0xFFFFFFFF)
			goto done;
		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & VGE_ISR_TXDMA_STALL ||
		    status & VGE_ISR_RXDMA_STALL)
			vge_init(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			ifp->if_ierrors++;
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}
	}
done:
	VGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
vge_intr(arg)
	void *arg;
{
	struct vge_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;

	if (sc->suspended) {
		return;
	}

	VGE_LOCK(sc);
	ifp = sc->vge_ifp;

	if (!(ifp->if_flags & IFF_UP)) {
		VGE_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		VGE_UNLOCK(sc);
		return;
	}
#endif

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {

		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffffffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(sc);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	VGE_UNLOCK(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask);

	return;
}

static int
vge_encap(sc, m_head, idx)
	struct vge_softc *sc;
	struct mbuf *m_head;
	int idx;
{
	struct mbuf *m_new = NULL;
	struct vge_dmaload_arg arg;
	bus_dmamap_t map;
	int error;

	if (sc->vge_ldata.vge_tx_free <= 2)
		return (EFBIG);

	arg.vge_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.vge_flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.vge_flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.vge_flags |= VGE_TDCTL_UDPCSUM;

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_m0 = m_head;
	arg.vge_maxsegs = VGE_TX_FRAGS;

	map = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
	    m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		if_printf(sc->vge_ifp, "can't map mbuf (error %d)\n", error);
		return (ENOBUFS);
	}

	/* Too many segments to map, coalesce into a single mbuf */

	if (error || arg.vge_maxsegs == 0) {
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return (1);
		else
			m_head = m_new;

		arg.sc = sc;
		arg.vge_m0 = m_head;
		arg.vge_idx = idx;
		arg.vge_maxsegs = 1;

		error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
		    m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
		if (error) {
			if_printf(sc->vge_ifp, "can't map mbuf (error %d)\n",
			    error);
			return (EFBIG);
		}
	}

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */

	if (m_head->m_flags & M_VLANTAG)
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(m_head->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG);

	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);

	return (0);
}

static void
vge_tx_task(arg, npending)
	void *arg;
	int npending;
{
	struct ifnet *ifp;

	ifp = arg;
	vge_start(ifp);

	return;
}

/*
 * Main transmit routine.
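 * Frames are dequeued from the interface send queue and handed to
 * vge_encap() one descriptor at a time; each new descriptor is
 * chained to its predecessor by setting the VGE_TXDESC_Q bit, and
 * once the ring has been flushed the queue is kicked with a wakeup
 * command and the interrupt moderation timer is armed.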
 */

static void
vge_start(ifp)
	struct ifnet *ifp;
{
	struct vge_softc *sc;
	struct mbuf *m_head = NULL;
	int idx, pidx = 0;

	sc = ifp->if_softc;
	VGE_LOCK(sc);

	if (!sc->vge_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		VGE_UNLOCK(sc);
		return;
	}

	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		VGE_UNLOCK(sc);
		return;
	}

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;


	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (vge_encap(sc, m_head, idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx) {
		VGE_UNLOCK(sc);
		return;
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	VGE_UNLOCK(sc);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

static void
vge_init(xsc)
	void *xsc;
{
	struct vge_softc *sc = xsc;
	struct ifnet *ifp = sc->vge_ifp;
	struct mii_data *mii;
	int i;

	VGE_LOCK(sc);
	mii = device_get_softc(sc->vge_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */

	vge_rx_list_init(sc);
	vge_tx_list_init(sc);

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
	}

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
	}

	/* Init the CAM filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */

	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 400 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif
	{
	/*
	 * Enable interrupts.
	 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->vge_if_flags = 0;
	sc->vge_link = 0;

	VGE_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	mii = device_get_softc(sc->vge_miibus);
	mii_mediachg(mii);
	VGE_UNLOCK(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct vge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vge_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

static void
vge_miibus_statchg(dev)
	device_t dev;
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

static void
vge_miibus_statchg(dev)
	device_t dev;
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */

	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		} else {
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		}
		break;
	default:
		device_printf(dev, "unknown media type: %x\n",
		    IFM_SUBTYPE(ife->ifm_media));
		break;
	}

	return;
}
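
/*
 * Summary of the DIAGCTL settings made by vge_miibus_statchg() above:
 *
 *	media selection		MACFORCE	FDXFORCE
 *	autoselect		clear		clear
 *	1000baseT		set		clear (FDX implied)
 *	10/100, full duplex	set		set
 *	10/100, half duplex	set		clear
 */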

static int
vge_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->vge_if_flags & IFF_PROMISC) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else
				vge_init(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		vge_setmulti(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vge_poll, ifp);
				if (error)
					return (error);
				VGE_LOCK(sc);
				/* Disable interrupts */
				CSR_WRITE_4(sc, VGE_IMR, 0);
				CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
				ifp->if_capenable |= IFCAP_POLLING;
				VGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VGE_LOCK(sc);
				CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
				CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
				CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VGE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
	    }
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vge_watchdog(ifp)
	struct ifnet *ifp;
{
	struct vge_softc *sc;

	sc = ifp->if_softc;
	VGE_LOCK(sc);
	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_rxeof(sc);

	vge_init(sc);

	VGE_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(sc)
	struct vge_softc *sc;
{
	register int i;
	struct ifnet *ifp;

	VGE_LOCK(sc);
	ifp = sc->vge_ifp;
	ifp->if_timer = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}

	VGE_UNLOCK(sc);

	return;
}

/*
 * Device suspend routine. Stop the interface and mark it suspended;
 * the generic PCI bus code handles saving and restoring the chip's
 * PCI configuration state across the suspend.
 */
static int
vge_suspend(dev)
	device_t dev;
{
	struct vge_softc *sc;

	sc = device_get_softc(dev);

	vge_stop(sc);

	sc->suspended = 1;

	return (0);
}
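
/*
 * Note on the suspend/resume pairing (an observation about this code,
 * not a chipset requirement): vge_stop() above leaves the hardware
 * quiesced with both descriptor rings emptied, so vge_resume() below
 * only has to re-enable PCI access and rerun vge_init() if the
 * interface was marked up.
 */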

/*
 * Device resume routine. Re-enable bus mastering and memory-mapped
 * I/O in case the BIOS didn't, and restart the interface if it was
 * up when we suspended.
 */
static int
vge_resume(dev)
	device_t dev;
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vge_ifp;

	/* Re-enable bus mastering and memory-mapped I/O. */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	/* Reinitialize the interface if necessary. */
	if (ifp->if_flags & IFF_UP)
		vge_init(sc);

	sc->suspended = 0;

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vge_shutdown(dev)
	device_t dev;
{
	struct vge_softc *sc;

	sc = device_get_softc(dev);

	vge_stop(sc);

	return (0);
}
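
/*
 * For reference, a sketch of how the entry points above are wired
 * into the driver's method table (the actual table lives near the
 * top of this file, alongside the probe/attach/detach methods):
 *
 *	DEVMETHOD(device_suspend,	vge_suspend),
 *	DEVMETHOD(device_resume,	vge_resume),
 *	DEVMETHOD(device_shutdown,	vge_shutdown),
 */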