if_vge.c revision 200558
1/*- 2 * Copyright (c) 2004 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/dev/vge/if_vge.c 200558 2009-12-14 22:55:20Z yongari $"); 35 36/* 37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44/* 45 * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that 46 * combines a tri-speed ethernet MAC and PHY, with the following 47 * features: 48 * 49 * o Jumbo frame support up to 16K 50 * o Transmit and receive flow control 51 * o IPv4 checksum offload 52 * o VLAN tag insertion and stripping 53 * o TCP large send 54 * o 64-bit multicast hash table filter 55 * o 64 entry CAM filter 56 * o 16K RX FIFO and 48K TX FIFO memory 57 * o Interrupt moderation 58 * 59 * The VT6122 supports up to four transmit DMA queues. The descriptors 60 * in the transmit ring can address up to 7 data fragments; frames which 61 * span more than 7 data buffers must be coalesced, but in general the 62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 63 * long. The receive descriptors address only a single buffer. 64 * 65 * There are two peculiar design issues with the VT6122. One is that 66 * receive data buffers must be aligned on a 32-bit boundary. This is 67 * not a problem where the VT6122 is used as a LOM device in x86-based 68 * systems, but on architectures that generate unaligned access traps, we 69 * have to do some copying. 70 * 71 * The other issue has to do with the way 64-bit addresses are handled. 72 * The DMA descriptors only allow you to specify 48 bits of addressing 73 * information. The remaining 16 bits are specified using one of the 74 * I/O registers. If you only have a 32-bit system, then this isn't 75 * an issue, but if you have a 64-bit system and more than 4GB of 76 * memory, you must have to make sure your network data buffers reside 77 * in the same 48-bit 'segment.' 78 * 79 * Special thanks to Ryan Fu at VIA Networking for providing documentation 80 * and sample NICs for testing. 81 */ 82 83#ifdef HAVE_KERNEL_OPTION_HEADERS 84#include "opt_device_polling.h" 85#endif 86 87#include <sys/param.h> 88#include <sys/endian.h> 89#include <sys/systm.h> 90#include <sys/sockio.h> 91#include <sys/mbuf.h> 92#include <sys/malloc.h> 93#include <sys/module.h> 94#include <sys/kernel.h> 95#include <sys/socket.h> 96 97#include <net/if.h> 98#include <net/if_arp.h> 99#include <net/ethernet.h> 100#include <net/if_dl.h> 101#include <net/if_media.h> 102#include <net/if_types.h> 103#include <net/if_vlan_var.h> 104 105#include <net/bpf.h> 106 107#include <machine/bus.h> 108#include <machine/resource.h> 109#include <sys/bus.h> 110#include <sys/rman.h> 111 112#include <dev/mii/mii.h> 113#include <dev/mii/miivar.h> 114 115#include <dev/pci/pcireg.h> 116#include <dev/pci/pcivar.h> 117 118MODULE_DEPEND(vge, pci, 1, 1, 1); 119MODULE_DEPEND(vge, ether, 1, 1, 1); 120MODULE_DEPEND(vge, miibus, 1, 1, 1); 121 122/* "device miibus" required. See GENERIC if you get errors here. */ 123#include "miibus_if.h" 124 125#include <dev/vge/if_vgereg.h> 126#include <dev/vge/if_vgevar.h> 127 128#define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 129 130/* Tunables */ 131static int msi_disable = 0; 132TUNABLE_INT("hw.vge.msi_disable", &msi_disable); 133 134/* 135 * Various supported device vendors/types and their names. 136 */ 137static struct vge_type vge_devs[] = { 138 { VIA_VENDORID, VIA_DEVICEID_61XX, 139 "VIA Networking Gigabit Ethernet" }, 140 { 0, 0, NULL } 141}; 142 143static int vge_attach(device_t); 144static int vge_detach(device_t); 145static int vge_probe(device_t); 146static int vge_resume(device_t); 147static int vge_shutdown(device_t); 148static int vge_suspend(device_t); 149 150static void vge_cam_clear(struct vge_softc *); 151static int vge_cam_set(struct vge_softc *, uint8_t *); 152static void vge_discard_rxbuf(struct vge_softc *, int); 153static int vge_dma_alloc(struct vge_softc *); 154static void vge_dma_free(struct vge_softc *); 155static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 156#ifdef VGE_EEPROM 157static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *); 158#endif 159static int vge_encap(struct vge_softc *, struct mbuf **); 160#ifndef __NO_STRICT_ALIGNMENT 161static __inline void 162 vge_fixup_rx(struct mbuf *); 163#endif 164static void vge_freebufs(struct vge_softc *); 165static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 166static int vge_ifmedia_upd(struct ifnet *); 167static void vge_init(void *); 168static void vge_init_locked(struct vge_softc *); 169static void vge_intr(void *); 170static int vge_ioctl(struct ifnet *, u_long, caddr_t); 171static void vge_link_statchg(void *); 172static int vge_miibus_readreg(device_t, int, int); 173static void vge_miibus_statchg(device_t); 174static int vge_miibus_writereg(device_t, int, int, int); 175static void vge_miipoll_start(struct vge_softc *); 176static void vge_miipoll_stop(struct vge_softc *); 177static int vge_newbuf(struct vge_softc *, int); 178static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int); 179static void vge_reset(struct vge_softc *); 180static int vge_rx_list_init(struct vge_softc *); 181static int vge_rxeof(struct vge_softc *, int); 182static void vge_setmulti(struct vge_softc *); 183static void vge_start(struct ifnet *); 184static void vge_start_locked(struct ifnet *); 185static void vge_stop(struct vge_softc *); 186static int vge_tx_list_init(struct vge_softc *); 187static void vge_txeof(struct vge_softc *); 188static void vge_watchdog(void *); 189 190static device_method_t vge_methods[] = { 191 /* Device interface */ 192 DEVMETHOD(device_probe, vge_probe), 193 DEVMETHOD(device_attach, vge_attach), 194 DEVMETHOD(device_detach, vge_detach), 195 DEVMETHOD(device_suspend, vge_suspend), 196 DEVMETHOD(device_resume, vge_resume), 197 DEVMETHOD(device_shutdown, vge_shutdown), 198 199 /* bus interface */ 200 DEVMETHOD(bus_print_child, bus_generic_print_child), 201 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 202 203 /* MII interface */ 204 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 205 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 206 DEVMETHOD(miibus_statchg, vge_miibus_statchg), 207 208 { 0, 0 } 209}; 210 211static driver_t vge_driver = { 212 "vge", 213 vge_methods, 214 sizeof(struct vge_softc) 215}; 216 217static devclass_t vge_devclass; 218 219DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); 220DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0); 221 222#ifdef VGE_EEPROM 223/* 224 * Read a word of data stored in the EEPROM at address 'addr.' 225 */ 226static void 227vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest) 228{ 229 int i; 230 uint16_t word = 0; 231 232 /* 233 * Enter EEPROM embedded programming mode. In order to 234 * access the EEPROM at all, we first have to set the 235 * EELOAD bit in the CHIPCFG2 register. 236 */ 237 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 238 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 239 240 /* Select the address of the word we want to read */ 241 CSR_WRITE_1(sc, VGE_EEADDR, addr); 242 243 /* Issue read command */ 244 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 245 246 /* Wait for the done bit to be set. */ 247 for (i = 0; i < VGE_TIMEOUT; i++) { 248 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 249 break; 250 } 251 252 if (i == VGE_TIMEOUT) { 253 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 254 *dest = 0; 255 return; 256 } 257 258 /* Read the result */ 259 word = CSR_READ_2(sc, VGE_EERDDAT); 260 261 /* Turn off EEPROM access mode. */ 262 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 263 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 264 265 *dest = word; 266} 267#endif 268 269/* 270 * Read a sequence of words from the EEPROM. 271 */ 272static void 273vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap) 274{ 275 int i; 276#ifdef VGE_EEPROM 277 uint16_t word = 0, *ptr; 278 279 for (i = 0; i < cnt; i++) { 280 vge_eeprom_getword(sc, off + i, &word); 281 ptr = (uint16_t *)(dest + (i * 2)); 282 if (swap) 283 *ptr = ntohs(word); 284 else 285 *ptr = word; 286 } 287#else 288 for (i = 0; i < ETHER_ADDR_LEN; i++) 289 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 290#endif 291} 292 293static void 294vge_miipoll_stop(struct vge_softc *sc) 295{ 296 int i; 297 298 CSR_WRITE_1(sc, VGE_MIICMD, 0); 299 300 for (i = 0; i < VGE_TIMEOUT; i++) { 301 DELAY(1); 302 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 303 break; 304 } 305 306 if (i == VGE_TIMEOUT) 307 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 308} 309 310static void 311vge_miipoll_start(struct vge_softc *sc) 312{ 313 int i; 314 315 /* First, make sure we're idle. */ 316 317 CSR_WRITE_1(sc, VGE_MIICMD, 0); 318 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 319 320 for (i = 0; i < VGE_TIMEOUT; i++) { 321 DELAY(1); 322 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 323 break; 324 } 325 326 if (i == VGE_TIMEOUT) { 327 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 328 return; 329 } 330 331 /* Now enable auto poll mode. */ 332 333 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 334 335 /* And make sure it started. */ 336 337 for (i = 0; i < VGE_TIMEOUT; i++) { 338 DELAY(1); 339 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 340 break; 341 } 342 343 if (i == VGE_TIMEOUT) 344 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 345} 346 347static int 348vge_miibus_readreg(device_t dev, int phy, int reg) 349{ 350 struct vge_softc *sc; 351 int i; 352 uint16_t rval = 0; 353 354 sc = device_get_softc(dev); 355 356 if (phy != sc->vge_phyaddr) 357 return (0); 358 359 vge_miipoll_stop(sc); 360 361 /* Specify the register we want to read. */ 362 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 363 364 /* Issue read command. */ 365 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 366 367 /* Wait for the read command bit to self-clear. */ 368 for (i = 0; i < VGE_TIMEOUT; i++) { 369 DELAY(1); 370 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) 371 break; 372 } 373 374 if (i == VGE_TIMEOUT) 375 device_printf(sc->vge_dev, "MII read timed out\n"); 376 else 377 rval = CSR_READ_2(sc, VGE_MIIDATA); 378 379 vge_miipoll_start(sc); 380 381 return (rval); 382} 383 384static int 385vge_miibus_writereg(device_t dev, int phy, int reg, int data) 386{ 387 struct vge_softc *sc; 388 int i, rval = 0; 389 390 sc = device_get_softc(dev); 391 392 if (phy != sc->vge_phyaddr) 393 return (0); 394 395 vge_miipoll_stop(sc); 396 397 /* Specify the register we want to write. */ 398 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 399 400 /* Specify the data we want to write. */ 401 CSR_WRITE_2(sc, VGE_MIIDATA, data); 402 403 /* Issue write command. */ 404 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); 405 406 /* Wait for the write command bit to self-clear. */ 407 for (i = 0; i < VGE_TIMEOUT; i++) { 408 DELAY(1); 409 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) 410 break; 411 } 412 413 if (i == VGE_TIMEOUT) { 414 device_printf(sc->vge_dev, "MII write timed out\n"); 415 rval = EIO; 416 } 417 418 vge_miipoll_start(sc); 419 420 return (rval); 421} 422 423static void 424vge_cam_clear(struct vge_softc *sc) 425{ 426 int i; 427 428 /* 429 * Turn off all the mask bits. This tells the chip 430 * that none of the entries in the CAM filter are valid. 431 * desired entries will be enabled as we fill the filter in. 432 */ 433 434 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 435 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 436 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); 437 for (i = 0; i < 8; i++) 438 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 439 440 /* Clear the VLAN filter too. */ 441 442 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0); 443 for (i = 0; i < 8; i++) 444 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 445 446 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 447 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 448 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 449 450 sc->vge_camidx = 0; 451} 452 453static int 454vge_cam_set(struct vge_softc *sc, uint8_t *addr) 455{ 456 int i, error = 0; 457 458 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 459 return (ENOSPC); 460 461 /* Select the CAM data page. */ 462 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 463 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); 464 465 /* Set the filter entry we want to update and enable writing. */ 466 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx); 467 468 /* Write the address to the CAM registers */ 469 for (i = 0; i < ETHER_ADDR_LEN; i++) 470 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); 471 472 /* Issue a write command. */ 473 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); 474 475 /* Wake for it to clear. */ 476 for (i = 0; i < VGE_TIMEOUT; i++) { 477 DELAY(1); 478 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) 479 break; 480 } 481 482 if (i == VGE_TIMEOUT) { 483 device_printf(sc->vge_dev, "setting CAM filter failed\n"); 484 error = EIO; 485 goto fail; 486 } 487 488 /* Select the CAM mask page. */ 489 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 490 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 491 492 /* Set the mask bit that enables this filter. */ 493 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8), 494 1<<(sc->vge_camidx & 7)); 495 496 sc->vge_camidx++; 497 498fail: 499 /* Turn off access to CAM. */ 500 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 501 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 502 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 503 504 return (error); 505} 506 507/* 508 * Program the multicast filter. We use the 64-entry CAM filter 509 * for perfect filtering. If there's more than 64 multicast addresses, 510 * we use the hash filter instead. 511 */ 512static void 513vge_setmulti(struct vge_softc *sc) 514{ 515 struct ifnet *ifp; 516 int error = 0/*, h = 0*/; 517 struct ifmultiaddr *ifma; 518 uint32_t h, hashes[2] = { 0, 0 }; 519 520 VGE_LOCK_ASSERT(sc); 521 522 ifp = sc->vge_ifp; 523 524 /* First, zot all the multicast entries. */ 525 vge_cam_clear(sc); 526 CSR_WRITE_4(sc, VGE_MAR0, 0); 527 CSR_WRITE_4(sc, VGE_MAR1, 0); 528 529 /* 530 * If the user wants allmulti or promisc mode, enable reception 531 * of all multicast frames. 532 */ 533 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 534 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF); 535 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF); 536 return; 537 } 538 539 /* Now program new ones */ 540 if_maddr_rlock(ifp); 541 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 542 if (ifma->ifma_addr->sa_family != AF_LINK) 543 continue; 544 error = vge_cam_set(sc, 545 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 546 if (error) 547 break; 548 } 549 550 /* If there were too many addresses, use the hash filter. */ 551 if (error) { 552 vge_cam_clear(sc); 553 554 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 555 if (ifma->ifma_addr->sa_family != AF_LINK) 556 continue; 557 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 558 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 559 if (h < 32) 560 hashes[0] |= (1 << h); 561 else 562 hashes[1] |= (1 << (h - 32)); 563 } 564 565 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 566 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 567 } 568 if_maddr_runlock(ifp); 569} 570 571static void 572vge_reset(struct vge_softc *sc) 573{ 574 int i; 575 576 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 577 578 for (i = 0; i < VGE_TIMEOUT; i++) { 579 DELAY(5); 580 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 581 break; 582 } 583 584 if (i == VGE_TIMEOUT) { 585 device_printf(sc->vge_dev, "soft reset timed out\n"); 586 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 587 DELAY(2000); 588 } 589 590 DELAY(5000); 591} 592 593/* 594 * Probe for a VIA gigabit chip. Check the PCI vendor and device 595 * IDs against our list and return a device name if we find a match. 596 */ 597static int 598vge_probe(device_t dev) 599{ 600 struct vge_type *t; 601 602 t = vge_devs; 603 604 while (t->vge_name != NULL) { 605 if ((pci_get_vendor(dev) == t->vge_vid) && 606 (pci_get_device(dev) == t->vge_did)) { 607 device_set_desc(dev, t->vge_name); 608 return (BUS_PROBE_DEFAULT); 609 } 610 t++; 611 } 612 613 return (ENXIO); 614} 615 616/* 617 * Map a single buffer address. 618 */ 619 620struct vge_dmamap_arg { 621 bus_addr_t vge_busaddr; 622}; 623 624static void 625vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 626{ 627 struct vge_dmamap_arg *ctx; 628 629 if (error != 0) 630 return; 631 632 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 633 634 ctx = (struct vge_dmamap_arg *)arg; 635 ctx->vge_busaddr = segs[0].ds_addr; 636} 637 638static int 639vge_dma_alloc(struct vge_softc *sc) 640{ 641 struct vge_dmamap_arg ctx; 642 struct vge_txdesc *txd; 643 struct vge_rxdesc *rxd; 644 bus_addr_t lowaddr, tx_ring_end, rx_ring_end; 645 int error, i; 646 647 lowaddr = BUS_SPACE_MAXADDR; 648 649again: 650 /* Create parent ring tag. */ 651 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 652 1, 0, /* algnmnt, boundary */ 653 lowaddr, /* lowaddr */ 654 BUS_SPACE_MAXADDR, /* highaddr */ 655 NULL, NULL, /* filter, filterarg */ 656 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 657 0, /* nsegments */ 658 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 659 0, /* flags */ 660 NULL, NULL, /* lockfunc, lockarg */ 661 &sc->vge_cdata.vge_ring_tag); 662 if (error != 0) { 663 device_printf(sc->vge_dev, 664 "could not create parent DMA tag.\n"); 665 goto fail; 666 } 667 668 /* Create tag for Tx ring. */ 669 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 670 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 671 BUS_SPACE_MAXADDR, /* lowaddr */ 672 BUS_SPACE_MAXADDR, /* highaddr */ 673 NULL, NULL, /* filter, filterarg */ 674 VGE_TX_LIST_SZ, /* maxsize */ 675 1, /* nsegments */ 676 VGE_TX_LIST_SZ, /* maxsegsize */ 677 0, /* flags */ 678 NULL, NULL, /* lockfunc, lockarg */ 679 &sc->vge_cdata.vge_tx_ring_tag); 680 if (error != 0) { 681 device_printf(sc->vge_dev, 682 "could not allocate Tx ring DMA tag.\n"); 683 goto fail; 684 } 685 686 /* Create tag for Rx ring. */ 687 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 688 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 689 BUS_SPACE_MAXADDR, /* lowaddr */ 690 BUS_SPACE_MAXADDR, /* highaddr */ 691 NULL, NULL, /* filter, filterarg */ 692 VGE_RX_LIST_SZ, /* maxsize */ 693 1, /* nsegments */ 694 VGE_RX_LIST_SZ, /* maxsegsize */ 695 0, /* flags */ 696 NULL, NULL, /* lockfunc, lockarg */ 697 &sc->vge_cdata.vge_rx_ring_tag); 698 if (error != 0) { 699 device_printf(sc->vge_dev, 700 "could not allocate Rx ring DMA tag.\n"); 701 goto fail; 702 } 703 704 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 705 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag, 706 (void **)&sc->vge_rdata.vge_tx_ring, 707 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 708 &sc->vge_cdata.vge_tx_ring_map); 709 if (error != 0) { 710 device_printf(sc->vge_dev, 711 "could not allocate DMA'able memory for Tx ring.\n"); 712 goto fail; 713 } 714 715 ctx.vge_busaddr = 0; 716 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag, 717 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring, 718 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 719 if (error != 0 || ctx.vge_busaddr == 0) { 720 device_printf(sc->vge_dev, 721 "could not load DMA'able memory for Tx ring.\n"); 722 goto fail; 723 } 724 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr; 725 726 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 727 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag, 728 (void **)&sc->vge_rdata.vge_rx_ring, 729 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 730 &sc->vge_cdata.vge_rx_ring_map); 731 if (error != 0) { 732 device_printf(sc->vge_dev, 733 "could not allocate DMA'able memory for Rx ring.\n"); 734 goto fail; 735 } 736 737 ctx.vge_busaddr = 0; 738 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag, 739 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring, 740 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 741 if (error != 0 || ctx.vge_busaddr == 0) { 742 device_printf(sc->vge_dev, 743 "could not load DMA'able memory for Rx ring.\n"); 744 goto fail; 745 } 746 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr; 747 748 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 749 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ; 750 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ; 751 if ((VGE_ADDR_HI(tx_ring_end) != 752 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) || 753 (VGE_ADDR_HI(rx_ring_end) != 754 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) || 755 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) { 756 device_printf(sc->vge_dev, "4GB boundary crossed, " 757 "switching to 32bit DMA address mode.\n"); 758 vge_dma_free(sc); 759 /* Limit DMA address space to 32bit and try again. */ 760 lowaddr = BUS_SPACE_MAXADDR_32BIT; 761 goto again; 762 } 763 764 /* Create parent buffer tag. */ 765 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 766 1, 0, /* algnmnt, boundary */ 767 VGE_BUF_DMA_MAXADDR, /* lowaddr */ 768 BUS_SPACE_MAXADDR, /* highaddr */ 769 NULL, NULL, /* filter, filterarg */ 770 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 771 0, /* nsegments */ 772 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 773 0, /* flags */ 774 NULL, NULL, /* lockfunc, lockarg */ 775 &sc->vge_cdata.vge_buffer_tag); 776 if (error != 0) { 777 device_printf(sc->vge_dev, 778 "could not create parent buffer DMA tag.\n"); 779 goto fail; 780 } 781 782 /* Create tag for Tx buffers. */ 783 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 784 1, 0, /* algnmnt, boundary */ 785 BUS_SPACE_MAXADDR, /* lowaddr */ 786 BUS_SPACE_MAXADDR, /* highaddr */ 787 NULL, NULL, /* filter, filterarg */ 788 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */ 789 VGE_MAXTXSEGS, /* nsegments */ 790 MCLBYTES, /* maxsegsize */ 791 0, /* flags */ 792 NULL, NULL, /* lockfunc, lockarg */ 793 &sc->vge_cdata.vge_tx_tag); 794 if (error != 0) { 795 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n"); 796 goto fail; 797 } 798 799 /* Create tag for Rx buffers. */ 800 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 801 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 802 BUS_SPACE_MAXADDR, /* lowaddr */ 803 BUS_SPACE_MAXADDR, /* highaddr */ 804 NULL, NULL, /* filter, filterarg */ 805 MCLBYTES, /* maxsize */ 806 1, /* nsegments */ 807 MCLBYTES, /* maxsegsize */ 808 0, /* flags */ 809 NULL, NULL, /* lockfunc, lockarg */ 810 &sc->vge_cdata.vge_rx_tag); 811 if (error != 0) { 812 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n"); 813 goto fail; 814 } 815 816 /* Create DMA maps for Tx buffers. */ 817 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 818 txd = &sc->vge_cdata.vge_txdesc[i]; 819 txd->tx_m = NULL; 820 txd->tx_dmamap = NULL; 821 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0, 822 &txd->tx_dmamap); 823 if (error != 0) { 824 device_printf(sc->vge_dev, 825 "could not create Tx dmamap.\n"); 826 goto fail; 827 } 828 } 829 /* Create DMA maps for Rx buffers. */ 830 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 831 &sc->vge_cdata.vge_rx_sparemap)) != 0) { 832 device_printf(sc->vge_dev, 833 "could not create spare Rx dmamap.\n"); 834 goto fail; 835 } 836 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 837 rxd = &sc->vge_cdata.vge_rxdesc[i]; 838 rxd->rx_m = NULL; 839 rxd->rx_dmamap = NULL; 840 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 841 &rxd->rx_dmamap); 842 if (error != 0) { 843 device_printf(sc->vge_dev, 844 "could not create Rx dmamap.\n"); 845 goto fail; 846 } 847 } 848 849fail: 850 return (error); 851} 852 853static void 854vge_dma_free(struct vge_softc *sc) 855{ 856 struct vge_txdesc *txd; 857 struct vge_rxdesc *rxd; 858 int i; 859 860 /* Tx ring. */ 861 if (sc->vge_cdata.vge_tx_ring_tag != NULL) { 862 if (sc->vge_cdata.vge_tx_ring_map) 863 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag, 864 sc->vge_cdata.vge_tx_ring_map); 865 if (sc->vge_cdata.vge_tx_ring_map && 866 sc->vge_rdata.vge_tx_ring) 867 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag, 868 sc->vge_rdata.vge_tx_ring, 869 sc->vge_cdata.vge_tx_ring_map); 870 sc->vge_rdata.vge_tx_ring = NULL; 871 sc->vge_cdata.vge_tx_ring_map = NULL; 872 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag); 873 sc->vge_cdata.vge_tx_ring_tag = NULL; 874 } 875 /* Rx ring. */ 876 if (sc->vge_cdata.vge_rx_ring_tag != NULL) { 877 if (sc->vge_cdata.vge_rx_ring_map) 878 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag, 879 sc->vge_cdata.vge_rx_ring_map); 880 if (sc->vge_cdata.vge_rx_ring_map && 881 sc->vge_rdata.vge_rx_ring) 882 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag, 883 sc->vge_rdata.vge_rx_ring, 884 sc->vge_cdata.vge_rx_ring_map); 885 sc->vge_rdata.vge_rx_ring = NULL; 886 sc->vge_cdata.vge_rx_ring_map = NULL; 887 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag); 888 sc->vge_cdata.vge_rx_ring_tag = NULL; 889 } 890 /* Tx buffers. */ 891 if (sc->vge_cdata.vge_tx_tag != NULL) { 892 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 893 txd = &sc->vge_cdata.vge_txdesc[i]; 894 if (txd->tx_dmamap != NULL) { 895 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag, 896 txd->tx_dmamap); 897 txd->tx_dmamap = NULL; 898 } 899 } 900 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag); 901 sc->vge_cdata.vge_tx_tag = NULL; 902 } 903 /* Rx buffers. */ 904 if (sc->vge_cdata.vge_rx_tag != NULL) { 905 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 906 rxd = &sc->vge_cdata.vge_rxdesc[i]; 907 if (rxd->rx_dmamap != NULL) { 908 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 909 rxd->rx_dmamap); 910 rxd->rx_dmamap = NULL; 911 } 912 } 913 if (sc->vge_cdata.vge_rx_sparemap != NULL) { 914 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 915 sc->vge_cdata.vge_rx_sparemap); 916 sc->vge_cdata.vge_rx_sparemap = NULL; 917 } 918 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag); 919 sc->vge_cdata.vge_rx_tag = NULL; 920 } 921 922 if (sc->vge_cdata.vge_buffer_tag != NULL) { 923 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag); 924 sc->vge_cdata.vge_buffer_tag = NULL; 925 } 926 if (sc->vge_cdata.vge_ring_tag != NULL) { 927 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag); 928 sc->vge_cdata.vge_ring_tag = NULL; 929 } 930} 931 932/* 933 * Attach the interface. Allocate softc structures, do ifmedia 934 * setup and ethernet/BPF attach. 935 */ 936static int 937vge_attach(device_t dev) 938{ 939 u_char eaddr[ETHER_ADDR_LEN]; 940 struct vge_softc *sc; 941 struct ifnet *ifp; 942 int error = 0, cap, i, msic, rid; 943 944 sc = device_get_softc(dev); 945 sc->vge_dev = dev; 946 947 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 948 MTX_DEF); 949 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0); 950 951 /* 952 * Map control/status registers. 953 */ 954 pci_enable_busmaster(dev); 955 956 rid = PCIR_BAR(1); 957 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 958 RF_ACTIVE); 959 960 if (sc->vge_res == NULL) { 961 device_printf(dev, "couldn't map ports/memory\n"); 962 error = ENXIO; 963 goto fail; 964 } 965 966 if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) == 0) { 967 sc->vge_flags |= VGE_FLAG_PCIE; 968 sc->vge_expcap = cap; 969 } 970 rid = 0; 971 msic = pci_msi_count(dev); 972 if (msi_disable == 0 && msic > 0) { 973 msic = 1; 974 if (pci_alloc_msi(dev, &msic) == 0) { 975 if (msic == 1) { 976 sc->vge_flags |= VGE_FLAG_MSI; 977 device_printf(dev, "Using %d MSI message\n", 978 msic); 979 rid = 1; 980 } else 981 pci_release_msi(dev); 982 } 983 } 984 985 /* Allocate interrupt */ 986 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 987 ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE); 988 if (sc->vge_irq == NULL) { 989 device_printf(dev, "couldn't map interrupt\n"); 990 error = ENXIO; 991 goto fail; 992 } 993 994 /* Reset the adapter. */ 995 vge_reset(sc); 996 /* Reload EEPROM. */ 997 CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 998 for (i = 0; i < VGE_TIMEOUT; i++) { 999 DELAY(5); 1000 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 1001 break; 1002 } 1003 if (i == VGE_TIMEOUT) 1004 device_printf(dev, "EEPROM reload timed out\n"); 1005 /* 1006 * Clear PACPI as EEPROM reload will set the bit. Otherwise 1007 * MAC will receive magic packet which in turn confuses 1008 * controller. 1009 */ 1010 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 1011 1012 /* 1013 * Get station address from the EEPROM. 1014 */ 1015 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); 1016 /* 1017 * Save configured PHY address. 1018 * It seems the PHY address of PCIe controllers just 1019 * reflects media jump strapping status so we assume the 1020 * internal PHY address of PCIe controller is at 1. 1021 */ 1022 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 1023 sc->vge_phyaddr = 1; 1024 else 1025 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) & 1026 VGE_MIICFG_PHYADDR; 1027 error = vge_dma_alloc(sc); 1028 if (error) 1029 goto fail; 1030 1031 ifp = sc->vge_ifp = if_alloc(IFT_ETHER); 1032 if (ifp == NULL) { 1033 device_printf(dev, "can not if_alloc()\n"); 1034 error = ENOSPC; 1035 goto fail; 1036 } 1037 1038 /* Do MII setup */ 1039 if (mii_phy_probe(dev, &sc->vge_miibus, 1040 vge_ifmedia_upd, vge_ifmedia_sts)) { 1041 device_printf(dev, "MII without any phy!\n"); 1042 error = ENXIO; 1043 goto fail; 1044 } 1045 1046 ifp->if_softc = sc; 1047 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1048 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1049 ifp->if_ioctl = vge_ioctl; 1050 ifp->if_capabilities = IFCAP_VLAN_MTU; 1051 ifp->if_start = vge_start; 1052 ifp->if_hwassist = VGE_CSUM_FEATURES; 1053 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; 1054 ifp->if_capenable = ifp->if_capabilities; 1055#ifdef DEVICE_POLLING 1056 ifp->if_capabilities |= IFCAP_POLLING; 1057#endif 1058 ifp->if_init = vge_init; 1059 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1); 1060 ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1; 1061 IFQ_SET_READY(&ifp->if_snd); 1062 1063 /* 1064 * Call MI attach routine. 1065 */ 1066 ether_ifattach(ifp, eaddr); 1067 1068 /* Tell the upper layer(s) we support long frames. */ 1069 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1070 1071 /* Hook interrupt last to avoid having to lock softc */ 1072 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, 1073 NULL, vge_intr, sc, &sc->vge_intrhand); 1074 1075 if (error) { 1076 device_printf(dev, "couldn't set up irq\n"); 1077 ether_ifdetach(ifp); 1078 goto fail; 1079 } 1080 1081fail: 1082 if (error) 1083 vge_detach(dev); 1084 1085 return (error); 1086} 1087 1088/* 1089 * Shutdown hardware and free up resources. This can be called any 1090 * time after the mutex has been initialized. It is called in both 1091 * the error case in attach and the normal detach case so it needs 1092 * to be careful about only freeing resources that have actually been 1093 * allocated. 1094 */ 1095static int 1096vge_detach(device_t dev) 1097{ 1098 struct vge_softc *sc; 1099 struct ifnet *ifp; 1100 1101 sc = device_get_softc(dev); 1102 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized")); 1103 ifp = sc->vge_ifp; 1104 1105#ifdef DEVICE_POLLING 1106 if (ifp->if_capenable & IFCAP_POLLING) 1107 ether_poll_deregister(ifp); 1108#endif 1109 1110 /* These should only be active if attach succeeded */ 1111 if (device_is_attached(dev)) { 1112 ether_ifdetach(ifp); 1113 VGE_LOCK(sc); 1114 vge_stop(sc); 1115 VGE_UNLOCK(sc); 1116 callout_drain(&sc->vge_watchdog); 1117 } 1118 if (sc->vge_miibus) 1119 device_delete_child(dev, sc->vge_miibus); 1120 bus_generic_detach(dev); 1121 1122 if (sc->vge_intrhand) 1123 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand); 1124 if (sc->vge_irq) 1125 bus_release_resource(dev, SYS_RES_IRQ, 1126 sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq); 1127 if (sc->vge_flags & VGE_FLAG_MSI) 1128 pci_release_msi(dev); 1129 if (sc->vge_res) 1130 bus_release_resource(dev, SYS_RES_MEMORY, 1131 PCIR_BAR(1), sc->vge_res); 1132 if (ifp) 1133 if_free(ifp); 1134 1135 vge_dma_free(sc); 1136 mtx_destroy(&sc->vge_mtx); 1137 1138 return (0); 1139} 1140 1141static void 1142vge_discard_rxbuf(struct vge_softc *sc, int prod) 1143{ 1144 struct vge_rxdesc *rxd; 1145 int i; 1146 1147 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1148 rxd->rx_desc->vge_sts = 0; 1149 rxd->rx_desc->vge_ctl = 0; 1150 1151 /* 1152 * Note: the manual fails to document the fact that for 1153 * proper opration, the driver needs to replentish the RX 1154 * DMA ring 4 descriptors at a time (rather than one at a 1155 * time, like most chips). We can allocate the new buffers 1156 * but we should not set the OWN bits until we're ready 1157 * to hand back 4 of them in one shot. 1158 */ 1159 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1160 for (i = VGE_RXCHUNK; i > 0; i--) { 1161 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1162 rxd = rxd->rxd_prev; 1163 } 1164 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1165 } 1166} 1167 1168static int 1169vge_newbuf(struct vge_softc *sc, int prod) 1170{ 1171 struct vge_rxdesc *rxd; 1172 struct mbuf *m; 1173 bus_dma_segment_t segs[1]; 1174 bus_dmamap_t map; 1175 int i, nsegs; 1176 1177 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1178 if (m == NULL) 1179 return (ENOBUFS); 1180 /* 1181 * This is part of an evil trick to deal with strict-alignment 1182 * architectures. The VIA chip requires RX buffers to be aligned 1183 * on 32-bit boundaries, but that will hose strict-alignment 1184 * architectures. To get around this, we leave some empty space 1185 * at the start of each buffer and for non-strict-alignment hosts, 1186 * we copy the buffer back two bytes to achieve word alignment. 1187 * This is slightly more efficient than allocating a new buffer, 1188 * copying the contents, and discarding the old buffer. 1189 */ 1190 m->m_len = m->m_pkthdr.len = MCLBYTES; 1191 m_adj(m, VGE_RX_BUF_ALIGN); 1192 1193 if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag, 1194 sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1195 m_freem(m); 1196 return (ENOBUFS); 1197 } 1198 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1199 1200 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1201 if (rxd->rx_m != NULL) { 1202 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1203 BUS_DMASYNC_POSTREAD); 1204 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap); 1205 } 1206 map = rxd->rx_dmamap; 1207 rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap; 1208 sc->vge_cdata.vge_rx_sparemap = map; 1209 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1210 BUS_DMASYNC_PREREAD); 1211 rxd->rx_m = m; 1212 1213 rxd->rx_desc->vge_sts = 0; 1214 rxd->rx_desc->vge_ctl = 0; 1215 rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 1216 rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) | 1217 (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I); 1218 1219 /* 1220 * Note: the manual fails to document the fact that for 1221 * proper operation, the driver needs to replenish the RX 1222 * DMA ring 4 descriptors at a time (rather than one at a 1223 * time, like most chips). We can allocate the new buffers 1224 * but we should not set the OWN bits until we're ready 1225 * to hand back 4 of them in one shot. 1226 */ 1227 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1228 for (i = VGE_RXCHUNK; i > 0; i--) { 1229 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1230 rxd = rxd->rxd_prev; 1231 } 1232 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1233 } 1234 1235 return (0); 1236} 1237 1238static int 1239vge_tx_list_init(struct vge_softc *sc) 1240{ 1241 struct vge_ring_data *rd; 1242 struct vge_txdesc *txd; 1243 int i; 1244 1245 VGE_LOCK_ASSERT(sc); 1246 1247 sc->vge_cdata.vge_tx_prodidx = 0; 1248 sc->vge_cdata.vge_tx_considx = 0; 1249 sc->vge_cdata.vge_tx_cnt = 0; 1250 1251 rd = &sc->vge_rdata; 1252 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ); 1253 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1254 txd = &sc->vge_cdata.vge_txdesc[i]; 1255 txd->tx_m = NULL; 1256 txd->tx_desc = &rd->vge_tx_ring[i]; 1257 } 1258 1259 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1260 sc->vge_cdata.vge_tx_ring_map, 1261 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1262 1263 return (0); 1264} 1265 1266static int 1267vge_rx_list_init(struct vge_softc *sc) 1268{ 1269 struct vge_ring_data *rd; 1270 struct vge_rxdesc *rxd; 1271 int i; 1272 1273 VGE_LOCK_ASSERT(sc); 1274 1275 sc->vge_cdata.vge_rx_prodidx = 0; 1276 sc->vge_cdata.vge_head = NULL; 1277 sc->vge_cdata.vge_tail = NULL; 1278 sc->vge_cdata.vge_rx_commit = 0; 1279 1280 rd = &sc->vge_rdata; 1281 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ); 1282 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1283 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1284 rxd->rx_m = NULL; 1285 rxd->rx_desc = &rd->vge_rx_ring[i]; 1286 if (i == 0) 1287 rxd->rxd_prev = 1288 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1]; 1289 else 1290 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1]; 1291 if (vge_newbuf(sc, i) != 0) 1292 return (ENOBUFS); 1293 } 1294 1295 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1296 sc->vge_cdata.vge_rx_ring_map, 1297 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1298 1299 sc->vge_cdata.vge_rx_commit = 0; 1300 1301 return (0); 1302} 1303 1304static void 1305vge_freebufs(struct vge_softc *sc) 1306{ 1307 struct vge_txdesc *txd; 1308 struct vge_rxdesc *rxd; 1309 struct ifnet *ifp; 1310 int i; 1311 1312 VGE_LOCK_ASSERT(sc); 1313 1314 ifp = sc->vge_ifp; 1315 /* 1316 * Free RX and TX mbufs still in the queues. 1317 */ 1318 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1319 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1320 if (rxd->rx_m != NULL) { 1321 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, 1322 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 1323 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, 1324 rxd->rx_dmamap); 1325 m_freem(rxd->rx_m); 1326 rxd->rx_m = NULL; 1327 } 1328 } 1329 1330 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1331 txd = &sc->vge_cdata.vge_txdesc[i]; 1332 if (txd->tx_m != NULL) { 1333 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, 1334 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1335 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, 1336 txd->tx_dmamap); 1337 m_freem(txd->tx_m); 1338 txd->tx_m = NULL; 1339 ifp->if_oerrors++; 1340 } 1341 } 1342} 1343 1344#ifndef __NO_STRICT_ALIGNMENT 1345static __inline void 1346vge_fixup_rx(struct mbuf *m) 1347{ 1348 int i; 1349 uint16_t *src, *dst; 1350 1351 src = mtod(m, uint16_t *); 1352 dst = src - 1; 1353 1354 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1355 *dst++ = *src++; 1356 1357 m->m_data -= ETHER_ALIGN; 1358} 1359#endif 1360 1361/* 1362 * RX handler. We support the reception of jumbo frames that have 1363 * been fragmented across multiple 2K mbuf cluster buffers. 1364 */ 1365static int 1366vge_rxeof(struct vge_softc *sc, int count) 1367{ 1368 struct mbuf *m; 1369 struct ifnet *ifp; 1370 int prod, prog, total_len; 1371 struct vge_rxdesc *rxd; 1372 struct vge_rx_desc *cur_rx; 1373 uint32_t rxstat, rxctl; 1374 1375 VGE_LOCK_ASSERT(sc); 1376 1377 ifp = sc->vge_ifp; 1378 1379 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1380 sc->vge_cdata.vge_rx_ring_map, 1381 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1382 1383 prod = sc->vge_cdata.vge_rx_prodidx; 1384 for (prog = 0; count > 0 && 1385 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; 1386 VGE_RX_DESC_INC(prod)) { 1387 cur_rx = &sc->vge_rdata.vge_rx_ring[prod]; 1388 rxstat = le32toh(cur_rx->vge_sts); 1389 if ((rxstat & VGE_RDSTS_OWN) != 0) 1390 break; 1391 count--; 1392 prog++; 1393 rxctl = le32toh(cur_rx->vge_ctl); 1394 total_len = VGE_RXBYTES(rxstat); 1395 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1396 m = rxd->rx_m; 1397 1398 /* 1399 * If the 'start of frame' bit is set, this indicates 1400 * either the first fragment in a multi-fragment receive, 1401 * or an intermediate fragment. Either way, we want to 1402 * accumulate the buffers. 1403 */ 1404 if ((rxstat & VGE_RXPKT_SOF) != 0) { 1405 if (vge_newbuf(sc, prod) != 0) { 1406 ifp->if_iqdrops++; 1407 VGE_CHAIN_RESET(sc); 1408 vge_discard_rxbuf(sc, prod); 1409 continue; 1410 } 1411 m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN; 1412 if (sc->vge_cdata.vge_head == NULL) { 1413 sc->vge_cdata.vge_head = m; 1414 sc->vge_cdata.vge_tail = m; 1415 } else { 1416 m->m_flags &= ~M_PKTHDR; 1417 sc->vge_cdata.vge_tail->m_next = m; 1418 sc->vge_cdata.vge_tail = m; 1419 } 1420 continue; 1421 } 1422 1423 /* 1424 * Bad/error frames will have the RXOK bit cleared. 1425 * However, there's one error case we want to allow: 1426 * if a VLAN tagged frame arrives and the chip can't 1427 * match it against the CAM filter, it considers this 1428 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 1429 * We don't want to drop the frame though: our VLAN 1430 * filtering is done in software. 1431 * We also want to receive bad-checksummed frames and 1432 * and frames with bad-length. 1433 */ 1434 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1435 (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR | 1436 VGE_RDSTS_CSUMERR)) == 0) { 1437 ifp->if_ierrors++; 1438 /* 1439 * If this is part of a multi-fragment packet, 1440 * discard all the pieces. 1441 */ 1442 VGE_CHAIN_RESET(sc); 1443 vge_discard_rxbuf(sc, prod); 1444 continue; 1445 } 1446 1447 if (vge_newbuf(sc, prod) != 0) { 1448 ifp->if_iqdrops++; 1449 VGE_CHAIN_RESET(sc); 1450 vge_discard_rxbuf(sc, prod); 1451 continue; 1452 } 1453 1454 /* Chain received mbufs. */ 1455 if (sc->vge_cdata.vge_head != NULL) { 1456 m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN); 1457 /* 1458 * Special case: if there's 4 bytes or less 1459 * in this buffer, the mbuf can be discarded: 1460 * the last 4 bytes is the CRC, which we don't 1461 * care about anyway. 1462 */ 1463 if (m->m_len <= ETHER_CRC_LEN) { 1464 sc->vge_cdata.vge_tail->m_len -= 1465 (ETHER_CRC_LEN - m->m_len); 1466 m_freem(m); 1467 } else { 1468 m->m_len -= ETHER_CRC_LEN; 1469 m->m_flags &= ~M_PKTHDR; 1470 sc->vge_cdata.vge_tail->m_next = m; 1471 } 1472 m = sc->vge_cdata.vge_head; 1473 m->m_flags |= M_PKTHDR; 1474 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1475 } else { 1476 m->m_flags |= M_PKTHDR; 1477 m->m_pkthdr.len = m->m_len = 1478 (total_len - ETHER_CRC_LEN); 1479 } 1480 1481#ifndef __NO_STRICT_ALIGNMENT 1482 vge_fixup_rx(m); 1483#endif 1484 m->m_pkthdr.rcvif = ifp; 1485 1486 /* Do RX checksumming if enabled */ 1487 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1488 (rxctl & VGE_RDCTL_FRAG) == 0) { 1489 /* Check IP header checksum */ 1490 if ((rxctl & VGE_RDCTL_IPPKT) != 0) 1491 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1492 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0) 1493 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1494 1495 /* Check TCP/UDP checksum */ 1496 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) && 1497 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1498 m->m_pkthdr.csum_flags |= 1499 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1500 m->m_pkthdr.csum_data = 0xffff; 1501 } 1502 } 1503 1504 if ((rxstat & VGE_RDSTS_VTAG) != 0) { 1505 /* 1506 * The 32-bit rxctl register is stored in little-endian. 1507 * However, the 16-bit vlan tag is stored in big-endian, 1508 * so we have to byte swap it. 1509 */ 1510 m->m_pkthdr.ether_vtag = 1511 bswap16(rxctl & VGE_RDCTL_VLANID); 1512 m->m_flags |= M_VLANTAG; 1513 } 1514 1515 VGE_UNLOCK(sc); 1516 (*ifp->if_input)(ifp, m); 1517 VGE_LOCK(sc); 1518 sc->vge_cdata.vge_head = NULL; 1519 sc->vge_cdata.vge_tail = NULL; 1520 } 1521 1522 if (prog > 0) { 1523 sc->vge_cdata.vge_rx_prodidx = prod; 1524 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1525 sc->vge_cdata.vge_rx_ring_map, 1526 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1527 /* Update residue counter. */ 1528 if (sc->vge_cdata.vge_rx_commit != 0) { 1529 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, 1530 sc->vge_cdata.vge_rx_commit); 1531 sc->vge_cdata.vge_rx_commit = 0; 1532 } 1533 } 1534 return (prog); 1535} 1536 1537static void 1538vge_txeof(struct vge_softc *sc) 1539{ 1540 struct ifnet *ifp; 1541 struct vge_tx_desc *cur_tx; 1542 struct vge_txdesc *txd; 1543 uint32_t txstat; 1544 int cons, prod; 1545 1546 VGE_LOCK_ASSERT(sc); 1547 1548 ifp = sc->vge_ifp; 1549 1550 if (sc->vge_cdata.vge_tx_cnt == 0) 1551 return; 1552 1553 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1554 sc->vge_cdata.vge_tx_ring_map, 1555 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1556 1557 /* 1558 * Go through our tx list and free mbufs for those 1559 * frames that have been transmitted. 1560 */ 1561 cons = sc->vge_cdata.vge_tx_considx; 1562 prod = sc->vge_cdata.vge_tx_prodidx; 1563 for (; cons != prod; VGE_TX_DESC_INC(cons)) { 1564 cur_tx = &sc->vge_rdata.vge_tx_ring[cons]; 1565 txstat = le32toh(cur_tx->vge_sts); 1566 if ((txstat & VGE_TDSTS_OWN) != 0) 1567 break; 1568 sc->vge_cdata.vge_tx_cnt--; 1569 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1570 1571 txd = &sc->vge_cdata.vge_txdesc[cons]; 1572 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1573 BUS_DMASYNC_POSTWRITE); 1574 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap); 1575 1576 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1577 __func__)); 1578 m_freem(txd->tx_m); 1579 txd->tx_m = NULL; 1580 txd->tx_desc->vge_frag[0].vge_addrhi = 0; 1581 } 1582 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1583 sc->vge_cdata.vge_tx_ring_map, 1584 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1585 sc->vge_cdata.vge_tx_considx = cons; 1586 if (sc->vge_cdata.vge_tx_cnt == 0) 1587 sc->vge_timer = 0; 1588 else { 1589 /* 1590 * If not all descriptors have been released reaped yet, 1591 * reload the timer so that we will eventually get another 1592 * interrupt that will cause us to re-enter this routine. 1593 * This is done in case the transmitter has gone idle. 1594 */ 1595 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1596 } 1597} 1598 1599static void 1600vge_link_statchg(void *xsc) 1601{ 1602 struct vge_softc *sc; 1603 struct ifnet *ifp; 1604 struct mii_data *mii; 1605 1606 sc = xsc; 1607 ifp = sc->vge_ifp; 1608 VGE_LOCK_ASSERT(sc); 1609 mii = device_get_softc(sc->vge_miibus); 1610 1611 mii_pollstat(mii); 1612 if ((sc->vge_flags & VGE_FLAG_LINK) != 0) { 1613 if (!(mii->mii_media_status & IFM_ACTIVE)) { 1614 sc->vge_flags &= ~VGE_FLAG_LINK; 1615 if_link_state_change(sc->vge_ifp, 1616 LINK_STATE_DOWN); 1617 } 1618 } else { 1619 if (mii->mii_media_status & IFM_ACTIVE && 1620 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1621 sc->vge_flags |= VGE_FLAG_LINK; 1622 if_link_state_change(sc->vge_ifp, 1623 LINK_STATE_UP); 1624 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1625 vge_start_locked(ifp); 1626 } 1627 } 1628} 1629 1630#ifdef DEVICE_POLLING 1631static int 1632vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) 1633{ 1634 struct vge_softc *sc = ifp->if_softc; 1635 int rx_npkts = 0; 1636 1637 VGE_LOCK(sc); 1638 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1639 goto done; 1640 1641 rx_npkts = vge_rxeof(sc, count); 1642 vge_txeof(sc); 1643 1644 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1645 vge_start_locked(ifp); 1646 1647 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1648 uint32_t status; 1649 status = CSR_READ_4(sc, VGE_ISR); 1650 if (status == 0xFFFFFFFF) 1651 goto done; 1652 if (status) 1653 CSR_WRITE_4(sc, VGE_ISR, status); 1654 1655 /* 1656 * XXX check behaviour on receiver stalls. 1657 */ 1658 1659 if (status & VGE_ISR_TXDMA_STALL || 1660 status & VGE_ISR_RXDMA_STALL) { 1661 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1662 vge_init_locked(sc); 1663 } 1664 1665 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1666 vge_rxeof(sc, count); 1667 ifp->if_ierrors++; 1668 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1669 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1670 } 1671 } 1672done: 1673 VGE_UNLOCK(sc); 1674 return (rx_npkts); 1675} 1676#endif /* DEVICE_POLLING */ 1677 1678static void 1679vge_intr(void *arg) 1680{ 1681 struct vge_softc *sc; 1682 struct ifnet *ifp; 1683 uint32_t status; 1684 1685 sc = arg; 1686 1687 if (sc->suspended) { 1688 return; 1689 } 1690 1691 VGE_LOCK(sc); 1692 ifp = sc->vge_ifp; 1693 1694 if (!(ifp->if_flags & IFF_UP)) { 1695 VGE_UNLOCK(sc); 1696 return; 1697 } 1698 1699#ifdef DEVICE_POLLING 1700 if (ifp->if_capenable & IFCAP_POLLING) { 1701 VGE_UNLOCK(sc); 1702 return; 1703 } 1704#endif 1705 1706 /* Disable interrupts */ 1707 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1708 1709 for (;;) { 1710 1711 status = CSR_READ_4(sc, VGE_ISR); 1712 /* If the card has gone away the read returns 0xffff. */ 1713 if (status == 0xFFFFFFFF) 1714 break; 1715 1716 if (status) 1717 CSR_WRITE_4(sc, VGE_ISR, status); 1718 1719 if ((status & VGE_INTRS) == 0) 1720 break; 1721 1722 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1723 vge_rxeof(sc, VGE_RX_DESC_CNT); 1724 1725 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1726 vge_rxeof(sc, VGE_RX_DESC_CNT); 1727 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1728 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1729 } 1730 1731 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0)) 1732 vge_txeof(sc); 1733 1734 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1735 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1736 vge_init_locked(sc); 1737 } 1738 1739 if (status & VGE_ISR_LINKSTS) 1740 vge_link_statchg(sc); 1741 } 1742 1743 /* Re-enable interrupts */ 1744 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1745 1746 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1747 vge_start_locked(ifp); 1748 1749 VGE_UNLOCK(sc); 1750} 1751 1752static int 1753vge_encap(struct vge_softc *sc, struct mbuf **m_head) 1754{ 1755 struct vge_txdesc *txd; 1756 struct vge_tx_frag *frag; 1757 struct mbuf *m; 1758 bus_dma_segment_t txsegs[VGE_MAXTXSEGS]; 1759 int error, i, nsegs, padlen; 1760 uint32_t cflags; 1761 1762 VGE_LOCK_ASSERT(sc); 1763 1764 M_ASSERTPKTHDR((*m_head)); 1765 1766 /* Argh. This chip does not autopad short frames. */ 1767 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1768 m = *m_head; 1769 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len; 1770 if (M_WRITABLE(m) == 0) { 1771 /* Get a writable copy. */ 1772 m = m_dup(*m_head, M_DONTWAIT); 1773 m_freem(*m_head); 1774 if (m == NULL) { 1775 *m_head = NULL; 1776 return (ENOBUFS); 1777 } 1778 *m_head = m; 1779 } 1780 if (M_TRAILINGSPACE(m) < padlen) { 1781 m = m_defrag(m, M_DONTWAIT); 1782 if (m == NULL) { 1783 m_freem(*m_head); 1784 *m_head = NULL; 1785 return (ENOBUFS); 1786 } 1787 } 1788 /* 1789 * Manually pad short frames, and zero the pad space 1790 * to avoid leaking data. 1791 */ 1792 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1793 m->m_pkthdr.len += padlen; 1794 m->m_len = m->m_pkthdr.len; 1795 *m_head = m; 1796 } 1797 1798 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx]; 1799 1800 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1801 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1802 if (error == EFBIG) { 1803 m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS); 1804 if (m == NULL) { 1805 m_freem(*m_head); 1806 *m_head = NULL; 1807 return (ENOMEM); 1808 } 1809 *m_head = m; 1810 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1811 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1812 if (error != 0) { 1813 m_freem(*m_head); 1814 *m_head = NULL; 1815 return (error); 1816 } 1817 } else if (error != 0) 1818 return (error); 1819 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1820 BUS_DMASYNC_PREWRITE); 1821 1822 m = *m_head; 1823 cflags = 0; 1824 1825 /* Configure checksum offload. */ 1826 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1827 cflags |= VGE_TDCTL_IPCSUM; 1828 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1829 cflags |= VGE_TDCTL_TCPCSUM; 1830 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1831 cflags |= VGE_TDCTL_UDPCSUM; 1832 1833 /* Configure VLAN. */ 1834 if ((m->m_flags & M_VLANTAG) != 0) 1835 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG; 1836 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16); 1837 /* 1838 * XXX 1839 * Velocity family seems to support TSO but no information 1840 * for MSS configuration is available. Also the number of 1841 * fragments supported by a descriptor is too small to hold 1842 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF, 1843 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build 1844 * longer chain of buffers but no additional information is 1845 * available. 1846 * 1847 * When telling the chip how many segments there are, we 1848 * must use nsegs + 1 instead of just nsegs. Darned if I 1849 * know why. This also means we can't use the last fragment 1850 * field of Tx descriptor. 1851 */ 1852 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) | 1853 VGE_TD_LS_NORM); 1854 for (i = 0; i < nsegs; i++) { 1855 frag = &txd->tx_desc->vge_frag[i]; 1856 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr)); 1857 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) | 1858 (VGE_BUFLEN(txsegs[i].ds_len) << 16)); 1859 } 1860 1861 sc->vge_cdata.vge_tx_cnt++; 1862 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx); 1863 1864 /* 1865 * Finally request interrupt and give the first descriptor 1866 * ownership to hardware. 1867 */ 1868 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC); 1869 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN); 1870 txd->tx_m = m; 1871 1872 return (0); 1873} 1874 1875/* 1876 * Main transmit routine. 1877 */ 1878 1879static void 1880vge_start(struct ifnet *ifp) 1881{ 1882 struct vge_softc *sc; 1883 1884 sc = ifp->if_softc; 1885 VGE_LOCK(sc); 1886 vge_start_locked(ifp); 1887 VGE_UNLOCK(sc); 1888} 1889 1890 1891static void 1892vge_start_locked(struct ifnet *ifp) 1893{ 1894 struct vge_softc *sc; 1895 struct vge_txdesc *txd; 1896 struct mbuf *m_head; 1897 int enq, idx; 1898 1899 sc = ifp->if_softc; 1900 1901 VGE_LOCK_ASSERT(sc); 1902 1903 if ((sc->vge_flags & VGE_FLAG_LINK) == 0 || 1904 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1905 IFF_DRV_RUNNING) 1906 return; 1907 1908 idx = sc->vge_cdata.vge_tx_prodidx; 1909 VGE_TX_DESC_DEC(idx); 1910 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1911 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) { 1912 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1913 if (m_head == NULL) 1914 break; 1915 /* 1916 * Pack the data into the transmit ring. If we 1917 * don't have room, set the OACTIVE flag and wait 1918 * for the NIC to drain the ring. 1919 */ 1920 if (vge_encap(sc, &m_head)) { 1921 if (m_head == NULL) 1922 break; 1923 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1924 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1925 break; 1926 } 1927 1928 txd = &sc->vge_cdata.vge_txdesc[idx]; 1929 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q); 1930 VGE_TX_DESC_INC(idx); 1931 1932 enq++; 1933 /* 1934 * If there's a BPF listener, bounce a copy of this frame 1935 * to him. 1936 */ 1937 ETHER_BPF_MTAP(ifp, m_head); 1938 } 1939 1940 if (enq > 0) { 1941 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1942 sc->vge_cdata.vge_tx_ring_map, 1943 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1944 /* Issue a transmit command. */ 1945 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 1946 /* 1947 * Use the countdown timer for interrupt moderation. 1948 * 'TX done' interrupts are disabled. Instead, we reset the 1949 * countdown timer, which will begin counting until it hits 1950 * the value in the SSTIMER register, and then trigger an 1951 * interrupt. Each time we set the TIMER0_ENABLE bit, the 1952 * the timer count is reloaded. Only when the transmitter 1953 * is idle will the timer hit 0 and an interrupt fire. 1954 */ 1955 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); 1956 1957 /* 1958 * Set a timeout in case the chip goes out to lunch. 1959 */ 1960 sc->vge_timer = 5; 1961 } 1962} 1963 1964static void 1965vge_init(void *xsc) 1966{ 1967 struct vge_softc *sc = xsc; 1968 1969 VGE_LOCK(sc); 1970 vge_init_locked(sc); 1971 VGE_UNLOCK(sc); 1972} 1973 1974static void 1975vge_init_locked(struct vge_softc *sc) 1976{ 1977 struct ifnet *ifp = sc->vge_ifp; 1978 struct mii_data *mii; 1979 int error, i; 1980 1981 VGE_LOCK_ASSERT(sc); 1982 mii = device_get_softc(sc->vge_miibus); 1983 1984 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1985 return; 1986 1987 /* 1988 * Cancel pending I/O and free all RX/TX buffers. 1989 */ 1990 vge_stop(sc); 1991 vge_reset(sc); 1992 1993 /* 1994 * Initialize the RX and TX descriptors and mbufs. 1995 */ 1996 1997 error = vge_rx_list_init(sc); 1998 if (error != 0) { 1999 device_printf(sc->vge_dev, "no memory for Rx buffers.\n"); 2000 return; 2001 } 2002 vge_tx_list_init(sc); 2003 2004 /* Set our station address */ 2005 for (i = 0; i < ETHER_ADDR_LEN; i++) 2006 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]); 2007 2008 /* 2009 * Set receive FIFO threshold. Also allow transmission and 2010 * reception of VLAN tagged frames. 2011 */ 2012 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT); 2013 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2); 2014 2015 /* Set DMA burst length */ 2016 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 2017 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 2018 2019 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); 2020 2021 /* Set collision backoff algorithm */ 2022 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| 2023 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); 2024 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 2025 2026 /* Disable LPSEL field in priority resolution */ 2027 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 2028 2029 /* 2030 * Load the addresses of the DMA queues into the chip. 2031 * Note that we only use one transmit queue. 2032 */ 2033 2034 CSR_WRITE_4(sc, VGE_TXDESC_HIADDR, 2035 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)); 2036 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 2037 VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr)); 2038 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 2039 2040 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 2041 VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr)); 2042 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 2043 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 2044 2045 /* Enable and wake up the RX descriptor queue */ 2046 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 2047 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 2048 2049 /* Enable the TX descriptor queue */ 2050 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 2051 2052 /* Set up the receive filter -- allow large frames for VLANs. */ 2053 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT); 2054 2055 /* If we want promiscuous mode, set the allframes bit. */ 2056 if (ifp->if_flags & IFF_PROMISC) { 2057 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); 2058 } 2059 2060 /* Set capture broadcast bit to capture broadcast frames. */ 2061 if (ifp->if_flags & IFF_BROADCAST) { 2062 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); 2063 } 2064 2065 /* Set multicast bit to capture multicast frames. */ 2066 if (ifp->if_flags & IFF_MULTICAST) { 2067 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); 2068 } 2069 2070 /* Init the cam filter. */ 2071 vge_cam_clear(sc); 2072 2073 /* Init the multicast filter. */ 2074 vge_setmulti(sc); 2075 2076 /* Enable flow control */ 2077 2078 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 2079 2080 /* Enable jumbo frame reception (if desired) */ 2081 2082 /* Start the MAC. */ 2083 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 2084 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 2085 CSR_WRITE_1(sc, VGE_CRS0, 2086 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); 2087 2088 /* 2089 * Configure one-shot timer for microsecond 2090 * resolution and load it for 500 usecs. 2091 */ 2092 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES); 2093 CSR_WRITE_2(sc, VGE_SSTIMER, 400); 2094 2095 /* 2096 * Configure interrupt moderation for receive. Enable 2097 * the holdoff counter and load it, and set the RX 2098 * suppression count to the number of descriptors we 2099 * want to allow before triggering an interrupt. 2100 * The holdoff timer is in units of 20 usecs. 2101 */ 2102 2103#ifdef notyet 2104 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE); 2105 /* Select the interrupt holdoff timer page. */ 2106 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 2107 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 2108 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */ 2109 2110 /* Enable use of the holdoff timer. */ 2111 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); 2112 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD); 2113 2114 /* Select the RX suppression threshold page. */ 2115 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 2116 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); 2117 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */ 2118 2119 /* Restore the page select bits. */ 2120 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 2121 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 2122#endif 2123 2124#ifdef DEVICE_POLLING 2125 /* 2126 * Disable interrupts if we are polling. 2127 */ 2128 if (ifp->if_capenable & IFCAP_POLLING) { 2129 CSR_WRITE_4(sc, VGE_IMR, 0); 2130 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2131 } else /* otherwise ... */ 2132#endif 2133 { 2134 /* 2135 * Enable interrupts. 2136 */ 2137 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2138 CSR_WRITE_4(sc, VGE_ISR, 0); 2139 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2140 } 2141 2142 sc->vge_flags &= ~VGE_FLAG_LINK; 2143 mii_mediachg(mii); 2144 2145 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2146 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2147 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2148} 2149 2150/* 2151 * Set media options. 2152 */ 2153static int 2154vge_ifmedia_upd(struct ifnet *ifp) 2155{ 2156 struct vge_softc *sc; 2157 struct mii_data *mii; 2158 int error; 2159 2160 sc = ifp->if_softc; 2161 VGE_LOCK(sc); 2162 mii = device_get_softc(sc->vge_miibus); 2163 error = mii_mediachg(mii); 2164 VGE_UNLOCK(sc); 2165 2166 return (error); 2167} 2168 2169/* 2170 * Report current media status. 2171 */ 2172static void 2173vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2174{ 2175 struct vge_softc *sc; 2176 struct mii_data *mii; 2177 2178 sc = ifp->if_softc; 2179 mii = device_get_softc(sc->vge_miibus); 2180 2181 VGE_LOCK(sc); 2182 if ((ifp->if_flags & IFF_UP) == 0) { 2183 VGE_UNLOCK(sc); 2184 return; 2185 } 2186 mii_pollstat(mii); 2187 VGE_UNLOCK(sc); 2188 ifmr->ifm_active = mii->mii_media_active; 2189 ifmr->ifm_status = mii->mii_media_status; 2190} 2191 2192static void 2193vge_miibus_statchg(device_t dev) 2194{ 2195 struct vge_softc *sc; 2196 struct mii_data *mii; 2197 struct ifmedia_entry *ife; 2198 2199 sc = device_get_softc(dev); 2200 mii = device_get_softc(sc->vge_miibus); 2201 ife = mii->mii_media.ifm_cur; 2202 2203 /* 2204 * If the user manually selects a media mode, we need to turn 2205 * on the forced MAC mode bit in the DIAGCTL register. If the 2206 * user happens to choose a full duplex mode, we also need to 2207 * set the 'force full duplex' bit. This applies only to 2208 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 2209 * mode is disabled, and in 1000baseT mode, full duplex is 2210 * always implied, so we turn on the forced mode bit but leave 2211 * the FDX bit cleared. 2212 */ 2213 2214 switch (IFM_SUBTYPE(ife->ifm_media)) { 2215 case IFM_AUTO: 2216 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2217 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2218 break; 2219 case IFM_1000_T: 2220 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2221 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2222 break; 2223 case IFM_100_TX: 2224 case IFM_10_T: 2225 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2226 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 2227 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2228 } else { 2229 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2230 } 2231 break; 2232 default: 2233 device_printf(dev, "unknown media type: %x\n", 2234 IFM_SUBTYPE(ife->ifm_media)); 2235 break; 2236 } 2237} 2238 2239static int 2240vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2241{ 2242 struct vge_softc *sc = ifp->if_softc; 2243 struct ifreq *ifr = (struct ifreq *) data; 2244 struct mii_data *mii; 2245 int error = 0; 2246 2247 switch (command) { 2248 case SIOCSIFMTU: 2249 if (ifr->ifr_mtu > VGE_JUMBO_MTU) 2250 error = EINVAL; 2251 ifp->if_mtu = ifr->ifr_mtu; 2252 break; 2253 case SIOCSIFFLAGS: 2254 VGE_LOCK(sc); 2255 if (ifp->if_flags & IFF_UP) { 2256 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2257 ifp->if_flags & IFF_PROMISC && 2258 !(sc->vge_if_flags & IFF_PROMISC)) { 2259 CSR_SETBIT_1(sc, VGE_RXCTL, 2260 VGE_RXCTL_RX_PROMISC); 2261 vge_setmulti(sc); 2262 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2263 !(ifp->if_flags & IFF_PROMISC) && 2264 sc->vge_if_flags & IFF_PROMISC) { 2265 CSR_CLRBIT_1(sc, VGE_RXCTL, 2266 VGE_RXCTL_RX_PROMISC); 2267 vge_setmulti(sc); 2268 } else 2269 vge_init_locked(sc); 2270 } else { 2271 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2272 vge_stop(sc); 2273 } 2274 sc->vge_if_flags = ifp->if_flags; 2275 VGE_UNLOCK(sc); 2276 break; 2277 case SIOCADDMULTI: 2278 case SIOCDELMULTI: 2279 VGE_LOCK(sc); 2280 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2281 vge_setmulti(sc); 2282 VGE_UNLOCK(sc); 2283 break; 2284 case SIOCGIFMEDIA: 2285 case SIOCSIFMEDIA: 2286 mii = device_get_softc(sc->vge_miibus); 2287 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2288 break; 2289 case SIOCSIFCAP: 2290 { 2291 int mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2292#ifdef DEVICE_POLLING 2293 if (mask & IFCAP_POLLING) { 2294 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2295 error = ether_poll_register(vge_poll, ifp); 2296 if (error) 2297 return (error); 2298 VGE_LOCK(sc); 2299 /* Disable interrupts */ 2300 CSR_WRITE_4(sc, VGE_IMR, 0); 2301 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2302 ifp->if_capenable |= IFCAP_POLLING; 2303 VGE_UNLOCK(sc); 2304 } else { 2305 error = ether_poll_deregister(ifp); 2306 /* Enable interrupts. */ 2307 VGE_LOCK(sc); 2308 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2309 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2310 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2311 ifp->if_capenable &= ~IFCAP_POLLING; 2312 VGE_UNLOCK(sc); 2313 } 2314 } 2315#endif /* DEVICE_POLLING */ 2316 VGE_LOCK(sc); 2317 if ((mask & IFCAP_TXCSUM) != 0 && 2318 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2319 ifp->if_capenable ^= IFCAP_TXCSUM; 2320 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2321 ifp->if_hwassist |= VGE_CSUM_FEATURES; 2322 else 2323 ifp->if_hwassist &= ~VGE_CSUM_FEATURES; 2324 } 2325 if ((mask & IFCAP_RXCSUM) != 0 && 2326 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 2327 ifp->if_capenable ^= IFCAP_RXCSUM; 2328 VGE_UNLOCK(sc); 2329 } 2330 break; 2331 default: 2332 error = ether_ioctl(ifp, command, data); 2333 break; 2334 } 2335 2336 return (error); 2337} 2338 2339static void 2340vge_watchdog(void *arg) 2341{ 2342 struct vge_softc *sc; 2343 struct ifnet *ifp; 2344 2345 sc = arg; 2346 VGE_LOCK_ASSERT(sc); 2347 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2348 if (sc->vge_timer == 0 || --sc->vge_timer > 0) 2349 return; 2350 2351 ifp = sc->vge_ifp; 2352 if_printf(ifp, "watchdog timeout\n"); 2353 ifp->if_oerrors++; 2354 2355 vge_txeof(sc); 2356 vge_rxeof(sc, VGE_RX_DESC_CNT); 2357 2358 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2359 vge_init_locked(sc); 2360} 2361 2362/* 2363 * Stop the adapter and free any mbufs allocated to the 2364 * RX and TX lists. 2365 */ 2366static void 2367vge_stop(struct vge_softc *sc) 2368{ 2369 struct ifnet *ifp; 2370 2371 VGE_LOCK_ASSERT(sc); 2372 ifp = sc->vge_ifp; 2373 sc->vge_timer = 0; 2374 callout_stop(&sc->vge_watchdog); 2375 2376 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2377 2378 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2379 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2380 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2381 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2382 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2383 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2384 2385 VGE_CHAIN_RESET(sc); 2386 vge_txeof(sc); 2387 vge_freebufs(sc); 2388} 2389 2390/* 2391 * Device suspend routine. Stop the interface and save some PCI 2392 * settings in case the BIOS doesn't restore them properly on 2393 * resume. 2394 */ 2395static int 2396vge_suspend(device_t dev) 2397{ 2398 struct vge_softc *sc; 2399 2400 sc = device_get_softc(dev); 2401 2402 VGE_LOCK(sc); 2403 vge_stop(sc); 2404 2405 sc->suspended = 1; 2406 VGE_UNLOCK(sc); 2407 2408 return (0); 2409} 2410 2411/* 2412 * Device resume routine. Restore some PCI settings in case the BIOS 2413 * doesn't, re-enable busmastering, and restart the interface if 2414 * appropriate. 2415 */ 2416static int 2417vge_resume(device_t dev) 2418{ 2419 struct vge_softc *sc; 2420 struct ifnet *ifp; 2421 2422 sc = device_get_softc(dev); 2423 ifp = sc->vge_ifp; 2424 2425 /* reenable busmastering */ 2426 pci_enable_busmaster(dev); 2427 pci_enable_io(dev, SYS_RES_MEMORY); 2428 2429 /* reinitialize interface if necessary */ 2430 VGE_LOCK(sc); 2431 if (ifp->if_flags & IFF_UP) { 2432 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2433 vge_init_locked(sc); 2434 } 2435 sc->suspended = 0; 2436 VGE_UNLOCK(sc); 2437 2438 return (0); 2439} 2440 2441/* 2442 * Stop all chip I/O so that the kernel's probe routines don't 2443 * get confused by errant DMAs when rebooting. 2444 */ 2445static int 2446vge_shutdown(device_t dev) 2447{ 2448 struct vge_softc *sc; 2449 2450 sc = device_get_softc(dev); 2451 2452 VGE_LOCK(sc); 2453 vge_stop(sc); 2454 VGE_UNLOCK(sc); 2455 2456 return (0); 2457} 2458