if_ixgb.c revision 195049
1/******************************************************************************* 2 3Copyright (c) 2001-2004, Intel Corporation 4All rights reserved. 5 6Redistribution and use in source and binary forms, with or without 7modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30POSSIBILITY OF SUCH DAMAGE. 
31 32***************************************************************************/ 33 34/*$FreeBSD: head/sys/dev/ixgb/if_ixgb.c 195049 2009-06-26 11:45:06Z rwatson $*/ 35 36#ifdef HAVE_KERNEL_OPTION_HEADERS 37#include "opt_device_polling.h" 38#endif 39 40#include <dev/ixgb/if_ixgb.h> 41 42/********************************************************************* 43 * Set this to one to display debug statistics 44 *********************************************************************/ 45int ixgb_display_debug_stats = 0; 46 47/********************************************************************* 48 * Linked list of board private structures for all NICs found 49 *********************************************************************/ 50 51struct adapter *ixgb_adapter_list = NULL; 52 53 54 55/********************************************************************* 56 * Driver version 57 *********************************************************************/ 58 59char ixgb_driver_version[] = "1.0.6"; 60char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation."; 61 62/********************************************************************* 63 * PCI Device ID Table 64 * 65 * Used by probe to select devices to load on 66 * Last field stores an index into ixgb_strings 67 * Last entry must be all 0s 68 * 69 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 70 *********************************************************************/ 71 72static ixgb_vendor_info_t ixgb_vendor_info_array[] = 73{ 74 /* Intel(R) PRO/10000 Network Connection */ 75 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0}, 76 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0}, 77 /* required last entry */ 78 {0, 0, 0, 0, 0} 79}; 80 81/********************************************************************* 82 * Table of branding strings for all supported NICs. 
83 *********************************************************************/ 84 85static char *ixgb_strings[] = { 86 "Intel(R) PRO/10GbE Network Driver" 87}; 88 89/********************************************************************* 90 * Function prototypes 91 *********************************************************************/ 92static int ixgb_probe(device_t); 93static int ixgb_attach(device_t); 94static int ixgb_detach(device_t); 95static int ixgb_shutdown(device_t); 96static void ixgb_intr(void *); 97static void ixgb_start(struct ifnet *); 98static void ixgb_start_locked(struct ifnet *); 99static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t); 100static void ixgb_watchdog(struct ifnet *); 101static void ixgb_init(void *); 102static void ixgb_init_locked(struct adapter *); 103static void ixgb_stop(void *); 104static void ixgb_media_status(struct ifnet *, struct ifmediareq *); 105static int ixgb_media_change(struct ifnet *); 106static void ixgb_identify_hardware(struct adapter *); 107static int ixgb_allocate_pci_resources(struct adapter *); 108static void ixgb_free_pci_resources(struct adapter *); 109static void ixgb_local_timer(void *); 110static int ixgb_hardware_init(struct adapter *); 111static void ixgb_setup_interface(device_t, struct adapter *); 112static int ixgb_setup_transmit_structures(struct adapter *); 113static void ixgb_initialize_transmit_unit(struct adapter *); 114static int ixgb_setup_receive_structures(struct adapter *); 115static void ixgb_initialize_receive_unit(struct adapter *); 116static void ixgb_enable_intr(struct adapter *); 117static void ixgb_disable_intr(struct adapter *); 118static void ixgb_free_transmit_structures(struct adapter *); 119static void ixgb_free_receive_structures(struct adapter *); 120static void ixgb_update_stats_counters(struct adapter *); 121static void ixgb_clean_transmit_interrupts(struct adapter *); 122static int ixgb_allocate_receive_structures(struct adapter *); 123static int 
ixgb_allocate_transmit_structures(struct adapter *); 124static int ixgb_process_receive_interrupts(struct adapter *, int); 125static void 126ixgb_receive_checksum(struct adapter *, 127 struct ixgb_rx_desc * rx_desc, 128 struct mbuf *); 129static void 130ixgb_transmit_checksum_setup(struct adapter *, 131 struct mbuf *, 132 u_int8_t *); 133static void ixgb_set_promisc(struct adapter *); 134static void ixgb_disable_promisc(struct adapter *); 135static void ixgb_set_multi(struct adapter *); 136static void ixgb_print_hw_stats(struct adapter *); 137static void ixgb_print_link_status(struct adapter *); 138static int 139ixgb_get_buf(int i, struct adapter *, 140 struct mbuf *); 141static void ixgb_enable_vlans(struct adapter * adapter); 142static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head); 143static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS); 144static int 145ixgb_dma_malloc(struct adapter *, bus_size_t, 146 struct ixgb_dma_alloc *, int); 147static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *); 148#ifdef DEVICE_POLLING 149static poll_handler_t ixgb_poll; 150#endif 151 152/********************************************************************* 153 * FreeBSD Device Interface Entry Points 154 *********************************************************************/ 155 156static device_method_t ixgb_methods[] = { 157 /* Device interface */ 158 DEVMETHOD(device_probe, ixgb_probe), 159 DEVMETHOD(device_attach, ixgb_attach), 160 DEVMETHOD(device_detach, ixgb_detach), 161 DEVMETHOD(device_shutdown, ixgb_shutdown), 162 {0, 0} 163}; 164 165static driver_t ixgb_driver = { 166 "ixgb", ixgb_methods, sizeof(struct adapter), 167}; 168 169static devclass_t ixgb_devclass; 170DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0); 171 172MODULE_DEPEND(ixgb, pci, 1, 1, 1); 173MODULE_DEPEND(ixgb, ether, 1, 1, 1); 174 175/* some defines for controlling descriptor fetches in h/w */ 176#define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */ 
177#define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is 178 * pushed this many descriptors from 179 * head */ 180#define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */ 181 182 183/********************************************************************* 184 * Device identification routine 185 * 186 * ixgb_probe determines if the driver should be loaded on 187 * adapter based on PCI vendor/device id of the adapter. 188 * 189 * return 0 on success, positive on failure 190 *********************************************************************/ 191 192static int 193ixgb_probe(device_t dev) 194{ 195 ixgb_vendor_info_t *ent; 196 197 u_int16_t pci_vendor_id = 0; 198 u_int16_t pci_device_id = 0; 199 u_int16_t pci_subvendor_id = 0; 200 u_int16_t pci_subdevice_id = 0; 201 char adapter_name[60]; 202 203 INIT_DEBUGOUT("ixgb_probe: begin"); 204 205 pci_vendor_id = pci_get_vendor(dev); 206 if (pci_vendor_id != IXGB_VENDOR_ID) 207 return (ENXIO); 208 209 pci_device_id = pci_get_device(dev); 210 pci_subvendor_id = pci_get_subvendor(dev); 211 pci_subdevice_id = pci_get_subdevice(dev); 212 213 ent = ixgb_vendor_info_array; 214 while (ent->vendor_id != 0) { 215 if ((pci_vendor_id == ent->vendor_id) && 216 (pci_device_id == ent->device_id) && 217 218 ((pci_subvendor_id == ent->subvendor_id) || 219 (ent->subvendor_id == PCI_ANY_ID)) && 220 221 ((pci_subdevice_id == ent->subdevice_id) || 222 (ent->subdevice_id == PCI_ANY_ID))) { 223 sprintf(adapter_name, "%s, Version - %s", 224 ixgb_strings[ent->index], 225 ixgb_driver_version); 226 device_set_desc_copy(dev, adapter_name); 227 return (BUS_PROBE_DEFAULT); 228 } 229 ent++; 230 } 231 232 return (ENXIO); 233} 234 235/********************************************************************* 236 * Device initialization routine 237 * 238 * The attach entry point is called when the driver is being loaded. 
239 * This routine identifies the type of hardware, allocates all resources 240 * and initializes the hardware. 241 * 242 * return 0 on success, positive on failure 243 *********************************************************************/ 244 245static int 246ixgb_attach(device_t dev) 247{ 248 struct adapter *adapter; 249 int tsize, rsize; 250 int error = 0; 251 252 printf("ixgb%d: %s\n", device_get_unit(dev), ixgb_copyright); 253 INIT_DEBUGOUT("ixgb_attach: begin"); 254 255 /* Allocate, clear, and link in our adapter structure */ 256 if (!(adapter = device_get_softc(dev))) { 257 printf("ixgb: adapter structure allocation failed\n"); 258 return (ENOMEM); 259 } 260 bzero(adapter, sizeof(struct adapter)); 261 adapter->dev = dev; 262 adapter->osdep.dev = dev; 263 adapter->unit = device_get_unit(dev); 264 IXGB_LOCK_INIT(adapter, device_get_nameunit(dev)); 265 266 if (ixgb_adapter_list != NULL) 267 ixgb_adapter_list->prev = adapter; 268 adapter->next = ixgb_adapter_list; 269 ixgb_adapter_list = adapter; 270 271 /* SYSCTL APIs */ 272 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 273 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 274 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, 275 (void *)adapter, 0, 276 ixgb_sysctl_stats, "I", "Statistics"); 277 278 callout_init(&adapter->timer, CALLOUT_MPSAFE); 279 280 /* Determine hardware revision */ 281 ixgb_identify_hardware(adapter); 282 283 /* Parameters (to be read from user) */ 284 adapter->num_tx_desc = IXGB_MAX_TXD; 285 adapter->num_rx_desc = IXGB_MAX_RXD; 286 adapter->tx_int_delay = TIDV; 287 adapter->rx_int_delay = RDTR; 288 adapter->rx_buffer_len = IXGB_RXBUFFER_2048; 289 290 adapter->hw.fc.high_water = FCRTH; 291 adapter->hw.fc.low_water = FCRTL; 292 adapter->hw.fc.pause_time = FCPAUSE; 293 adapter->hw.fc.send_xon = TRUE; 294 adapter->hw.fc.type = FLOW_CONTROL; 295 296 297 /* Set the max frame size assuming standard ethernet sized frames */ 298 adapter->hw.max_frame_size = 299 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 300 
301 if (ixgb_allocate_pci_resources(adapter)) { 302 printf("ixgb%d: Allocation of PCI resources failed\n", 303 adapter->unit); 304 error = ENXIO; 305 goto err_pci; 306 } 307 tsize = IXGB_ROUNDUP(adapter->num_tx_desc * 308 sizeof(struct ixgb_tx_desc), 4096); 309 310 /* Allocate Transmit Descriptor ring */ 311 if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) { 312 printf("ixgb%d: Unable to allocate TxDescriptor memory\n", 313 adapter->unit); 314 error = ENOMEM; 315 goto err_tx_desc; 316 } 317 adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr; 318 319 rsize = IXGB_ROUNDUP(adapter->num_rx_desc * 320 sizeof(struct ixgb_rx_desc), 4096); 321 322 /* Allocate Receive Descriptor ring */ 323 if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) { 324 printf("ixgb%d: Unable to allocate rx_desc memory\n", 325 adapter->unit); 326 error = ENOMEM; 327 goto err_rx_desc; 328 } 329 adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr; 330 331 /* Initialize the hardware */ 332 if (ixgb_hardware_init(adapter)) { 333 printf("ixgb%d: Unable to initialize the hardware\n", 334 adapter->unit); 335 error = EIO; 336 goto err_hw_init; 337 } 338 /* Setup OS specific network interface */ 339 ixgb_setup_interface(dev, adapter); 340 341 /* Initialize statistics */ 342 ixgb_clear_hw_cntrs(&adapter->hw); 343 ixgb_update_stats_counters(adapter); 344 345 INIT_DEBUGOUT("ixgb_attach: end"); 346 return (0); 347 348err_hw_init: 349 ixgb_dma_free(adapter, &adapter->rxdma); 350err_rx_desc: 351 ixgb_dma_free(adapter, &adapter->txdma); 352err_tx_desc: 353err_pci: 354 ixgb_free_pci_resources(adapter); 355 sysctl_ctx_free(&adapter->sysctl_ctx); 356 return (error); 357 358} 359 360/********************************************************************* 361 * Device removal routine 362 * 363 * The detach entry point is called when the driver is being removed. 
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgb_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	INIT_DEBUGOUT("ixgb_detach: begin");

#ifdef DEVICE_POLLING
	/* Drop out of polling mode before tearing the interface down. */
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/*
	 * Mark in_detach under the lock so the ioctl path (which checks it
	 * first) stops touching the hardware, then quiesce the adapter.
	 */
	IXGB_LOCK(adapter);
	adapter->in_detach = 1;

	ixgb_stop(adapter);
	IXGB_UNLOCK(adapter);

#if __FreeBSD_version < 500000
	ether_ifdetach(adapter->ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifdetach(adapter->ifp);
#endif
	ixgb_free_pci_resources(adapter);
#if __FreeBSD_version >= 500000
	if_free(adapter->ifp);
#endif

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base) {
		ixgb_dma_free(adapter, &adapter->txdma);
		adapter->tx_desc_base = NULL;
	}
	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base) {
		ixgb_dma_free(adapter, &adapter->rxdma);
		adapter->rx_desc_base = NULL;
	}
	/* Remove from the adapter list (doubly-linked, head is global) */
	if (ixgb_adapter_list == adapter)
		ixgb_adapter_list = adapter->next;
	if (adapter->next != NULL)
		adapter->next->prev = adapter->prev;
	if (adapter->prev != NULL)
		adapter->prev->next = adapter->next;

	/* NOTE(review): ifp was freed by if_free() above on modern
	 * kernels; these two stores match the original code but touch
	 * freed memory — verify ordering against ifnet(9). */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	ifp->if_timer = 0;

	IXGB_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgb_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	/* Quiesce the hardware; resources stay allocated across shutdown. */
	IXGB_LOCK(adapter);
	ixgb_stop(adapter);
	IXGB_UNLOCK(adapter);
	return (0);
}


/*********************************************************************
 *  Transmit entry point
 *
 *  ixgb_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
ixgb_start_locked(struct ifnet * ifp)
{
	struct mbuf *m_head;
	struct adapter *adapter = ifp->if_softc;

	IXGB_LOCK_ASSERT(adapter);

	/* No point queueing descriptors while the link is down. */
	if (!adapter->link_active)
		return;

	/* Drain the send queue until empty or out of descriptors. */
	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL)
			break;

		/*
		 * On encap failure, mark the interface busy and push the
		 * mbuf back on the front of the queue for a later retry.
		 */
		if (ixgb_encap(adapter, m_head)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
#if __FreeBSD_version < 500000
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);
#else
		ETHER_BPF_MTAP(ifp, m_head);
#endif
		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = IXGB_TX_TIMEOUT;

	}
	return;
}

/* Locked wrapper: the stack calls this without the adapter lock held. */
static void
ixgb_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGB_LOCK(adapter);
	ixgb_start_locked(ifp);
	IXGB_UNLOCK(adapter);
	return;
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgb_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
{
	int mask, error = 0;
	struct ifreq *ifr = (struct ifreq *) data;
	struct adapter *adapter = ifp->if_softc;

	/* Detach in progress: refuse to touch the hardware. */
	if (adapter->in_detach)
		goto out;

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		/* Reject MTUs whose resulting frame exceeds jumbo max. */
		if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXGB_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->hw.max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			/* Re-init so the hardware picks up the new size. */
			ixgb_init_locked(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		IXGB_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ixgb_init_locked(adapter);
			}
			/* Clear then re-apply to track PROMISC/ALLMULTI. */
			ixgb_disable_promisc(adapter);
			ixgb_set_promisc(adapter);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				ixgb_stop(adapter);
			}
		}
		IXGB_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Reprogram the filter with interrupts masked. */
			IXGB_LOCK(adapter);
			ixgb_disable_intr(adapter);
			ixgb_set_multi(adapter);
			ixgb_enable_intr(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		/* mask = set of capability bits the caller is toggling */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(ixgb_poll, ifp);
				if (error)
					return(error);
				IXGB_LOCK(adapter);
				ixgb_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				IXGB_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				IXGB_LOCK(adapter);
				ixgb_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				IXGB_UNLOCK(adapter);
			}
		}
#endif				/* DEVICE_POLLING */
		if (mask & IFCAP_HWCSUM) {
			/* Toggle checksum offload and re-init if running. */
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgb_init(adapter);
		}
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
		error = EINVAL;
	}

out:
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

static void
ixgb_watchdog(struct ifnet * ifp)
{
	struct adapter *adapter;
	adapter = ifp->if_softc;

	/*
	 * If we are in this routine because of pause frames, then don't
	 * reset the hardware.
	 */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
		/* Paused by the link partner: just re-arm the timer. */
		ifp->if_timer = IXGB_TX_TIMEOUT;
		return;
	}
	printf("ixgb%d: watchdog timeout -- resetting\n", adapter->unit);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/* Full stop/start cycle to recover the transmit unit. */
	ixgb_stop(adapter);
	ixgb_init(adapter);

	ifp->if_oerrors++;

	return;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static void
ixgb_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixgb_init: begin");

	IXGB_LOCK_ASSERT(adapter);

	/* Quiesce first so re-init from any state is safe. */
	ixgb_stop(adapter);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.curr_mac_addr,
	    IXGB_ETH_LENGTH_OF_ADDRESS);

	/* Initialize the hardware */
	if (ixgb_hardware_init(adapter)) {
		printf("ixgb%d: Unable to initialize the hardware\n",
		    adapter->unit);
		return;
	}
	ixgb_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgb_setup_transmit_structures(adapter)) {
		printf("ixgb%d: Could not setup transmit structures\n",
		    adapter->unit);
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	ixgb_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (ixgb_setup_receive_structures(adapter)) {
		printf("ixgb%d: Could not setup receive structures\n",
		    adapter->unit);
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_receive_unit(adapter);

	/* Don't
	 * lose promiscuous settings */
	ixgb_set_promisc(adapter);

	ifp = adapter->ifp;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Advertise checksum offload to the stack only when enabled. */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	/* Enable jumbo frames */
	if (ifp->if_mtu > ETHERMTU) {
		uint32_t temp_reg;
		IXGB_WRITE_REG(&adapter->hw, MFS,
		    adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
		temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
		temp_reg |= IXGB_CTRL0_JFE;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
	}
	/* Start the link/stats timer (2 second period). */
	callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter);
	ixgb_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
	/*
	 * Only disable interrupts if we are polling, make sure they are on
	 * otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		ixgb_disable_intr(adapter);
	else
#endif
		ixgb_enable_intr(adapter);

	return;
}

/* Unlocked init entry point used by the stack (if_init). */
static void
ixgb_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGB_LOCK(adapter);
	ixgb_init_locked(adapter);
	IXGB_UNLOCK(adapter);
	return;
}

#ifdef DEVICE_POLLING
/*
 * Polling body; caller holds the adapter lock.  Returns the number of
 * receive packets processed.
 */
static int
ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u_int32_t reg_icr;
	int rx_npkts;

	IXGB_LOCK_ASSERT(adapter);

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Reading ICR clears it; watch for link state events. */
		reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
			callout_stop(&adapter->timer);
			ixgb_check_for_link(&adapter->hw);
			ixgb_print_link_status(adapter);
			callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer,
			    adapter);
		}
	}
	rx_npkts = ixgb_process_receive_interrupts(adapter, count);
	ixgb_clean_transmit_interrupts(adapter);

	/* Kick the transmitter if cleaning freed descriptors. */
	if (ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);
	return (rx_npkts);
}

static
int 779ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count) 780{ 781 struct adapter *adapter = ifp->if_softc; 782 int rx_npkts = 0; 783 784 IXGB_LOCK(adapter); 785 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 786 rx_npkts = ixgb_poll_locked(ifp, cmd, count); 787 IXGB_UNLOCK(adapter); 788 return (rx_npkts); 789} 790#endif /* DEVICE_POLLING */ 791 792/********************************************************************* 793 * 794 * Interrupt Service routine 795 * 796 **********************************************************************/ 797 798static void 799ixgb_intr(void *arg) 800{ 801 u_int32_t loop_cnt = IXGB_MAX_INTR; 802 u_int32_t reg_icr; 803 struct ifnet *ifp; 804 struct adapter *adapter = arg; 805 boolean_t rxdmt0 = FALSE; 806 807 IXGB_LOCK(adapter); 808 809 ifp = adapter->ifp; 810 811#ifdef DEVICE_POLLING 812 if (ifp->if_capenable & IFCAP_POLLING) { 813 IXGB_UNLOCK(adapter); 814 return; 815 } 816#endif 817 818 reg_icr = IXGB_READ_REG(&adapter->hw, ICR); 819 if (reg_icr == 0) { 820 IXGB_UNLOCK(adapter); 821 return; 822 } 823 824 if (reg_icr & IXGB_INT_RXDMT0) 825 rxdmt0 = TRUE; 826 827#ifdef _SV_ 828 if (reg_icr & IXGB_INT_RXDMT0) 829 adapter->sv_stats.icr_rxdmt0++; 830 if (reg_icr & IXGB_INT_RXO) 831 adapter->sv_stats.icr_rxo++; 832 if (reg_icr & IXGB_INT_RXT0) 833 adapter->sv_stats.icr_rxt0++; 834 if (reg_icr & IXGB_INT_TXDW) 835 adapter->sv_stats.icr_TXDW++; 836#endif /* _SV_ */ 837 838 /* Link status change */ 839 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) { 840 callout_stop(&adapter->timer); 841 ixgb_check_for_link(&adapter->hw); 842 ixgb_print_link_status(adapter); 843 callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, 844 adapter); 845 } 846 while (loop_cnt > 0) { 847 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 848 ixgb_process_receive_interrupts(adapter, -1); 849 ixgb_clean_transmit_interrupts(adapter); 850 } 851 loop_cnt--; 852 } 853 854 if (rxdmt0 && adapter->raidc) { 855 IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0); 856 
		IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
	}
	/* Resume transmission if work was queued while we processed. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);

	IXGB_UNLOCK(adapter);
	return;
}


/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixgb_media_status: begin");

	/* Refresh link state before reporting. */
	ixgb_check_for_link(&adapter->hw);
	ixgb_print_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->hw.link_up)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	/* NOTE(review): reports IFM_1000_SX for a 10GbE part — appears to
	 * predate a dedicated 10G media type; confirm before changing. */
	ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediopt option with ifconfig.
 *
 **********************************************************************/
static int
ixgb_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixgb_media_change: begin");

	/* Fixed fiber media: only validate, nothing to reprogram. */
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
{
	u_int8_t txd_popts;
	int i, j, error, nsegs;

#if __FreeBSD_version < 500000
	struct ifvlan *ifv = NULL;
#endif
	/* NOTE(review): `mtag` is referenced in the 500000..700000 branch
	 * below but never declared here — that branch cannot compile;
	 * verify against the tree's actual __FreeBSD_version. */
	bus_dma_segment_t segs[IXGB_MAX_SCATTER];
	bus_dmamap_t map;
	struct ixgb_buffer *tx_buffer = NULL;
	struct ixgb_tx_desc *current_tx_desc = NULL;
	struct ifnet *ifp = adapter->ifp;

	/*
	 * Force a cleanup if number of TX descriptors available hits the
	 * threshold
	 */
	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		ixgb_clean_transmit_interrupts(adapter);
	}
	if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		/* Still starved after cleanup: punt back to the caller. */
		adapter->no_tx_desc_avail1++;
		return (ENOBUFS);
	}
	/*
	 * Map the packet for DMA.
	 */
	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
		adapter->no_tx_map_avail++;
		return (ENOMEM);
	}
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		printf("ixgb%d: ixgb_encap: bus_dmamap_load_mbuf failed; "
		    "error %u\n", adapter->unit, error);
		bus_dmamap_destroy(adapter->txtag, map);
		return (error);
	}
	KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));

	/* Not enough free descriptors for all segments of this frame. */
	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_destroy(adapter->txtag, map);
		return (ENOBUFS);
	}
	if (ifp->if_hwassist > 0) {
		ixgb_transmit_checksum_setup(adapter, m_head,
		    &txd_popts);
	} else
		txd_popts = 0;

	/* Find out if we are in vlan mode */
#if __FreeBSD_version < 500000
	if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv =
m_head->m_pkthdr.rcvif->if_softc; 986#elseif __FreeBSD_version < 700000 987 mtag = VLAN_OUTPUT_TAG(ifp, m_head); 988#endif 989 i = adapter->next_avail_tx_desc; 990 for (j = 0; j < nsegs; j++) { 991 tx_buffer = &adapter->tx_buffer_area[i]; 992 current_tx_desc = &adapter->tx_desc_base[i]; 993 994 current_tx_desc->buff_addr = htole64(segs[j].ds_addr); 995 current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len); 996 current_tx_desc->popts = txd_popts; 997 if (++i == adapter->num_tx_desc) 998 i = 0; 999 1000 tx_buffer->m_head = NULL; 1001 } 1002 1003 adapter->num_tx_desc_avail -= nsegs; 1004 adapter->next_avail_tx_desc = i; 1005 1006#if __FreeBSD_version < 500000 1007 if (ifv != NULL) { 1008 /* Set the vlan id */ 1009 current_tx_desc->vlan = ifv->ifv_tag; 1010#elseif __FreeBSD_version < 700000 1011 if (mtag != NULL) { 1012 /* Set the vlan id */ 1013 current_tx_desc->vlan = VLAN_TAG_VALUE(mtag); 1014#else 1015 if (m_head->m_flags & M_VLANTAG) { 1016 current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag; 1017#endif 1018 1019 /* Tell hardware to add tag */ 1020 current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE; 1021 } 1022 tx_buffer->m_head = m_head; 1023 tx_buffer->map = map; 1024 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE); 1025 1026 /* 1027 * Last Descriptor of Packet needs End Of Packet (EOP) 1028 */ 1029 current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP); 1030 1031 /* 1032 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000 1033 * that this frame is available to transmit. 
	 */
	IXGB_WRITE_REG(&adapter->hw, TDT, i);

	return (0);
}

/*
 * Program the receive control register to match the interface's
 * IFF_PROMISC / IFF_ALLMULTI flags (enable path only; see
 * ixgb_disable_promisc for the clearing path).
 */
static void
ixgb_set_promisc(struct adapter * adapter)
{

	u_int32_t reg_rctl;
	struct ifnet *ifp = adapter->ifp;

	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		/* Unicast + multicast promiscuous */
		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		/* Multicast promiscuous only */
		reg_rctl |= IXGB_RCTL_MPE;
		reg_rctl &= ~IXGB_RCTL_UPE;
		IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	}
	return;
}

/* Clear both promiscuous bits unconditionally. */
static void
ixgb_disable_promisc(struct adapter * adapter)
{
	u_int32_t reg_rctl;

	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	reg_rctl &= (~IXGB_RCTL_UPE);
	reg_rctl &= (~IXGB_RCTL_MPE);
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
1079 * 1080 **********************************************************************/ 1081 1082static void 1083ixgb_set_multi(struct adapter * adapter) 1084{ 1085 u_int32_t reg_rctl = 0; 1086 u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS]; 1087 struct ifmultiaddr *ifma; 1088 int mcnt = 0; 1089 struct ifnet *ifp = adapter->ifp; 1090 1091 IOCTL_DEBUGOUT("ixgb_set_multi: begin"); 1092 1093 if_maddr_rlock(ifp); 1094#if __FreeBSD_version < 500000 1095 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1096#else 1097 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1098#endif 1099 if (ifma->ifma_addr->sa_family != AF_LINK) 1100 continue; 1101 1102 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 1103 &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS); 1104 mcnt++; 1105 } 1106 if_maddr_runlock(ifp); 1107 1108 if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) { 1109 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); 1110 reg_rctl |= IXGB_RCTL_MPE; 1111 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1112 } else 1113 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0); 1114 1115 return; 1116} 1117 1118 1119/********************************************************************* 1120 * Timer routine 1121 * 1122 * This routine checks for link status and updates statistics. 
1123 * 1124 **********************************************************************/ 1125 1126static void 1127ixgb_local_timer(void *arg) 1128{ 1129 struct ifnet *ifp; 1130 struct adapter *adapter = arg; 1131 ifp = adapter->ifp; 1132 1133 IXGB_LOCK(adapter); 1134 1135 ixgb_check_for_link(&adapter->hw); 1136 ixgb_print_link_status(adapter); 1137 ixgb_update_stats_counters(adapter); 1138 if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) { 1139 ixgb_print_hw_stats(adapter); 1140 } 1141 callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter); 1142 1143 IXGB_UNLOCK(adapter); 1144 return; 1145} 1146 1147static void 1148ixgb_print_link_status(struct adapter * adapter) 1149{ 1150 if (adapter->hw.link_up) { 1151 if (!adapter->link_active) { 1152 printf("ixgb%d: Link is up %d Mbps %s \n", 1153 adapter->unit, 1154 10000, 1155 "Full Duplex"); 1156 adapter->link_active = 1; 1157 } 1158 } else { 1159 if (adapter->link_active) { 1160 printf("ixgb%d: Link is Down \n", adapter->unit); 1161 adapter->link_active = 0; 1162 } 1163 } 1164 1165 return; 1166} 1167 1168 1169 1170/********************************************************************* 1171 * 1172 * This routine disables all traffic on the adapter by issuing a 1173 * global reset on the MAC and deallocates TX/RX buffers. 
1174 * 1175 **********************************************************************/ 1176 1177static void 1178ixgb_stop(void *arg) 1179{ 1180 struct ifnet *ifp; 1181 struct adapter *adapter = arg; 1182 ifp = adapter->ifp; 1183 1184 IXGB_LOCK_ASSERT(adapter); 1185 1186 INIT_DEBUGOUT("ixgb_stop: begin\n"); 1187 ixgb_disable_intr(adapter); 1188 adapter->hw.adapter_stopped = FALSE; 1189 ixgb_adapter_stop(&adapter->hw); 1190 callout_stop(&adapter->timer); 1191 ixgb_free_transmit_structures(adapter); 1192 ixgb_free_receive_structures(adapter); 1193 1194 1195 /* Tell the stack that the interface is no longer active */ 1196 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1197 1198 return; 1199} 1200 1201 1202/********************************************************************* 1203 * 1204 * Determine hardware revision. 1205 * 1206 **********************************************************************/ 1207static void 1208ixgb_identify_hardware(struct adapter * adapter) 1209{ 1210 device_t dev = adapter->dev; 1211 1212 /* Make sure our PCI config space has the necessary stuff set */ 1213 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 1214 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) && 1215 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) { 1216 printf("ixgb%d: Memory Access and/or Bus Master bits were not set!\n", 1217 adapter->unit); 1218 adapter->hw.pci_cmd_word |= 1219 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); 1220 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2); 1221 } 1222 /* Save off the information about this board */ 1223 adapter->hw.vendor_id = pci_get_vendor(dev); 1224 adapter->hw.device_id = pci_get_device(dev); 1225 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 1226 adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); 1227 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); 1228 1229 /* Set MacType, etc. 
based on this PCI info */ 1230 switch (adapter->hw.device_id) { 1231 case IXGB_DEVICE_ID_82597EX: 1232 case IXGB_DEVICE_ID_82597EX_SR: 1233 adapter->hw.mac_type = ixgb_82597; 1234 break; 1235 default: 1236 INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id); 1237 printf("ixgb%d: unsupported device id 0x%x\n", adapter->unit, adapter->hw.device_id); 1238 } 1239 1240 return; 1241} 1242 1243static int 1244ixgb_allocate_pci_resources(struct adapter * adapter) 1245{ 1246 int rid; 1247 device_t dev = adapter->dev; 1248 1249 rid = IXGB_MMBA; 1250 adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY, 1251 &rid, 0, ~0, 1, 1252 RF_ACTIVE); 1253 if (!(adapter->res_memory)) { 1254 printf("ixgb%d: Unable to allocate bus resource: memory\n", 1255 adapter->unit); 1256 return (ENXIO); 1257 } 1258 adapter->osdep.mem_bus_space_tag = 1259 rman_get_bustag(adapter->res_memory); 1260 adapter->osdep.mem_bus_space_handle = 1261 rman_get_bushandle(adapter->res_memory); 1262 adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle; 1263 1264 rid = 0x0; 1265 adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ, 1266 &rid, 0, ~0, 1, 1267 RF_SHAREABLE | RF_ACTIVE); 1268 if (!(adapter->res_interrupt)) { 1269 printf("ixgb%d: Unable to allocate bus resource: interrupt\n", 1270 adapter->unit); 1271 return (ENXIO); 1272 } 1273 if (bus_setup_intr(dev, adapter->res_interrupt, 1274 INTR_TYPE_NET | INTR_MPSAFE, 1275 NULL, (void (*) (void *))ixgb_intr, adapter, 1276 &adapter->int_handler_tag)) { 1277 printf("ixgb%d: Error registering interrupt handler!\n", 1278 adapter->unit); 1279 return (ENXIO); 1280 } 1281 adapter->hw.back = &adapter->osdep; 1282 1283 return (0); 1284} 1285 1286static void 1287ixgb_free_pci_resources(struct adapter * adapter) 1288{ 1289 device_t dev = adapter->dev; 1290 1291 if (adapter->res_interrupt != NULL) { 1292 bus_teardown_intr(dev, adapter->res_interrupt, 1293 adapter->int_handler_tag); 1294 bus_release_resource(dev, SYS_RES_IRQ, 0, 
1295 adapter->res_interrupt); 1296 } 1297 if (adapter->res_memory != NULL) { 1298 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA, 1299 adapter->res_memory); 1300 } 1301 if (adapter->res_ioport != NULL) { 1302 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 1303 adapter->res_ioport); 1304 } 1305 return; 1306} 1307 1308/********************************************************************* 1309 * 1310 * Initialize the hardware to a configuration as specified by the 1311 * adapter structure. The controller is reset, the EEPROM is 1312 * verified, the MAC address is set, then the shared initialization 1313 * routines are called. 1314 * 1315 **********************************************************************/ 1316static int 1317ixgb_hardware_init(struct adapter * adapter) 1318{ 1319 /* Issue a global reset */ 1320 adapter->hw.adapter_stopped = FALSE; 1321 ixgb_adapter_stop(&adapter->hw); 1322 1323 /* Make sure we have a good EEPROM before we read from it */ 1324 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { 1325 printf("ixgb%d: The EEPROM Checksum Is Not Valid\n", 1326 adapter->unit); 1327 return (EIO); 1328 } 1329 if (!ixgb_init_hw(&adapter->hw)) { 1330 printf("ixgb%d: Hardware Initialization Failed", 1331 adapter->unit); 1332 return (EIO); 1333 } 1334 1335 return (0); 1336} 1337 1338/********************************************************************* 1339 * 1340 * Setup networking device structure and register an interface. 
 *
 **********************************************************************/
static void
ixgb_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet *ifp;
	INIT_DEBUGOUT("ixgb_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
#if __FreeBSD_version >= 502000
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#else
	ifp->if_unit = adapter->unit;
	ifp->if_name = "ixgb";
#endif
	ifp->if_mtu = ETHERMTU;
	/* NOTE(review): 1 Gb/s reported on a 10 Gb part -- likely kept to
	 * avoid overflowing a 32-bit if_baudrate; confirm before changing. */
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixgb_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgb_ioctl;
	ifp->if_start = ixgb_start;
	ifp->if_watchdog = ixgb_watchdog;
	/* Leave one descriptor free so the queue never fills completely */
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;

#if __FreeBSD_version < 500000
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifattach(ifp, adapter->hw.curr_mac_addr);
#endif

	ifp->if_capabilities = IFCAP_HWCSUM;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#if __FreeBSD_version >= 500000
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
#endif

	ifp->if_capenable = ifp->if_capabilities;

	/* Polling is advertised after capenable is set, so it starts disabled */
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
	    ixgb_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
	    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
	    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/

/*
 * bus_dmamap_load() callback: stash the (single) segment's bus address
 * into the bus_addr_t pointed to by arg.  nseg is ignored because the
 * tags used here are created with nsegments == 1.
 */
static void
ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}

/*
 * Allocate and map a contiguous, page-aligned DMA region of 'size' bytes.
 * On success fills in dma (tag, map, vaddr, paddr, size) and returns 0;
 * on failure unwinds via the goto-cleanup chain and returns the error.
 */
static int
ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
    struct ixgb_dma_alloc * dma, int mapflags)
{
	int r;

	r = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,	/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,	/* filter, filterarg */
	    size,	/* maxsize */
	    1,	/* nsegments */
	    size,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
	    NULL,	/* lockfunc */
	    NULL,	/* lockfuncarg */
#endif
	    &dma->dma_tag);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dma_tag_create failed; "
		       "error %u\n", adapter->unit, r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dmamem_alloc failed; "
		       "error %u\n", adapter->unit, r);
		goto fail_1;
	}
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size,
	    ixgb_dmamap_cb,
	    &dma->dma_paddr,
	    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dmamap_load failed; "
		       "error %u\n", adapter->unit, r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}



/*
 * Tear down a region allocated by ixgb_dma_malloc() (unload, free, destroy
 * tag -- reverse order of allocation).
 */
static void
ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*********************************************************************
 *
 * Allocate memory for tx_buffer structures. The tx_buffer stores all
 * the information needed to transmit a packet on the wire.
1487 * 1488 **********************************************************************/ 1489static int 1490ixgb_allocate_transmit_structures(struct adapter * adapter) 1491{ 1492 if (!(adapter->tx_buffer_area = 1493 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * 1494 adapter->num_tx_desc, M_DEVBUF, 1495 M_NOWAIT | M_ZERO))) { 1496 printf("ixgb%d: Unable to allocate tx_buffer memory\n", 1497 adapter->unit); 1498 return ENOMEM; 1499 } 1500 bzero(adapter->tx_buffer_area, 1501 sizeof(struct ixgb_buffer) * adapter->num_tx_desc); 1502 1503 return 0; 1504} 1505 1506/********************************************************************* 1507 * 1508 * Allocate and initialize transmit structures. 1509 * 1510 **********************************************************************/ 1511static int 1512ixgb_setup_transmit_structures(struct adapter * adapter) 1513{ 1514 /* 1515 * Setup DMA descriptor areas. 1516 */ 1517 if (bus_dma_tag_create(NULL, /* parent */ 1518 PAGE_SIZE, 0, /* alignment, bounds */ 1519 BUS_SPACE_MAXADDR, /* lowaddr */ 1520 BUS_SPACE_MAXADDR, /* highaddr */ 1521 NULL, NULL, /* filter, filterarg */ 1522 MCLBYTES * IXGB_MAX_SCATTER, /* maxsize */ 1523 IXGB_MAX_SCATTER, /* nsegments */ 1524 MCLBYTES, /* maxsegsize */ 1525 BUS_DMA_ALLOCNOW, /* flags */ 1526#if __FreeBSD_version >= 502000 1527 NULL, /* lockfunc */ 1528 NULL, /* lockfuncarg */ 1529#endif 1530 &adapter->txtag)) { 1531 printf("ixgb%d: Unable to allocate TX DMA tag\n", adapter->unit); 1532 return (ENOMEM); 1533 } 1534 if (ixgb_allocate_transmit_structures(adapter)) 1535 return ENOMEM; 1536 1537 bzero((void *)adapter->tx_desc_base, 1538 (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc); 1539 1540 adapter->next_avail_tx_desc = 0; 1541 adapter->oldest_used_tx_desc = 0; 1542 1543 /* Set number of descriptors available */ 1544 adapter->num_tx_desc_avail = adapter->num_tx_desc; 1545 1546 /* Set checksum context */ 1547 adapter->active_checksum_context = OFFLOAD_NONE; 1548 1549 return 0; 1550} 1551 
1552/********************************************************************* 1553 * 1554 * Enable transmit unit. 1555 * 1556 **********************************************************************/ 1557static void 1558ixgb_initialize_transmit_unit(struct adapter * adapter) 1559{ 1560 u_int32_t reg_tctl; 1561 u_int64_t tdba = adapter->txdma.dma_paddr; 1562 1563 /* Setup the Base and Length of the Tx Descriptor Ring */ 1564 IXGB_WRITE_REG(&adapter->hw, TDBAL, 1565 (tdba & 0x00000000ffffffffULL)); 1566 IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32)); 1567 IXGB_WRITE_REG(&adapter->hw, TDLEN, 1568 adapter->num_tx_desc * 1569 sizeof(struct ixgb_tx_desc)); 1570 1571 /* Setup the HW Tx Head and Tail descriptor pointers */ 1572 IXGB_WRITE_REG(&adapter->hw, TDH, 0); 1573 IXGB_WRITE_REG(&adapter->hw, TDT, 0); 1574 1575 1576 HW_DEBUGOUT2("Base = %x, Length = %x\n", 1577 IXGB_READ_REG(&adapter->hw, TDBAL), 1578 IXGB_READ_REG(&adapter->hw, TDLEN)); 1579 1580 IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay); 1581 1582 1583 /* Program the Transmit Control Register */ 1584 reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL); 1585 reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE; 1586 IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl); 1587 1588 /* Setup Transmit Descriptor Settings for this adapter */ 1589 adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS; 1590 1591 if (adapter->tx_int_delay > 0) 1592 adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE; 1593 return; 1594} 1595 1596/********************************************************************* 1597 * 1598 * Free all transmit related data structures. 
1599 * 1600 **********************************************************************/ 1601static void 1602ixgb_free_transmit_structures(struct adapter * adapter) 1603{ 1604 struct ixgb_buffer *tx_buffer; 1605 int i; 1606 1607 INIT_DEBUGOUT("free_transmit_structures: begin"); 1608 1609 if (adapter->tx_buffer_area != NULL) { 1610 tx_buffer = adapter->tx_buffer_area; 1611 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { 1612 if (tx_buffer->m_head != NULL) { 1613 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 1614 bus_dmamap_destroy(adapter->txtag, tx_buffer->map); 1615 m_freem(tx_buffer->m_head); 1616 } 1617 tx_buffer->m_head = NULL; 1618 } 1619 } 1620 if (adapter->tx_buffer_area != NULL) { 1621 free(adapter->tx_buffer_area, M_DEVBUF); 1622 adapter->tx_buffer_area = NULL; 1623 } 1624 if (adapter->txtag != NULL) { 1625 bus_dma_tag_destroy(adapter->txtag); 1626 adapter->txtag = NULL; 1627 } 1628 return; 1629} 1630 1631/********************************************************************* 1632 * 1633 * The offload context needs to be set when we transfer the first 1634 * packet of a particular protocol (TCP/UDP). We change the 1635 * context only if the protocol type changes. 
 *
 **********************************************************************/
static void
ixgb_transmit_checksum_setup(struct adapter * adapter,
    struct mbuf * mp,
    u_int8_t * txd_popts)
{
	struct ixgb_context_desc *TXD;
	struct ixgb_buffer *tx_buffer;
	int curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			/* Context already programmed for TCP: nothing to do */
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;
		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			/* Context already programmed for UDP: nothing to do */
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* Unrecognized offload request: no popts bits */
			*txd_popts = 0;
			return;
		}
	} else {
		*txd_popts = 0;
		return;
	}

	/*
	 * If we reach this point, the checksum offload context needs to be
	 * reset.  A context descriptor is written into the TX ring,
	 * consuming one descriptor slot.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];


	/* Checksum coverage starts right after the IP header */
	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
	TXD->tucse = 0;

	TXD->mss = 0;

	/* Point tucso at the TCP/UDP checksum field offset */
	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}
	TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;

	/* Context descriptors carry no mbuf */
	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	return;
}

/**********************************************************************
 *
 * Examine each tx_buffer in the used queue. If the hardware is done
 * processing the packet then free associated resources. The
 * tx_buffer is put back on the free queue.
 *
 **********************************************************************/
static void
ixgb_clean_transmit_interrupts(struct adapter * adapter)
{
	int i, num_avail;
	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *tx_desc;

	IXGB_LOCK_ASSERT(adapter);

	/* Nothing outstanding: the ring is already fully available */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

#ifdef _SV_
	adapter->clean_tx_interrupts++;
#endif
	num_avail = adapter->num_tx_desc_avail;
	i = adapter->oldest_used_tx_desc;

	tx_buffer = &adapter->tx_buffer_area[i];
	tx_desc = &adapter->tx_desc_base[i];

	/* Walk forward while the hardware reports Descriptor Done */
	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {

		tx_desc->status = 0;
		num_avail++;

		if (tx_buffer->m_head) {
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (++i == adapter->num_tx_desc)
			i = 0;

		tx_buffer = &adapter->tx_buffer_area[i];
		tx_desc = &adapter->tx_desc_base[i];
	}

	adapter->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
		struct ifnet *ifp = adapter->ifp;

		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (num_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
		else if (num_avail == adapter->num_tx_desc_avail)
			ifp->if_timer = IXGB_TX_TIMEOUT;
	}
	adapter->num_tx_desc_avail = num_avail;
	return;
}


/*********************************************************************
 *
 * Get a buffer from system mbuf buffer pool.
 *
 **********************************************************************/
static int
ixgb_get_buf(int i, struct adapter * adapter,
    struct mbuf * nmp)
{
	register struct mbuf *mp = nmp;
	struct ixgb_buffer *rx_buffer;
	struct ifnet *ifp;
	bus_addr_t paddr;
	int error;

	ifp = adapter->ifp;

	if (mp == NULL) {
		/* No recycled mbuf supplied: allocate a fresh cluster */
		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);

		if (mp == NULL) {
			adapter->mbuf_alloc_failed++;
			return (ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset length/data/chain */
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* Align the IP header for standard-MTU frames */
	if (ifp->if_mtu <= ETHERMTU) {
		m_adj(mp, ETHER_ALIGN);
	}
	rx_buffer = &adapter->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
	    mtod(mp, void *), mp->m_len,
	    ixgb_dmamap_cb, &paddr, 0);
	if (error) {
		m_free(mp);
		return (error);
	}
	rx_buffer->m_head = mp;
	adapter->rx_desc_base[i].buff_addr = htole64(paddr);
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

	return (0);
}

/*********************************************************************
 *
 * Allocate memory for rx_buffer structures.
Since we use one 1833 * rx_buffer per received packet, the maximum number of rx_buffer's 1834 * that we'll need is equal to the number of receive descriptors 1835 * that we've allocated. 1836 * 1837 **********************************************************************/ 1838static int 1839ixgb_allocate_receive_structures(struct adapter * adapter) 1840{ 1841 int i, error; 1842 struct ixgb_buffer *rx_buffer; 1843 1844 if (!(adapter->rx_buffer_area = 1845 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * 1846 adapter->num_rx_desc, M_DEVBUF, 1847 M_NOWAIT | M_ZERO))) { 1848 printf("ixgb%d: Unable to allocate rx_buffer memory\n", 1849 adapter->unit); 1850 return (ENOMEM); 1851 } 1852 bzero(adapter->rx_buffer_area, 1853 sizeof(struct ixgb_buffer) * adapter->num_rx_desc); 1854 1855 error = bus_dma_tag_create(NULL, /* parent */ 1856 PAGE_SIZE, 0, /* alignment, bounds */ 1857 BUS_SPACE_MAXADDR, /* lowaddr */ 1858 BUS_SPACE_MAXADDR, /* highaddr */ 1859 NULL, NULL, /* filter, filterarg */ 1860 MCLBYTES, /* maxsize */ 1861 1, /* nsegments */ 1862 MCLBYTES, /* maxsegsize */ 1863 BUS_DMA_ALLOCNOW, /* flags */ 1864#if __FreeBSD_version >= 502000 1865 NULL, /* lockfunc */ 1866 NULL, /* lockfuncarg */ 1867#endif 1868 &adapter->rxtag); 1869 if (error != 0) { 1870 printf("ixgb%d: ixgb_allocate_receive_structures: " 1871 "bus_dma_tag_create failed; error %u\n", 1872 adapter->unit, error); 1873 goto fail_0; 1874 } 1875 rx_buffer = adapter->rx_buffer_area; 1876 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { 1877 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, 1878 &rx_buffer->map); 1879 if (error != 0) { 1880 printf("ixgb%d: ixgb_allocate_receive_structures: " 1881 "bus_dmamap_create failed; error %u\n", 1882 adapter->unit, error); 1883 goto fail_1; 1884 } 1885 } 1886 1887 for (i = 0; i < adapter->num_rx_desc; i++) { 1888 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) { 1889 adapter->rx_buffer_area[i].m_head = NULL; 1890 adapter->rx_desc_base[i].buff_addr = 
0; 1891 return (ENOBUFS); 1892 } 1893 } 1894 1895 return (0); 1896fail_1: 1897 bus_dma_tag_destroy(adapter->rxtag); 1898fail_0: 1899 adapter->rxtag = NULL; 1900 free(adapter->rx_buffer_area, M_DEVBUF); 1901 adapter->rx_buffer_area = NULL; 1902 return (error); 1903} 1904 1905/********************************************************************* 1906 * 1907 * Allocate and initialize receive structures. 1908 * 1909 **********************************************************************/ 1910static int 1911ixgb_setup_receive_structures(struct adapter * adapter) 1912{ 1913 bzero((void *)adapter->rx_desc_base, 1914 (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc); 1915 1916 if (ixgb_allocate_receive_structures(adapter)) 1917 return ENOMEM; 1918 1919 /* Setup our descriptor pointers */ 1920 adapter->next_rx_desc_to_check = 0; 1921 adapter->next_rx_desc_to_use = 0; 1922 return (0); 1923} 1924 1925/********************************************************************* 1926 * 1927 * Enable receive unit. 
 *
 **********************************************************************/
static void
ixgb_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t reg_rctl;
	u_int32_t reg_rxcsum;
	u_int32_t reg_rxdctl;
	struct ifnet *ifp;
	u_int64_t rdba = adapter->rxdma.dma_paddr;

	ifp = adapter->ifp;

	/*
	 * Make sure receives are disabled while setting up the descriptor
	 * ring
	 */
	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

	/* Set the Receive Delay Timer Register */
	IXGB_WRITE_REG(&adapter->hw, RDTR,
	    adapter->rx_int_delay);


	/* Setup the Base and Length of the Rx Descriptor Ring */
	IXGB_WRITE_REG(&adapter->hw, RDBAL,
	    (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
	IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
	    sizeof(struct ixgb_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(&adapter->hw, RDH, 0);

	/* Tail is the last valid descriptor; the whole ring is available */
	IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);



	/* Program the descriptor write-back/prefetch thresholds */
	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);


	/* Interrupt moderation (RAIDC) is always enabled here; the
	 * raidc flag is hard-wired to 1. */
	adapter->raidc = 1;
	if (adapter->raidc) {
		uint32_t raidc;
		uint8_t poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

		/* Threshold is roughly 1/16 of the ring, clamped to 6 bits */
		poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
		poll_threshold >>= 1;
		poll_threshold &= 0x3F;
		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
			(adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
			poll_threshold;
		IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
	}
	/* Enable Receive Checksum Offload for TCP and UDP ? */
	if (ifp->if_capenable & IFCAP_RXCSUM) {
		reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}
	/* Setup the Receive Control Register */
	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
		IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	/* Select receive buffer size; defaults to 2048 for unknown values */
	switch (adapter->rx_buffer_len) {
	default:
	case IXGB_RXBUFFER_2048:
		reg_rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		reg_rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		reg_rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		reg_rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}

	reg_rctl |= IXGB_RCTL_RXEN;


	/* Enable Receives */
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	return;
}

/*********************************************************************
 *
 * Free receive related data structures.
 *
 **********************************************************************/
static void
ixgb_free_receive_structures(struct adapter * adapter)
{
	struct ixgb_buffer *rx_buffer;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			/* Maps are created up front, so unload/destroy each */
			if (rx_buffer->map != NULL) {
				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
			}
			if (rx_buffer->m_head != NULL)
				m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}
	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 * This routine executes in interrupt context. It replenishes
 * the mbufs in the descriptor and sends data which has been
 * dma'ed into host memory to upper layer.
 *
 * We loop at most count times if count is > 0, or until done if
 * count < 0.
 *
 *********************************************************************/
static int
ixgb_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet *ifp;
	struct mbuf *mp;
#if __FreeBSD_version < 500000
	struct ether_header *eh;
#endif
	int eop = 0;
	int len;
	u_int8_t accept_frame = 0;
	int i;
	int next_to_use = 0;
	int eop_desc;
	int rx_npkts = 0;	/* number of packets handed to the stack */
	/* Pointer to the receive descriptor being examined. */
	struct ixgb_rx_desc *current_desc;

	IXGB_LOCK_ASSERT(adapter);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	next_to_use = adapter->next_rx_desc_to_use;
	eop_desc = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];

	/* Nothing written back yet: bail out early */
	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
#ifdef _SV_
		adapter->no_pkts_avail++;
#endif
		return (rx_npkts);
	}
	/* Consume completed descriptors; count is decremented per EOP frame */
	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
		    BUS_DMASYNC_POSTREAD);
		accept_frame = 1;
		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
			count--;
			eop = 1;
		} else {
			eop = 0;
		}
		len = current_desc->length;

		/* Drop frames with CRC/symbol/parity/RX errors */
		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
		    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
		    IXGB_RX_DESC_ERRORS_RXE)) {
			accept_frame = 0;
		}
		if (accept_frame) {

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	/* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Remember the last EOP slot for the
				 * 82597EX replenish workaround below */
				eop_desc = i;
				adapter->fmp->m_pkthdr.rcvif = ifp;

#if __FreeBSD_version < 500000
				eh = mtod(adapter->fmp, struct ether_header *);

				/* Remove ethernet header from mbuf */
				m_adj(adapter->fmp, sizeof(struct ether_header));
				ixgb_receive_checksum(adapter, current_desc,
				    adapter->fmp);

				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(eh, adapter->fmp,
					    current_desc->special);
				else
					ether_input(ifp, eh, adapter->fmp);
#else
				ixgb_receive_checksum(adapter, current_desc,
				    adapter->fmp);
#if __FreeBSD_version < 700000
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
					    current_desc->special);
#else
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
					adapter->fmp->m_pkthdr.ether_vtag =
					    current_desc->special;
					adapter->fmp->m_flags |= M_VLANTAG;
				}
#endif

				/* Drop the lock around the stack call-up */
				if (adapter->fmp != NULL) {
					IXGB_UNLOCK(adapter);
					(*ifp->if_input) (ifp, adapter->fmp);
					IXGB_LOCK(adapter);
					rx_npkts++;
				}
#endif
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			adapter->rx_buffer_area[i].m_head = NULL;
		} else {
			/* Error frame: discard any partial chain collected */
			adapter->dropped_pkts++;
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status */
		current_desc->status = 0;

		/* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc) {
			i = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_check = i;

	/* Step back to the last descriptor actually consumed */
	if (--i < 0)
		i = (adapter->num_rx_desc - 1);

	/*
	 * 82597EX: Workaround for redundent write back in receive descriptor ring (causes
	 * memory corruption). Avoid using and re-submitting the most recently received RX
	 * descriptor back to hardware.
	 *
	 * if(Last written back descriptor == EOP bit set descriptor)
	 * 	then avoid re-submitting the most recently received RX descriptor
	 * 	back to hardware.
	 * if(Last written back descriptor != EOP bit set descriptor)
	 * 	then avoid re-submitting the most recently received RX descriptors
	 * 	till last EOP bit set descriptor.
	 */
	if (eop_desc != i) {
		if (++eop_desc == adapter->num_rx_desc)
			eop_desc = 0;
		i = eop_desc;
	}
	/* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
	while (next_to_use != i) {
		current_desc = &adapter->rx_desc_base[next_to_use];
		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
		    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
		    IXGB_RX_DESC_ERRORS_RXE))) {
			/* Errored slot: recycle its existing mbuf */
			mp = adapter->rx_buffer_area[next_to_use].m_head;
			ixgb_get_buf(next_to_use, adapter, mp);
		} else {
			if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
				break;
		}
		/* Advance our pointers to the next descriptor */
		if (++next_to_use == adapter->num_rx_desc) {
			next_to_use = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_use = next_to_use;
	/* RDT is the last valid slot, one before next_to_use */
	if (--next_to_use < 0)
		next_to_use = (adapter->num_rx_desc - 1);
	/* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
	IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);

	return (rx_npkts);
}

/*********************************************************************
 *
 * Verify that the hardware indicated that the checksum is valid.
 * Inform the stack about the status of checksum so that stack
 * doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixgb_receive_checksum(struct adapter * adapter,
    struct ixgb_rx_desc * rx_desc,
    struct mbuf * mp)
{
	/* Hardware did not checksum this frame: report nothing */
	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}
	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
		/* Did it pass?
*/ 2269 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) { 2270 /* IP Checksum Good */ 2271 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 2272 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2273 2274 } else { 2275 mp->m_pkthdr.csum_flags = 0; 2276 } 2277 } 2278 if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) { 2279 /* Did it pass? */ 2280 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) { 2281 mp->m_pkthdr.csum_flags |= 2282 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 2283 mp->m_pkthdr.csum_data = htons(0xffff); 2284 } 2285 } 2286 return; 2287} 2288 2289 2290static void 2291ixgb_enable_vlans(struct adapter * adapter) 2292{ 2293 uint32_t ctrl; 2294 2295 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); 2296 ctrl |= IXGB_CTRL0_VME; 2297 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); 2298 2299 return; 2300} 2301 2302 2303static void 2304ixgb_enable_intr(struct adapter * adapter) 2305{ 2306 IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW | 2307 IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO)); 2308 return; 2309} 2310 2311static void 2312ixgb_disable_intr(struct adapter * adapter) 2313{ 2314 IXGB_WRITE_REG(&adapter->hw, IMC, ~0); 2315 return; 2316} 2317 2318void 2319ixgb_write_pci_cfg(struct ixgb_hw * hw, 2320 uint32_t reg, 2321 uint16_t * value) 2322{ 2323 pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg, 2324 *value, 2); 2325} 2326 2327/********************************************************************** 2328 * 2329 * Update the board statistics counters. 
2330 * 2331 **********************************************************************/ 2332static void 2333ixgb_update_stats_counters(struct adapter * adapter) 2334{ 2335 struct ifnet *ifp; 2336 2337 adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS); 2338 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); 2339 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); 2340 adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL); 2341 adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH); 2342 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL); 2343 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH); 2344 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); 2345 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH); 2346 adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC); 2347 2348 adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC); 2349 adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC); 2350 adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC); 2351 adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC); 2352 adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC); 2353 adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC); 2354 adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC); 2355 adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL); 2356 adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH); 2357 adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL); 2358 adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH); 2359 adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC); 2360 adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC); 2361 adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC); 2362 adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL); 2363 adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH); 2364 adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL); 2365 adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH); 
2366 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); 2367 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); 2368 adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL); 2369 adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH); 2370 adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C); 2371 adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL); 2372 adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH); 2373 adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL); 2374 adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH); 2375 2376 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); 2377 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); 2378 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); 2379 adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH); 2380 adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL); 2381 adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH); 2382 adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC); 2383 adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC); 2384 adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC); 2385 adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL); 2386 adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH); 2387 adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL); 2388 adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH); 2389 adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL); 2390 adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH); 2391 adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC); 2392 adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC); 2393 adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC); 2394 adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC); 2395 adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC); 2396 adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC); 2397 adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC); 
2398 2399 ifp = adapter->ifp; 2400 2401 /* Fill out the OS statistics structure */ 2402 ifp->if_ipackets = adapter->stats.gprcl; 2403 ifp->if_opackets = adapter->stats.gptcl; 2404 ifp->if_ibytes = adapter->stats.gorcl; 2405 ifp->if_obytes = adapter->stats.gotcl; 2406 ifp->if_imcasts = adapter->stats.mprcl; 2407 ifp->if_collisions = 0; 2408 2409 /* Rx Errors */ 2410 ifp->if_ierrors = 2411 adapter->dropped_pkts + 2412 adapter->stats.crcerrs + 2413 adapter->stats.rnbc + 2414 adapter->stats.mpc + 2415 adapter->stats.rlec; 2416 2417 2418} 2419 2420 2421/********************************************************************** 2422 * 2423 * This routine is called only when ixgb_display_debug_stats is enabled. 2424 * This routine provides a way to take a look at important statistics 2425 * maintained by the driver and hardware. 2426 * 2427 **********************************************************************/ 2428static void 2429ixgb_print_hw_stats(struct adapter * adapter) 2430{ 2431 char buf_speed[100], buf_type[100]; 2432 ixgb_bus_speed bus_speed; 2433 ixgb_bus_type bus_type; 2434 int unit = adapter->unit; 2435 2436#ifdef _SV_ 2437 printf("ixgb%d: Packets not Avail = %ld\n", unit, 2438 adapter->no_pkts_avail); 2439 printf("ixgb%d: CleanTxInterrupts = %ld\n", unit, 2440 adapter->clean_tx_interrupts); 2441 printf("ixgb%d: ICR RXDMT0 = %lld\n", unit, 2442 (long long)adapter->sv_stats.icr_rxdmt0); 2443 printf("ixgb%d: ICR RXO = %lld\n", unit, 2444 (long long)adapter->sv_stats.icr_rxo); 2445 printf("ixgb%d: ICR RXT0 = %lld\n", unit, 2446 (long long)adapter->sv_stats.icr_rxt0); 2447 printf("ixgb%d: ICR TXDW = %lld\n", unit, 2448 (long long)adapter->sv_stats.icr_TXDW); 2449#endif /* _SV_ */ 2450 2451 bus_speed = adapter->hw.bus.speed; 2452 bus_type = adapter->hw.bus.type; 2453 sprintf(buf_speed, 2454 bus_speed == ixgb_bus_speed_33 ? "33MHz" : 2455 bus_speed == ixgb_bus_speed_66 ? "66MHz" : 2456 bus_speed == ixgb_bus_speed_100 ? "100MHz" : 2457 bus_speed == ixgb_bus_speed_133 ? 
"133MHz" : 2458 "UNKNOWN"); 2459 printf("ixgb%d: PCI_Bus_Speed = %s\n", unit, 2460 buf_speed); 2461 2462 sprintf(buf_type, 2463 bus_type == ixgb_bus_type_pci ? "PCI" : 2464 bus_type == ixgb_bus_type_pcix ? "PCI-X" : 2465 "UNKNOWN"); 2466 printf("ixgb%d: PCI_Bus_Type = %s\n", unit, 2467 buf_type); 2468 2469 printf("ixgb%d: Tx Descriptors not Avail1 = %ld\n", unit, 2470 adapter->no_tx_desc_avail1); 2471 printf("ixgb%d: Tx Descriptors not Avail2 = %ld\n", unit, 2472 adapter->no_tx_desc_avail2); 2473 printf("ixgb%d: Std Mbuf Failed = %ld\n", unit, 2474 adapter->mbuf_alloc_failed); 2475 printf("ixgb%d: Std Cluster Failed = %ld\n", unit, 2476 adapter->mbuf_cluster_failed); 2477 2478 printf("ixgb%d: Defer count = %lld\n", unit, 2479 (long long)adapter->stats.dc); 2480 printf("ixgb%d: Missed Packets = %lld\n", unit, 2481 (long long)adapter->stats.mpc); 2482 printf("ixgb%d: Receive No Buffers = %lld\n", unit, 2483 (long long)adapter->stats.rnbc); 2484 printf("ixgb%d: Receive length errors = %lld\n", unit, 2485 (long long)adapter->stats.rlec); 2486 printf("ixgb%d: Crc errors = %lld\n", unit, 2487 (long long)adapter->stats.crcerrs); 2488 printf("ixgb%d: Driver dropped packets = %ld\n", unit, 2489 adapter->dropped_pkts); 2490 2491 printf("ixgb%d: XON Rcvd = %lld\n", unit, 2492 (long long)adapter->stats.xonrxc); 2493 printf("ixgb%d: XON Xmtd = %lld\n", unit, 2494 (long long)adapter->stats.xontxc); 2495 printf("ixgb%d: XOFF Rcvd = %lld\n", unit, 2496 (long long)adapter->stats.xoffrxc); 2497 printf("ixgb%d: XOFF Xmtd = %lld\n", unit, 2498 (long long)adapter->stats.xofftxc); 2499 2500 printf("ixgb%d: Good Packets Rcvd = %lld\n", unit, 2501 (long long)adapter->stats.gprcl); 2502 printf("ixgb%d: Good Packets Xmtd = %lld\n", unit, 2503 (long long)adapter->stats.gptcl); 2504 2505 printf("ixgb%d: Jumbo frames recvd = %lld\n", unit, 2506 (long long)adapter->stats.jprcl); 2507 printf("ixgb%d: Jumbo frames Xmtd = %lld\n", unit, 2508 (long long)adapter->stats.jptcl); 2509 2510 return; 2511 
2512} 2513 2514static int 2515ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS) 2516{ 2517 int error; 2518 int result; 2519 struct adapter *adapter; 2520 2521 result = -1; 2522 error = sysctl_handle_int(oidp, &result, 0, req); 2523 2524 if (error || !req->newptr) 2525 return (error); 2526 2527 if (result == 1) { 2528 adapter = (struct adapter *) arg1; 2529 ixgb_print_hw_stats(adapter); 2530 } 2531 return error; 2532} 2533