/* if_ixgb.c — FreeBSD ixgb(4) driver, SVN revision 148654 */
/*******************************************************************************

Copyright (c) 2001-2004, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
31 32***************************************************************************/ 33 34/*$FreeBSD: head/sys/dev/ixgb/if_ixgb.c 148654 2005-08-03 00:18:35Z rwatson $*/ 35 36#include <dev/ixgb/if_ixgb.h> 37 38/********************************************************************* 39 * Set this to one to display debug statistics 40 *********************************************************************/ 41int ixgb_display_debug_stats = 0; 42 43/********************************************************************* 44 * Linked list of board private structures for all NICs found 45 *********************************************************************/ 46 47struct adapter *ixgb_adapter_list = NULL; 48 49 50 51/********************************************************************* 52 * Driver version 53 *********************************************************************/ 54 55char ixgb_driver_version[] = "1.0.6"; 56char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation."; 57 58/********************************************************************* 59 * PCI Device ID Table 60 * 61 * Used by probe to select devices to load on 62 * Last field stores an index into ixgb_strings 63 * Last entry must be all 0s 64 * 65 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 66 *********************************************************************/ 67 68static ixgb_vendor_info_t ixgb_vendor_info_array[] = 69{ 70 /* Intel(R) PRO/10000 Network Connection */ 71 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0}, 72 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0}, 73 /* required last entry */ 74 {0, 0, 0, 0, 0} 75}; 76 77/********************************************************************* 78 * Table of branding strings for all supported NICs. 
79 *********************************************************************/ 80 81static char *ixgb_strings[] = { 82 "Intel(R) PRO/10GbE Network Driver" 83}; 84 85/********************************************************************* 86 * Function prototypes 87 *********************************************************************/ 88static int ixgb_probe(device_t); 89static int ixgb_attach(device_t); 90static int ixgb_detach(device_t); 91static int ixgb_shutdown(device_t); 92static void ixgb_intr(void *); 93static void ixgb_start(struct ifnet *); 94static void ixgb_start_locked(struct ifnet *); 95static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t); 96static void ixgb_watchdog(struct ifnet *); 97static void ixgb_init(void *); 98static void ixgb_init_locked(struct adapter *); 99static void ixgb_stop(void *); 100static void ixgb_media_status(struct ifnet *, struct ifmediareq *); 101static int ixgb_media_change(struct ifnet *); 102static void ixgb_identify_hardware(struct adapter *); 103static int ixgb_allocate_pci_resources(struct adapter *); 104static void ixgb_free_pci_resources(struct adapter *); 105static void ixgb_local_timer(void *); 106static int ixgb_hardware_init(struct adapter *); 107static void ixgb_setup_interface(device_t, struct adapter *); 108static int ixgb_setup_transmit_structures(struct adapter *); 109static void ixgb_initialize_transmit_unit(struct adapter *); 110static int ixgb_setup_receive_structures(struct adapter *); 111static void ixgb_initialize_receive_unit(struct adapter *); 112static void ixgb_enable_intr(struct adapter *); 113static void ixgb_disable_intr(struct adapter *); 114static void ixgb_free_transmit_structures(struct adapter *); 115static void ixgb_free_receive_structures(struct adapter *); 116static void ixgb_update_stats_counters(struct adapter *); 117static void ixgb_clean_transmit_interrupts(struct adapter *); 118static int ixgb_allocate_receive_structures(struct adapter *); 119static int 
ixgb_allocate_transmit_structures(struct adapter *); 120static void ixgb_process_receive_interrupts(struct adapter *, int); 121static void 122ixgb_receive_checksum(struct adapter *, 123 struct ixgb_rx_desc * rx_desc, 124 struct mbuf *); 125static void 126ixgb_transmit_checksum_setup(struct adapter *, 127 struct mbuf *, 128 u_int8_t *); 129static void ixgb_set_promisc(struct adapter *); 130static void ixgb_disable_promisc(struct adapter *); 131static void ixgb_set_multi(struct adapter *); 132static void ixgb_print_hw_stats(struct adapter *); 133static void ixgb_print_link_status(struct adapter *); 134static int 135ixgb_get_buf(int i, struct adapter *, 136 struct mbuf *); 137static void ixgb_enable_vlans(struct adapter * adapter); 138static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head); 139static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS); 140static int 141ixgb_dma_malloc(struct adapter *, bus_size_t, 142 struct ixgb_dma_alloc *, int); 143static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *); 144 145/********************************************************************* 146 * FreeBSD Device Interface Entry Points 147 *********************************************************************/ 148 149static device_method_t ixgb_methods[] = { 150 /* Device interface */ 151 DEVMETHOD(device_probe, ixgb_probe), 152 DEVMETHOD(device_attach, ixgb_attach), 153 DEVMETHOD(device_detach, ixgb_detach), 154 DEVMETHOD(device_shutdown, ixgb_shutdown), 155 {0, 0} 156}; 157 158static driver_t ixgb_driver = { 159 "ixgb", ixgb_methods, sizeof(struct adapter), 160}; 161 162static devclass_t ixgb_devclass; 163DRIVER_MODULE(if_ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0); 164 165MODULE_DEPEND(if_ixgb, pci, 1, 1, 1); 166MODULE_DEPEND(if_ixgb, ether, 1, 1, 1); 167 168/* some defines for controlling descriptor fetches in h/w */ 169#define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */ 170#define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only 
prefetch if tail is 171 * pushed this many descriptors from 172 * head */ 173#define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */ 174 175 176/********************************************************************* 177 * Device identification routine 178 * 179 * ixgb_probe determines if the driver should be loaded on 180 * adapter based on PCI vendor/device id of the adapter. 181 * 182 * return 0 on success, positive on failure 183 *********************************************************************/ 184 185static int 186ixgb_probe(device_t dev) 187{ 188 ixgb_vendor_info_t *ent; 189 190 u_int16_t pci_vendor_id = 0; 191 u_int16_t pci_device_id = 0; 192 u_int16_t pci_subvendor_id = 0; 193 u_int16_t pci_subdevice_id = 0; 194 char adapter_name[60]; 195 196 INIT_DEBUGOUT("ixgb_probe: begin"); 197 198 pci_vendor_id = pci_get_vendor(dev); 199 if (pci_vendor_id != IXGB_VENDOR_ID) 200 return (ENXIO); 201 202 pci_device_id = pci_get_device(dev); 203 pci_subvendor_id = pci_get_subvendor(dev); 204 pci_subdevice_id = pci_get_subdevice(dev); 205 206 ent = ixgb_vendor_info_array; 207 while (ent->vendor_id != 0) { 208 if ((pci_vendor_id == ent->vendor_id) && 209 (pci_device_id == ent->device_id) && 210 211 ((pci_subvendor_id == ent->subvendor_id) || 212 (ent->subvendor_id == PCI_ANY_ID)) && 213 214 ((pci_subdevice_id == ent->subdevice_id) || 215 (ent->subdevice_id == PCI_ANY_ID))) { 216 sprintf(adapter_name, "%s, Version - %s", 217 ixgb_strings[ent->index], 218 ixgb_driver_version); 219 device_set_desc_copy(dev, adapter_name); 220 return (BUS_PROBE_DEFAULT); 221 } 222 ent++; 223 } 224 225 return (ENXIO); 226} 227 228/********************************************************************* 229 * Device initialization routine 230 * 231 * The attach entry point is called when the driver is being loaded. 232 * This routine identifies the type of hardware, allocates all resources 233 * and initializes the hardware. 
234 * 235 * return 0 on success, positive on failure 236 *********************************************************************/ 237 238static int 239ixgb_attach(device_t dev) 240{ 241 struct adapter *adapter; 242 int tsize, rsize; 243 int error = 0; 244 245 printf("ixgb%d: %s\n", device_get_unit(dev), ixgb_copyright); 246 INIT_DEBUGOUT("ixgb_attach: begin"); 247 248 /* Allocate, clear, and link in our adapter structure */ 249 if (!(adapter = device_get_softc(dev))) { 250 printf("ixgb: adapter structure allocation failed\n"); 251 return (ENOMEM); 252 } 253 bzero(adapter, sizeof(struct adapter)); 254 adapter->dev = dev; 255 adapter->osdep.dev = dev; 256 adapter->unit = device_get_unit(dev); 257 IXGB_LOCK_INIT(adapter, device_get_nameunit(dev)); 258 259 if (ixgb_adapter_list != NULL) 260 ixgb_adapter_list->prev = adapter; 261 adapter->next = ixgb_adapter_list; 262 ixgb_adapter_list = adapter; 263 264 /* SYSCTL APIs */ 265 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 266 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 267 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, 268 (void *)adapter, 0, 269 ixgb_sysctl_stats, "I", "Statistics"); 270 271 callout_init(&adapter->timer, CALLOUT_MPSAFE); 272 273 /* Determine hardware revision */ 274 ixgb_identify_hardware(adapter); 275 276 /* Parameters (to be read from user) */ 277 adapter->num_tx_desc = IXGB_MAX_TXD; 278 adapter->num_rx_desc = IXGB_MAX_RXD; 279 adapter->tx_int_delay = TIDV; 280 adapter->rx_int_delay = RDTR; 281 adapter->rx_buffer_len = IXGB_RXBUFFER_2048; 282 283 adapter->hw.fc.high_water = FCRTH; 284 adapter->hw.fc.low_water = FCRTL; 285 adapter->hw.fc.pause_time = FCPAUSE; 286 adapter->hw.fc.send_xon = TRUE; 287 adapter->hw.fc.type = FLOW_CONTROL; 288 289 290 /* Set the max frame size assuming standard ethernet sized frames */ 291 adapter->hw.max_frame_size = 292 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 293 294 if (ixgb_allocate_pci_resources(adapter)) { 295 printf("ixgb%d: Allocation of PCI resources failed\n", 296 
adapter->unit); 297 error = ENXIO; 298 goto err_pci; 299 } 300 tsize = IXGB_ROUNDUP(adapter->num_tx_desc * 301 sizeof(struct ixgb_tx_desc), 4096); 302 303 /* Allocate Transmit Descriptor ring */ 304 if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) { 305 printf("ixgb%d: Unable to allocate TxDescriptor memory\n", 306 adapter->unit); 307 error = ENOMEM; 308 goto err_tx_desc; 309 } 310 adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr; 311 312 rsize = IXGB_ROUNDUP(adapter->num_rx_desc * 313 sizeof(struct ixgb_rx_desc), 4096); 314 315 /* Allocate Receive Descriptor ring */ 316 if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) { 317 printf("ixgb%d: Unable to allocate rx_desc memory\n", 318 adapter->unit); 319 error = ENOMEM; 320 goto err_rx_desc; 321 } 322 adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr; 323 324 /* Initialize the hardware */ 325 if (ixgb_hardware_init(adapter)) { 326 printf("ixgb%d: Unable to initialize the hardware\n", 327 adapter->unit); 328 error = EIO; 329 goto err_hw_init; 330 } 331 /* Setup OS specific network interface */ 332 ixgb_setup_interface(dev, adapter); 333 334 /* Initialize statistics */ 335 ixgb_clear_hw_cntrs(&adapter->hw); 336 ixgb_update_stats_counters(adapter); 337 338 INIT_DEBUGOUT("ixgb_attach: end"); 339 return (0); 340 341err_hw_init: 342 ixgb_dma_free(adapter, &adapter->rxdma); 343err_rx_desc: 344 ixgb_dma_free(adapter, &adapter->txdma); 345err_tx_desc: 346err_pci: 347 ixgb_free_pci_resources(adapter); 348 sysctl_ctx_free(&adapter->sysctl_ctx); 349 return (error); 350 351} 352 353/********************************************************************* 354 * Device removal routine 355 * 356 * The detach entry point is called when the driver is being removed. 357 * This routine stops the adapter and deallocates all the resources 358 * that were allocated for driver operation. 
359 * 360 * return 0 on success, positive on failure 361 *********************************************************************/ 362 363static int 364ixgb_detach(device_t dev) 365{ 366 struct adapter *adapter = device_get_softc(dev); 367 struct ifnet *ifp = adapter->ifp; 368 369 INIT_DEBUGOUT("ixgb_detach: begin"); 370 371 IXGB_LOCK(adapter); 372 adapter->in_detach = 1; 373 374 ixgb_stop(adapter); 375 IXGB_UNLOCK(adapter); 376 377#if __FreeBSD_version < 500000 378 ether_ifdetach(adapter->ifp, ETHER_BPF_SUPPORTED); 379#else 380 ether_ifdetach(adapter->ifp); 381 if_free(adapter->ifp); 382#endif 383 ixgb_free_pci_resources(adapter); 384 385 386 /* Free Transmit Descriptor ring */ 387 if (adapter->tx_desc_base) { 388 ixgb_dma_free(adapter, &adapter->txdma); 389 adapter->tx_desc_base = NULL; 390 } 391 /* Free Receive Descriptor ring */ 392 if (adapter->rx_desc_base) { 393 ixgb_dma_free(adapter, &adapter->rxdma); 394 adapter->rx_desc_base = NULL; 395 } 396 /* Remove from the adapter list */ 397 if (ixgb_adapter_list == adapter) 398 ixgb_adapter_list = adapter->next; 399 if (adapter->next != NULL) 400 adapter->next->prev = adapter->prev; 401 if (adapter->prev != NULL) 402 adapter->prev->next = adapter->next; 403 404 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 405 ifp->if_timer = 0; 406 407 IXGB_LOCK_DESTROY(adapter); 408 return (0); 409} 410 411/********************************************************************* 412 * 413 * Shutdown entry point 414 * 415 **********************************************************************/ 416 417static int 418ixgb_shutdown(device_t dev) 419{ 420 struct adapter *adapter = device_get_softc(dev); 421 IXGB_LOCK(adapter); 422 ixgb_stop(adapter); 423 IXGB_UNLOCK(adapter); 424 return (0); 425} 426 427 428/********************************************************************* 429 * Transmit entry point 430 * 431 * ixgb_start is called by the stack to initiate a transmit. 
432 * The driver will remain in this routine as long as there are 433 * packets to transmit and transmit resources are available. 434 * In case resources are not available stack is notified and 435 * the packet is requeued. 436 **********************************************************************/ 437 438static void 439ixgb_start_locked(struct ifnet * ifp) 440{ 441 struct mbuf *m_head; 442 struct adapter *adapter = ifp->if_softc; 443 444 IXGB_LOCK_ASSERT(adapter); 445 446 if (!adapter->link_active) 447 return; 448 449 while (ifp->if_snd.ifq_head != NULL) { 450 IF_DEQUEUE(&ifp->if_snd, m_head); 451 452 if (m_head == NULL) 453 break; 454 455 if (ixgb_encap(adapter, m_head)) { 456 ifp->if_flags |= IFF_OACTIVE; 457 IF_PREPEND(&ifp->if_snd, m_head); 458 break; 459 } 460 /* Send a copy of the frame to the BPF listener */ 461#if __FreeBSD_version < 500000 462 if (ifp->if_bpf) 463 bpf_mtap(ifp, m_head); 464#else 465 BPF_MTAP(ifp, m_head); 466#endif 467 /* Set timeout in case hardware has problems transmitting */ 468 ifp->if_timer = IXGB_TX_TIMEOUT; 469 470 } 471 return; 472} 473 474static void 475ixgb_start(struct ifnet *ifp) 476{ 477 struct adapter *adapter = ifp->if_softc; 478 479 IXGB_LOCK(adapter); 480 ixgb_start_locked(ifp); 481 IXGB_UNLOCK(adapter); 482 return; 483} 484 485/********************************************************************* 486 * Ioctl entry point 487 * 488 * ixgb_ioctl is called when the user wants to configure the 489 * interface. 
490 * 491 * return 0 on success, positive on failure 492 **********************************************************************/ 493 494static int 495ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data) 496{ 497 int mask, error = 0; 498 struct ifreq *ifr = (struct ifreq *) data; 499 struct adapter *adapter = ifp->if_softc; 500 501 if (adapter->in_detach) 502 goto out; 503 504 switch (command) { 505 case SIOCSIFADDR: 506 case SIOCGIFADDR: 507 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)"); 508 ether_ioctl(ifp, command, data); 509 break; 510 case SIOCSIFMTU: 511 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); 512 if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) { 513 error = EINVAL; 514 } else { 515 IXGB_LOCK(adapter); 516 ifp->if_mtu = ifr->ifr_mtu; 517 adapter->hw.max_frame_size = 518 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 519 520 ixgb_init_locked(adapter); 521 IXGB_UNLOCK(adapter); 522 } 523 break; 524 case SIOCSIFFLAGS: 525 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)"); 526 IXGB_LOCK(adapter); 527 if (ifp->if_flags & IFF_UP) { 528 if (!(ifp->if_flags & IFF_RUNNING)) { 529 ixgb_init_locked(adapter); 530 } 531 ixgb_disable_promisc(adapter); 532 ixgb_set_promisc(adapter); 533 } else { 534 if (ifp->if_flags & IFF_RUNNING) { 535 ixgb_stop(adapter); 536 } 537 } 538 IXGB_UNLOCK(adapter); 539 break; 540 case SIOCADDMULTI: 541 case SIOCDELMULTI: 542 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); 543 if (ifp->if_flags & IFF_RUNNING) { 544 IXGB_LOCK(adapter); 545 ixgb_disable_intr(adapter); 546 ixgb_set_multi(adapter); 547 ixgb_enable_intr(adapter); 548 IXGB_UNLOCK(adapter); 549 } 550 break; 551 case SIOCSIFMEDIA: 552 case SIOCGIFMEDIA: 553 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)"); 554 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 555 break; 556 case SIOCSIFCAP: 557 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); 558 mask = 
ifr->ifr_reqcap ^ ifp->if_capenable; 559 if (mask & IFCAP_HWCSUM) { 560 if (IFCAP_HWCSUM & ifp->if_capenable) 561 ifp->if_capenable &= ~IFCAP_HWCSUM; 562 else 563 ifp->if_capenable |= IFCAP_HWCSUM; 564 if (ifp->if_flags & IFF_RUNNING) 565 ixgb_init(adapter); 566 } 567 break; 568 default: 569 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command); 570 error = EINVAL; 571 } 572 573out: 574 return (error); 575} 576 577/********************************************************************* 578 * Watchdog entry point 579 * 580 * This routine is called whenever hardware quits transmitting. 581 * 582 **********************************************************************/ 583 584static void 585ixgb_watchdog(struct ifnet * ifp) 586{ 587 struct adapter *adapter; 588 adapter = ifp->if_softc; 589 590 /* 591 * If we are in this routine because of pause frames, then don't 592 * reset the hardware. 593 */ 594 if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) { 595 ifp->if_timer = IXGB_TX_TIMEOUT; 596 return; 597 } 598 printf("ixgb%d: watchdog timeout -- resetting\n", adapter->unit); 599 600 ifp->if_flags &= ~IFF_RUNNING; 601 602 603 ixgb_stop(adapter); 604 ixgb_init(adapter); 605 606 607 ifp->if_oerrors++; 608 609 return; 610} 611 612/********************************************************************* 613 * Init entry point 614 * 615 * This routine is used in two ways. It is used by the stack as 616 * init entry point in network interface structure. It is also used 617 * by the driver as a hw/sw initialization routine to get to a 618 * consistent state. 
619 * 620 * return 0 on success, positive on failure 621 **********************************************************************/ 622 623static void 624ixgb_init_locked(struct adapter *adapter) 625{ 626 struct ifnet *ifp; 627 628 INIT_DEBUGOUT("ixgb_init: begin"); 629 630 IXGB_LOCK_ASSERT(adapter); 631 632 ixgb_stop(adapter); 633 634 /* Get the latest mac address, User can use a LAA */ 635 bcopy(IFP2ENADDR(adapter->ifp), adapter->hw.curr_mac_addr, 636 IXGB_ETH_LENGTH_OF_ADDRESS); 637 638 /* Initialize the hardware */ 639 if (ixgb_hardware_init(adapter)) { 640 printf("ixgb%d: Unable to initialize the hardware\n", 641 adapter->unit); 642 return; 643 } 644 ixgb_enable_vlans(adapter); 645 646 /* Prepare transmit descriptors and buffers */ 647 if (ixgb_setup_transmit_structures(adapter)) { 648 printf("ixgb%d: Could not setup transmit structures\n", 649 adapter->unit); 650 ixgb_stop(adapter); 651 return; 652 } 653 ixgb_initialize_transmit_unit(adapter); 654 655 /* Setup Multicast table */ 656 ixgb_set_multi(adapter); 657 658 /* Prepare receive descriptors and buffers */ 659 if (ixgb_setup_receive_structures(adapter)) { 660 printf("ixgb%d: Could not setup receive structures\n", 661 adapter->unit); 662 ixgb_stop(adapter); 663 return; 664 } 665 ixgb_initialize_receive_unit(adapter); 666 667 /* Don't loose promiscuous settings */ 668 ixgb_set_promisc(adapter); 669 670 ifp = adapter->ifp; 671 ifp->if_flags |= IFF_RUNNING; 672 ifp->if_flags &= ~IFF_OACTIVE; 673 674 675 if (ifp->if_capenable & IFCAP_TXCSUM) 676 ifp->if_hwassist = IXGB_CHECKSUM_FEATURES; 677 else 678 ifp->if_hwassist = 0; 679 680 681 /* Enable jumbo frames */ 682 if (ifp->if_mtu > ETHERMTU) { 683 uint32_t temp_reg; 684 IXGB_WRITE_REG(&adapter->hw, MFS, 685 adapter->hw.max_frame_size << IXGB_MFS_SHIFT); 686 temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0); 687 temp_reg |= IXGB_CTRL0_JFE; 688 IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg); 689 } 690 callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter); 691 
ixgb_clear_hw_cntrs(&adapter->hw); 692#ifdef DEVICE_POLLING 693 /* 694 * Only disable interrupts if we are polling, make sure they are on 695 * otherwise. 696 */ 697 if (ifp->if_flags & IFF_POLLING) 698 ixgb_disable_intr(adapter); 699 else 700#endif /* DEVICE_POLLING */ 701 ixgb_enable_intr(adapter); 702 703 return; 704} 705 706static void 707ixgb_init(void *arg) 708{ 709 struct adapter *adapter = arg; 710 711 IXGB_LOCK(adapter); 712 ixgb_init_locked(adapter); 713 IXGB_UNLOCK(adapter); 714 return; 715} 716 717#ifdef DEVICE_POLLING 718static poll_handler_t ixgb_poll; 719 720static void 721ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count) 722{ 723 struct adapter *adapter = ifp->if_softc; 724 u_int32_t reg_icr; 725 726 IXGB_LOCK_ASSERT(adapter); 727 728 if (!(ifp->if_capenable & IFCAP_POLLING)) { 729 ether_poll_deregister(ifp); 730 cmd = POLL_DEREGISTER; 731 } 732 733 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ 734 ixgb_enable_intr(adapter); 735 return; 736 } 737 if (cmd == POLL_AND_CHECK_STATUS) { 738 reg_icr = IXGB_READ_REG(&adapter->hw, ICR); 739 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) { 740 callout_stop(&adapter->timer); 741 ixgb_check_for_link(&adapter->hw); 742 ixgb_print_link_status(adapter); 743 callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, 744 adapter); 745 } 746 } 747 if (ifp->if_flags & IFF_RUNNING) { 748 ixgb_process_receive_interrupts(adapter, count); 749 ixgb_clean_transmit_interrupts(adapter); 750 } 751 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL) 752 ixgb_start_locked(ifp); 753} 754 755static void 756ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count) 757{ 758 struct adapter *adapter = ifp->if_softc; 759 760 IXGB_LOCK(adapter); 761 ixgb_poll_locked(ifp, cmd, count); 762 IXGB_UNLOCK(adapter); 763} 764#endif /* DEVICE_POLLING */ 765 766/********************************************************************* 767 * 768 * Interrupt Service routine 769 * 770 
**********************************************************************/ 771 772static void 773ixgb_intr(void *arg) 774{ 775 u_int32_t loop_cnt = IXGB_MAX_INTR; 776 u_int32_t reg_icr; 777 struct ifnet *ifp; 778 struct adapter *adapter = arg; 779 boolean_t rxdmt0 = FALSE; 780 781 IXGB_LOCK(adapter); 782 783 ifp = adapter->ifp; 784 785#ifdef DEVICE_POLLING 786 if (ifp->if_flags & IFF_POLLING) { 787 IXGB_UNLOCK(adapter); 788 return; 789 } 790 791 if ((ifp->if_capenable & IFCAP_POLLING) && 792 ether_poll_register(ixgb_poll, ifp)) { 793 ixgb_disable_intr(adapter); 794 ixgb_poll_locked(ifp, 0, 1); 795 IXGB_UNLOCK(adapter); 796 return; 797 } 798#endif /* DEVICE_POLLING */ 799 800 reg_icr = IXGB_READ_REG(&adapter->hw, ICR); 801 if (reg_icr == 0) { 802 IXGB_UNLOCK(adapter); 803 return; 804 } 805 806 if (reg_icr & IXGB_INT_RXDMT0) 807 rxdmt0 = TRUE; 808 809#ifdef _SV_ 810 if (reg_icr & IXGB_INT_RXDMT0) 811 adapter->sv_stats.icr_rxdmt0++; 812 if (reg_icr & IXGB_INT_RXO) 813 adapter->sv_stats.icr_rxo++; 814 if (reg_icr & IXGB_INT_RXT0) 815 adapter->sv_stats.icr_rxt0++; 816 if (reg_icr & IXGB_INT_TXDW) 817 adapter->sv_stats.icr_TXDW++; 818#endif /* _SV_ */ 819 820 /* Link status change */ 821 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) { 822 callout_stop(&adapter->timer); 823 ixgb_check_for_link(&adapter->hw); 824 ixgb_print_link_status(adapter); 825 callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, 826 adapter); 827 } 828 while (loop_cnt > 0) { 829 if (ifp->if_flags & IFF_RUNNING) { 830 ixgb_process_receive_interrupts(adapter, -1); 831 ixgb_clean_transmit_interrupts(adapter); 832 } 833 loop_cnt--; 834 } 835 836 if (rxdmt0 && adapter->raidc) { 837 IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0); 838 IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0); 839 } 840 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL) 841 ixgb_start_locked(ifp); 842 843 IXGB_UNLOCK(adapter); 844 return; 845} 846 847 
848/********************************************************************* 849 * 850 * Media Ioctl callback 851 * 852 * This routine is called whenever the user queries the status of 853 * the interface using ifconfig. 854 * 855 **********************************************************************/ 856static void 857ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) 858{ 859 struct adapter *adapter = ifp->if_softc; 860 861 INIT_DEBUGOUT("ixgb_media_status: begin"); 862 863 ixgb_check_for_link(&adapter->hw); 864 ixgb_print_link_status(adapter); 865 866 ifmr->ifm_status = IFM_AVALID; 867 ifmr->ifm_active = IFM_ETHER; 868 869 if (!adapter->hw.link_up) 870 return; 871 872 ifmr->ifm_status |= IFM_ACTIVE; 873 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 874 875 return; 876} 877 878/********************************************************************* 879 * 880 * Media Ioctl callback 881 * 882 * This routine is called when the user changes speed/duplex using 883 * media/mediopt option with ifconfig. 884 * 885 **********************************************************************/ 886static int 887ixgb_media_change(struct ifnet * ifp) 888{ 889 struct adapter *adapter = ifp->if_softc; 890 struct ifmedia *ifm = &adapter->media; 891 892 INIT_DEBUGOUT("ixgb_media_change: begin"); 893 894 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 895 return (EINVAL); 896 897 return (0); 898} 899 900/********************************************************************* 901 * 902 * This routine maps the mbufs to tx descriptors. 
903 * 904 * return 0 on success, positive on failure 905 **********************************************************************/ 906 907static int 908ixgb_encap(struct adapter * adapter, struct mbuf * m_head) 909{ 910 u_int8_t txd_popts; 911 int i, j, error, nsegs; 912 913#if __FreeBSD_version < 500000 914 struct ifvlan *ifv = NULL; 915#else 916 struct m_tag *mtag; 917#endif 918 bus_dma_segment_t segs[IXGB_MAX_SCATTER]; 919 bus_dmamap_t map; 920 struct ixgb_buffer *tx_buffer = NULL; 921 struct ixgb_tx_desc *current_tx_desc = NULL; 922 struct ifnet *ifp = adapter->ifp; 923 924 /* 925 * Force a cleanup if number of TX descriptors available hits the 926 * threshold 927 */ 928 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) { 929 ixgb_clean_transmit_interrupts(adapter); 930 } 931 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) { 932 adapter->no_tx_desc_avail1++; 933 return (ENOBUFS); 934 } 935 /* 936 * Map the packet for DMA. 937 */ 938 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) { 939 adapter->no_tx_map_avail++; 940 return (ENOMEM); 941 } 942 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs, 943 &nsegs, BUS_DMA_NOWAIT); 944 if (error != 0) { 945 adapter->no_tx_dma_setup++; 946 printf("ixgb%d: ixgb_encap: bus_dmamap_load_mbuf failed; " 947 "error %u\n", adapter->unit, error); 948 bus_dmamap_destroy(adapter->txtag, map); 949 return (error); 950 } 951 KASSERT(nsegs != 0, ("ixgb_encap: empty packet")); 952 953 if (nsegs > adapter->num_tx_desc_avail) { 954 adapter->no_tx_desc_avail2++; 955 bus_dmamap_destroy(adapter->txtag, map); 956 return (ENOBUFS); 957 } 958 if (ifp->if_hwassist > 0) { 959 ixgb_transmit_checksum_setup(adapter, m_head, 960 &txd_popts); 961 } else 962 txd_popts = 0; 963 964 /* Find out if we are in vlan mode */ 965#if __FreeBSD_version < 500000 966 if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) && 967 m_head->m_pkthdr.rcvif != NULL && 968 m_head->m_pkthdr.rcvif->if_type == 
IFT_L2VLAN) 969 ifv = m_head->m_pkthdr.rcvif->if_softc; 970#else 971 mtag = VLAN_OUTPUT_TAG(ifp, m_head); 972#endif 973 i = adapter->next_avail_tx_desc; 974 for (j = 0; j < nsegs; j++) { 975 tx_buffer = &adapter->tx_buffer_area[i]; 976 current_tx_desc = &adapter->tx_desc_base[i]; 977 978 current_tx_desc->buff_addr = htole64(segs[j].ds_addr); 979 current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len); 980 current_tx_desc->popts = txd_popts; 981 if (++i == adapter->num_tx_desc) 982 i = 0; 983 984 tx_buffer->m_head = NULL; 985 } 986 987 adapter->num_tx_desc_avail -= nsegs; 988 adapter->next_avail_tx_desc = i; 989 990#if __FreeBSD_version < 500000 991 if (ifv != NULL) { 992 /* Set the vlan id */ 993 current_tx_desc->vlan = ifv->ifv_tag; 994#else 995 if (mtag != NULL) { 996 /* Set the vlan id */ 997 current_tx_desc->vlan = VLAN_TAG_VALUE(mtag); 998#endif 999 1000 /* Tell hardware to add tag */ 1001 current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE; 1002 } 1003 tx_buffer->m_head = m_head; 1004 tx_buffer->map = map; 1005 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE); 1006 1007 /* 1008 * Last Descriptor of Packet needs End Of Packet (EOP) 1009 */ 1010 current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP); 1011 1012 /* 1013 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000 1014 * that this frame is available to transmit. 
1015 */ 1016 IXGB_WRITE_REG(&adapter->hw, TDT, i); 1017 1018 return (0); 1019} 1020 1021static void 1022ixgb_set_promisc(struct adapter * adapter) 1023{ 1024 1025 u_int32_t reg_rctl; 1026 struct ifnet *ifp = adapter->ifp; 1027 1028 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); 1029 1030 if (ifp->if_flags & IFF_PROMISC) { 1031 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); 1032 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1033 } else if (ifp->if_flags & IFF_ALLMULTI) { 1034 reg_rctl |= IXGB_RCTL_MPE; 1035 reg_rctl &= ~IXGB_RCTL_UPE; 1036 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1037 } 1038 return; 1039} 1040 1041static void 1042ixgb_disable_promisc(struct adapter * adapter) 1043{ 1044 u_int32_t reg_rctl; 1045 1046 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); 1047 1048 reg_rctl &= (~IXGB_RCTL_UPE); 1049 reg_rctl &= (~IXGB_RCTL_MPE); 1050 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1051 1052 return; 1053} 1054 1055 1056/********************************************************************* 1057 * Multicast Update 1058 * 1059 * This routine is called whenever multicast address list is updated. 
1060 * 1061 **********************************************************************/ 1062 1063static void 1064ixgb_set_multi(struct adapter * adapter) 1065{ 1066 u_int32_t reg_rctl = 0; 1067 u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS]; 1068 struct ifmultiaddr *ifma; 1069 int mcnt = 0; 1070 struct ifnet *ifp = adapter->ifp; 1071 1072 IOCTL_DEBUGOUT("ixgb_set_multi: begin"); 1073 1074 IF_ADDR_LOCK(ifp); 1075#if __FreeBSD_version < 500000 1076 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1077#else 1078 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1079#endif 1080 if (ifma->ifma_addr->sa_family != AF_LINK) 1081 continue; 1082 1083 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 1084 &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS); 1085 mcnt++; 1086 } 1087 IF_ADDR_UNLOCK(ifp); 1088 1089 if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) { 1090 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); 1091 reg_rctl |= IXGB_RCTL_MPE; 1092 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 1093 } else 1094 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0); 1095 1096 return; 1097} 1098 1099 1100/********************************************************************* 1101 * Timer routine 1102 * 1103 * This routine checks for link status and updates statistics. 
1104 * 1105 **********************************************************************/ 1106 1107static void 1108ixgb_local_timer(void *arg) 1109{ 1110 struct ifnet *ifp; 1111 struct adapter *adapter = arg; 1112 ifp = adapter->ifp; 1113 1114 IXGB_LOCK(adapter); 1115 1116 ixgb_check_for_link(&adapter->hw); 1117 ixgb_print_link_status(adapter); 1118 ixgb_update_stats_counters(adapter); 1119 if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING) { 1120 ixgb_print_hw_stats(adapter); 1121 } 1122 callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter); 1123 1124 IXGB_UNLOCK(adapter); 1125 return; 1126} 1127 1128static void 1129ixgb_print_link_status(struct adapter * adapter) 1130{ 1131 if (adapter->hw.link_up) { 1132 if (!adapter->link_active) { 1133 printf("ixgb%d: Link is up %d Mbps %s \n", 1134 adapter->unit, 1135 10000, 1136 "Full Duplex"); 1137 adapter->link_active = 1; 1138 } 1139 } else { 1140 if (adapter->link_active) { 1141 printf("ixgb%d: Link is Down \n", adapter->unit); 1142 adapter->link_active = 0; 1143 } 1144 } 1145 1146 return; 1147} 1148 1149 1150 1151/********************************************************************* 1152 * 1153 * This routine disables all traffic on the adapter by issuing a 1154 * global reset on the MAC and deallocates TX/RX buffers. 
1155 * 1156 **********************************************************************/ 1157 1158static void 1159ixgb_stop(void *arg) 1160{ 1161 struct ifnet *ifp; 1162 struct adapter *adapter = arg; 1163 ifp = adapter->ifp; 1164 1165 IXGB_LOCK_ASSERT(adapter); 1166 1167 INIT_DEBUGOUT("ixgb_stop: begin\n"); 1168 ixgb_disable_intr(adapter); 1169 adapter->hw.adapter_stopped = FALSE; 1170 ixgb_adapter_stop(&adapter->hw); 1171 callout_stop(&adapter->timer); 1172 ixgb_free_transmit_structures(adapter); 1173 ixgb_free_receive_structures(adapter); 1174 1175 1176 /* Tell the stack that the interface is no longer active */ 1177 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1178 1179 return; 1180} 1181 1182 1183/********************************************************************* 1184 * 1185 * Determine hardware revision. 1186 * 1187 **********************************************************************/ 1188static void 1189ixgb_identify_hardware(struct adapter * adapter) 1190{ 1191 device_t dev = adapter->dev; 1192 1193 /* Make sure our PCI config space has the necessary stuff set */ 1194 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 1195 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) && 1196 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) { 1197 printf("ixgb%d: Memory Access and/or Bus Master bits were not set!\n", 1198 adapter->unit); 1199 adapter->hw.pci_cmd_word |= 1200 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); 1201 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2); 1202 } 1203 /* Save off the information about this board */ 1204 adapter->hw.vendor_id = pci_get_vendor(dev); 1205 adapter->hw.device_id = pci_get_device(dev); 1206 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 1207 adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); 1208 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); 1209 1210 /* Set MacType, etc. 
based on this PCI info */ 1211 switch (adapter->hw.device_id) { 1212 case IXGB_DEVICE_ID_82597EX: 1213 case IXGB_DEVICE_ID_82597EX_SR: 1214 adapter->hw.mac_type = ixgb_82597; 1215 break; 1216 default: 1217 INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id); 1218 printf("ixgb%d: unsupported device id 0x%x\n", adapter->unit, adapter->hw.device_id); 1219 } 1220 1221 return; 1222} 1223 1224static int 1225ixgb_allocate_pci_resources(struct adapter * adapter) 1226{ 1227 int rid; 1228 device_t dev = adapter->dev; 1229 1230 rid = IXGB_MMBA; 1231 adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY, 1232 &rid, 0, ~0, 1, 1233 RF_ACTIVE); 1234 if (!(adapter->res_memory)) { 1235 printf("ixgb%d: Unable to allocate bus resource: memory\n", 1236 adapter->unit); 1237 return (ENXIO); 1238 } 1239 adapter->osdep.mem_bus_space_tag = 1240 rman_get_bustag(adapter->res_memory); 1241 adapter->osdep.mem_bus_space_handle = 1242 rman_get_bushandle(adapter->res_memory); 1243 adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle; 1244 1245 rid = 0x0; 1246 adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ, 1247 &rid, 0, ~0, 1, 1248 RF_SHAREABLE | RF_ACTIVE); 1249 if (!(adapter->res_interrupt)) { 1250 printf("ixgb%d: Unable to allocate bus resource: interrupt\n", 1251 adapter->unit); 1252 return (ENXIO); 1253 } 1254 if (bus_setup_intr(dev, adapter->res_interrupt, 1255 INTR_TYPE_NET | INTR_MPSAFE, 1256 (void (*) (void *))ixgb_intr, adapter, 1257 &adapter->int_handler_tag)) { 1258 printf("ixgb%d: Error registering interrupt handler!\n", 1259 adapter->unit); 1260 return (ENXIO); 1261 } 1262 adapter->hw.back = &adapter->osdep; 1263 1264 return (0); 1265} 1266 1267static void 1268ixgb_free_pci_resources(struct adapter * adapter) 1269{ 1270 device_t dev = adapter->dev; 1271 1272 if (adapter->res_interrupt != NULL) { 1273 bus_teardown_intr(dev, adapter->res_interrupt, 1274 adapter->int_handler_tag); 1275 bus_release_resource(dev, SYS_RES_IRQ, 0, 1276 
adapter->res_interrupt); 1277 } 1278 if (adapter->res_memory != NULL) { 1279 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA, 1280 adapter->res_memory); 1281 } 1282 if (adapter->res_ioport != NULL) { 1283 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 1284 adapter->res_ioport); 1285 } 1286 return; 1287} 1288 1289/********************************************************************* 1290 * 1291 * Initialize the hardware to a configuration as specified by the 1292 * adapter structure. The controller is reset, the EEPROM is 1293 * verified, the MAC address is set, then the shared initialization 1294 * routines are called. 1295 * 1296 **********************************************************************/ 1297static int 1298ixgb_hardware_init(struct adapter * adapter) 1299{ 1300 /* Issue a global reset */ 1301 adapter->hw.adapter_stopped = FALSE; 1302 ixgb_adapter_stop(&adapter->hw); 1303 1304 /* Make sure we have a good EEPROM before we read from it */ 1305 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { 1306 printf("ixgb%d: The EEPROM Checksum Is Not Valid\n", 1307 adapter->unit); 1308 return (EIO); 1309 } 1310 if (!ixgb_init_hw(&adapter->hw)) { 1311 printf("ixgb%d: Hardware Initialization Failed", 1312 adapter->unit); 1313 return (EIO); 1314 } 1315 1316 return (0); 1317} 1318 1319/********************************************************************* 1320 * 1321 * Setup networking device structure and register an interface. 
1322 * 1323 **********************************************************************/ 1324static void 1325ixgb_setup_interface(device_t dev, struct adapter * adapter) 1326{ 1327 struct ifnet *ifp; 1328 INIT_DEBUGOUT("ixgb_setup_interface: begin"); 1329 1330 ifp = adapter->ifp = if_alloc(IFT_ETHER); 1331 if (ifp == NULL) 1332 panic("%s: can not if_alloc()\n", device_get_nameunit(dev)); 1333#if __FreeBSD_version >= 502000 1334 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1335#else 1336 ifp->if_unit = adapter->unit; 1337 ifp->if_name = "ixgb"; 1338#endif 1339 ifp->if_mtu = ETHERMTU; 1340 ifp->if_baudrate = 1000000000; 1341 ifp->if_init = ixgb_init; 1342 ifp->if_softc = adapter; 1343 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1344 ifp->if_ioctl = ixgb_ioctl; 1345 ifp->if_start = ixgb_start; 1346 ifp->if_watchdog = ixgb_watchdog; 1347 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1; 1348 1349#if __FreeBSD_version < 500000 1350 ether_ifattach(ifp, ETHER_BPF_SUPPORTED); 1351#else 1352 ether_ifattach(ifp, adapter->hw.curr_mac_addr); 1353#endif 1354 1355 ifp->if_capabilities = IFCAP_HWCSUM; 1356#ifdef DEVICE_POLLING 1357 ifp->if_capabilities |= IFCAP_POLLING; 1358#endif 1359 1360 /* 1361 * Tell the upper layer(s) we support long frames. 
1362 */ 1363 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1364 1365#if __FreeBSD_version >= 500000 1366 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 1367#endif 1368 1369 ifp->if_capenable = ifp->if_capabilities; 1370 1371 /* 1372 * Specify the media types supported by this adapter and register 1373 * callbacks to update media and link information 1374 */ 1375 ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change, 1376 ixgb_media_status); 1377 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 1378 0, NULL); 1379 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 1380 0, NULL); 1381 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1382 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 1383 1384 return; 1385} 1386 1387/******************************************************************** 1388 * Manage DMA'able memory. 1389 *******************************************************************/ 1390static void 1391ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) 1392{ 1393 if (error) 1394 return; 1395 *(bus_addr_t *) arg = segs->ds_addr; 1396 return; 1397} 1398 1399static int 1400ixgb_dma_malloc(struct adapter * adapter, bus_size_t size, 1401 struct ixgb_dma_alloc * dma, int mapflags) 1402{ 1403 int r; 1404 1405 r = bus_dma_tag_create(NULL, /* parent */ 1406 PAGE_SIZE, 0, /* alignment, bounds */ 1407 BUS_SPACE_MAXADDR, /* lowaddr */ 1408 BUS_SPACE_MAXADDR, /* highaddr */ 1409 NULL, NULL, /* filter, filterarg */ 1410 size, /* maxsize */ 1411 1, /* nsegments */ 1412 size, /* maxsegsize */ 1413 BUS_DMA_ALLOCNOW, /* flags */ 1414#if __FreeBSD_version >= 502000 1415 NULL, /* lockfunc */ 1416 NULL, /* lockfuncarg */ 1417#endif 1418 &dma->dma_tag); 1419 if (r != 0) { 1420 printf("ixgb%d: ixgb_dma_malloc: bus_dma_tag_create failed; " 1421 "error %u\n", adapter->unit, r); 1422 goto fail_0; 1423 } 1424 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, 1425 BUS_DMA_NOWAIT, 
&dma->dma_map); 1426 if (r != 0) { 1427 printf("ixgb%d: ixgb_dma_malloc: bus_dmamem_alloc failed; " 1428 "error %u\n", adapter->unit, r); 1429 goto fail_1; 1430 } 1431 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 1432 size, 1433 ixgb_dmamap_cb, 1434 &dma->dma_paddr, 1435 mapflags | BUS_DMA_NOWAIT); 1436 if (r != 0) { 1437 printf("ixgb%d: ixgb_dma_malloc: bus_dmamap_load failed; " 1438 "error %u\n", adapter->unit, r); 1439 goto fail_2; 1440 } 1441 dma->dma_size = size; 1442 return (0); 1443fail_2: 1444 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 1445fail_1: 1446 bus_dma_tag_destroy(dma->dma_tag); 1447fail_0: 1448 dma->dma_map = NULL; 1449 dma->dma_tag = NULL; 1450 return (r); 1451} 1452 1453 1454 1455static void 1456ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma) 1457{ 1458 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 1459 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 1460 bus_dma_tag_destroy(dma->dma_tag); 1461} 1462 1463/********************************************************************* 1464 * 1465 * Allocate memory for tx_buffer structures. The tx_buffer stores all 1466 * the information needed to transmit a packet on the wire. 1467 * 1468 **********************************************************************/ 1469static int 1470ixgb_allocate_transmit_structures(struct adapter * adapter) 1471{ 1472 if (!(adapter->tx_buffer_area = 1473 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * 1474 adapter->num_tx_desc, M_DEVBUF, 1475 M_NOWAIT | M_ZERO))) { 1476 printf("ixgb%d: Unable to allocate tx_buffer memory\n", 1477 adapter->unit); 1478 return ENOMEM; 1479 } 1480 bzero(adapter->tx_buffer_area, 1481 sizeof(struct ixgb_buffer) * adapter->num_tx_desc); 1482 1483 return 0; 1484} 1485 1486/********************************************************************* 1487 * 1488 * Allocate and initialize transmit structures. 
1489 * 1490 **********************************************************************/ 1491static int 1492ixgb_setup_transmit_structures(struct adapter * adapter) 1493{ 1494 /* 1495 * Setup DMA descriptor areas. 1496 */ 1497 if (bus_dma_tag_create(NULL, /* parent */ 1498 PAGE_SIZE, 0, /* alignment, bounds */ 1499 BUS_SPACE_MAXADDR, /* lowaddr */ 1500 BUS_SPACE_MAXADDR, /* highaddr */ 1501 NULL, NULL, /* filter, filterarg */ 1502 MCLBYTES * IXGB_MAX_SCATTER, /* maxsize */ 1503 IXGB_MAX_SCATTER, /* nsegments */ 1504 MCLBYTES, /* maxsegsize */ 1505 BUS_DMA_ALLOCNOW, /* flags */ 1506#if __FreeBSD_version >= 502000 1507 NULL, /* lockfunc */ 1508 NULL, /* lockfuncarg */ 1509#endif 1510 &adapter->txtag)) { 1511 printf("ixgb%d: Unable to allocate TX DMA tag\n", adapter->unit); 1512 return (ENOMEM); 1513 } 1514 if (ixgb_allocate_transmit_structures(adapter)) 1515 return ENOMEM; 1516 1517 bzero((void *)adapter->tx_desc_base, 1518 (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc); 1519 1520 adapter->next_avail_tx_desc = 0; 1521 adapter->oldest_used_tx_desc = 0; 1522 1523 /* Set number of descriptors available */ 1524 adapter->num_tx_desc_avail = adapter->num_tx_desc; 1525 1526 /* Set checksum context */ 1527 adapter->active_checksum_context = OFFLOAD_NONE; 1528 1529 return 0; 1530} 1531 1532/********************************************************************* 1533 * 1534 * Enable transmit unit. 
1535 * 1536 **********************************************************************/ 1537static void 1538ixgb_initialize_transmit_unit(struct adapter * adapter) 1539{ 1540 u_int32_t reg_tctl; 1541 u_int64_t tdba = adapter->txdma.dma_paddr; 1542 1543 /* Setup the Base and Length of the Tx Descriptor Ring */ 1544 IXGB_WRITE_REG(&adapter->hw, TDBAL, 1545 (tdba & 0x00000000ffffffffULL)); 1546 IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32)); 1547 IXGB_WRITE_REG(&adapter->hw, TDLEN, 1548 adapter->num_tx_desc * 1549 sizeof(struct ixgb_tx_desc)); 1550 1551 /* Setup the HW Tx Head and Tail descriptor pointers */ 1552 IXGB_WRITE_REG(&adapter->hw, TDH, 0); 1553 IXGB_WRITE_REG(&adapter->hw, TDT, 0); 1554 1555 1556 HW_DEBUGOUT2("Base = %x, Length = %x\n", 1557 IXGB_READ_REG(&adapter->hw, TDBAL), 1558 IXGB_READ_REG(&adapter->hw, TDLEN)); 1559 1560 IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay); 1561 1562 1563 /* Program the Transmit Control Register */ 1564 reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL); 1565 reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE; 1566 IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl); 1567 1568 /* Setup Transmit Descriptor Settings for this adapter */ 1569 adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS; 1570 1571 if (adapter->tx_int_delay > 0) 1572 adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE; 1573 return; 1574} 1575 1576/********************************************************************* 1577 * 1578 * Free all transmit related data structures. 
1579 * 1580 **********************************************************************/ 1581static void 1582ixgb_free_transmit_structures(struct adapter * adapter) 1583{ 1584 struct ixgb_buffer *tx_buffer; 1585 int i; 1586 1587 INIT_DEBUGOUT("free_transmit_structures: begin"); 1588 1589 if (adapter->tx_buffer_area != NULL) { 1590 tx_buffer = adapter->tx_buffer_area; 1591 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { 1592 if (tx_buffer->m_head != NULL) { 1593 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 1594 bus_dmamap_destroy(adapter->txtag, tx_buffer->map); 1595 m_freem(tx_buffer->m_head); 1596 } 1597 tx_buffer->m_head = NULL; 1598 } 1599 } 1600 if (adapter->tx_buffer_area != NULL) { 1601 free(adapter->tx_buffer_area, M_DEVBUF); 1602 adapter->tx_buffer_area = NULL; 1603 } 1604 if (adapter->txtag != NULL) { 1605 bus_dma_tag_destroy(adapter->txtag); 1606 adapter->txtag = NULL; 1607 } 1608 return; 1609} 1610 1611/********************************************************************* 1612 * 1613 * The offload context needs to be set when we transfer the first 1614 * packet of a particular protocol (TCP/UDP). We change the 1615 * context only if the protocol type changes. 
1616 * 1617 **********************************************************************/ 1618static void 1619ixgb_transmit_checksum_setup(struct adapter * adapter, 1620 struct mbuf * mp, 1621 u_int8_t * txd_popts) 1622{ 1623 struct ixgb_context_desc *TXD; 1624 struct ixgb_buffer *tx_buffer; 1625 int curr_txd; 1626 1627 if (mp->m_pkthdr.csum_flags) { 1628 1629 if (mp->m_pkthdr.csum_flags & CSUM_TCP) { 1630 *txd_popts = IXGB_TX_DESC_POPTS_TXSM; 1631 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) 1632 return; 1633 else 1634 adapter->active_checksum_context = OFFLOAD_TCP_IP; 1635 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { 1636 *txd_popts = IXGB_TX_DESC_POPTS_TXSM; 1637 if (adapter->active_checksum_context == OFFLOAD_UDP_IP) 1638 return; 1639 else 1640 adapter->active_checksum_context = OFFLOAD_UDP_IP; 1641 } else { 1642 *txd_popts = 0; 1643 return; 1644 } 1645 } else { 1646 *txd_popts = 0; 1647 return; 1648 } 1649 1650 /* 1651 * If we reach this point, the checksum offload context needs to be 1652 * reset. 
1653 */ 1654 curr_txd = adapter->next_avail_tx_desc; 1655 tx_buffer = &adapter->tx_buffer_area[curr_txd]; 1656 TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd]; 1657 1658 1659 TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip); 1660 TXD->tucse = 0; 1661 1662 TXD->mss = 0; 1663 1664 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) { 1665 TXD->tucso = 1666 ENET_HEADER_SIZE + sizeof(struct ip) + 1667 offsetof(struct tcphdr, th_sum); 1668 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) { 1669 TXD->tucso = 1670 ENET_HEADER_SIZE + sizeof(struct ip) + 1671 offsetof(struct udphdr, uh_sum); 1672 } 1673 TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE; 1674 1675 tx_buffer->m_head = NULL; 1676 1677 if (++curr_txd == adapter->num_tx_desc) 1678 curr_txd = 0; 1679 1680 adapter->num_tx_desc_avail--; 1681 adapter->next_avail_tx_desc = curr_txd; 1682 return; 1683} 1684 1685/********************************************************************** 1686 * 1687 * Examine each tx_buffer in the used queue. If the hardware is done 1688 * processing the packet then free associated resources. The 1689 * tx_buffer is put back on the free queue. 
1690 * 1691 **********************************************************************/ 1692static void 1693ixgb_clean_transmit_interrupts(struct adapter * adapter) 1694{ 1695 int i, num_avail; 1696 struct ixgb_buffer *tx_buffer; 1697 struct ixgb_tx_desc *tx_desc; 1698 1699 IXGB_LOCK_ASSERT(adapter); 1700 1701 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 1702 return; 1703 1704#ifdef _SV_ 1705 adapter->clean_tx_interrupts++; 1706#endif 1707 num_avail = adapter->num_tx_desc_avail; 1708 i = adapter->oldest_used_tx_desc; 1709 1710 tx_buffer = &adapter->tx_buffer_area[i]; 1711 tx_desc = &adapter->tx_desc_base[i]; 1712 1713 while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) { 1714 1715 tx_desc->status = 0; 1716 num_avail++; 1717 1718 if (tx_buffer->m_head) { 1719 bus_dmamap_sync(adapter->txtag, tx_buffer->map, 1720 BUS_DMASYNC_POSTWRITE); 1721 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 1722 bus_dmamap_destroy(adapter->txtag, tx_buffer->map); 1723 m_freem(tx_buffer->m_head); 1724 tx_buffer->m_head = NULL; 1725 } 1726 if (++i == adapter->num_tx_desc) 1727 i = 0; 1728 1729 tx_buffer = &adapter->tx_buffer_area[i]; 1730 tx_desc = &adapter->tx_desc_base[i]; 1731 } 1732 1733 adapter->oldest_used_tx_desc = i; 1734 1735 /* 1736 * If we have enough room, clear IFF_OACTIVE to tell the stack that 1737 * it is OK to send packets. If there are no pending descriptors, 1738 * clear the timeout. Otherwise, if some descriptors have been freed, 1739 * restart the timeout. 1740 */ 1741 if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) { 1742 struct ifnet *ifp = adapter->ifp; 1743 1744 ifp->if_flags &= ~IFF_OACTIVE; 1745 if (num_avail == adapter->num_tx_desc) 1746 ifp->if_timer = 0; 1747 else if (num_avail == adapter->num_tx_desc_avail) 1748 ifp->if_timer = IXGB_TX_TIMEOUT; 1749 } 1750 adapter->num_tx_desc_avail = num_avail; 1751 return; 1752} 1753 1754 1755/********************************************************************* 1756 * 1757 * Get a buffer from system mbuf buffer pool. 
1758 * 1759 **********************************************************************/ 1760static int 1761ixgb_get_buf(int i, struct adapter * adapter, 1762 struct mbuf * nmp) 1763{ 1764 register struct mbuf *mp = nmp; 1765 struct ixgb_buffer *rx_buffer; 1766 struct ifnet *ifp; 1767 bus_addr_t paddr; 1768 int error; 1769 1770 ifp = adapter->ifp; 1771 1772 if (mp == NULL) { 1773 1774 mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1775 1776 if (mp == NULL) { 1777 adapter->mbuf_alloc_failed++; 1778 return (ENOBUFS); 1779 } 1780 mp->m_len = mp->m_pkthdr.len = MCLBYTES; 1781 } else { 1782 mp->m_len = mp->m_pkthdr.len = MCLBYTES; 1783 mp->m_data = mp->m_ext.ext_buf; 1784 mp->m_next = NULL; 1785 } 1786 1787 if (ifp->if_mtu <= ETHERMTU) { 1788 m_adj(mp, ETHER_ALIGN); 1789 } 1790 rx_buffer = &adapter->rx_buffer_area[i]; 1791 1792 /* 1793 * Using memory from the mbuf cluster pool, invoke the bus_dma 1794 * machinery to arrange the memory mapping. 1795 */ 1796 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map, 1797 mtod(mp, void *), mp->m_len, 1798 ixgb_dmamap_cb, &paddr, 0); 1799 if (error) { 1800 m_free(mp); 1801 return (error); 1802 } 1803 rx_buffer->m_head = mp; 1804 adapter->rx_desc_base[i].buff_addr = htole64(paddr); 1805 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD); 1806 1807 return (0); 1808} 1809 1810/********************************************************************* 1811 * 1812 * Allocate memory for rx_buffer structures. Since we use one 1813 * rx_buffer per received packet, the maximum number of rx_buffer's 1814 * that we'll need is equal to the number of receive descriptors 1815 * that we've allocated. 
1816 * 1817 **********************************************************************/ 1818static int 1819ixgb_allocate_receive_structures(struct adapter * adapter) 1820{ 1821 int i, error; 1822 struct ixgb_buffer *rx_buffer; 1823 1824 if (!(adapter->rx_buffer_area = 1825 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * 1826 adapter->num_rx_desc, M_DEVBUF, 1827 M_NOWAIT | M_ZERO))) { 1828 printf("ixgb%d: Unable to allocate rx_buffer memory\n", 1829 adapter->unit); 1830 return (ENOMEM); 1831 } 1832 bzero(adapter->rx_buffer_area, 1833 sizeof(struct ixgb_buffer) * adapter->num_rx_desc); 1834 1835 error = bus_dma_tag_create(NULL, /* parent */ 1836 PAGE_SIZE, 0, /* alignment, bounds */ 1837 BUS_SPACE_MAXADDR, /* lowaddr */ 1838 BUS_SPACE_MAXADDR, /* highaddr */ 1839 NULL, NULL, /* filter, filterarg */ 1840 MCLBYTES, /* maxsize */ 1841 1, /* nsegments */ 1842 MCLBYTES, /* maxsegsize */ 1843 BUS_DMA_ALLOCNOW, /* flags */ 1844#if __FreeBSD_version >= 502000 1845 NULL, /* lockfunc */ 1846 NULL, /* lockfuncarg */ 1847#endif 1848 &adapter->rxtag); 1849 if (error != 0) { 1850 printf("ixgb%d: ixgb_allocate_receive_structures: " 1851 "bus_dma_tag_create failed; error %u\n", 1852 adapter->unit, error); 1853 goto fail_0; 1854 } 1855 rx_buffer = adapter->rx_buffer_area; 1856 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { 1857 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, 1858 &rx_buffer->map); 1859 if (error != 0) { 1860 printf("ixgb%d: ixgb_allocate_receive_structures: " 1861 "bus_dmamap_create failed; error %u\n", 1862 adapter->unit, error); 1863 goto fail_1; 1864 } 1865 } 1866 1867 for (i = 0; i < adapter->num_rx_desc; i++) { 1868 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) { 1869 adapter->rx_buffer_area[i].m_head = NULL; 1870 adapter->rx_desc_base[i].buff_addr = 0; 1871 return (ENOBUFS); 1872 } 1873 } 1874 1875 return (0); 1876fail_1: 1877 bus_dma_tag_destroy(adapter->rxtag); 1878fail_0: 1879 adapter->rxtag = NULL; 1880 
free(adapter->rx_buffer_area, M_DEVBUF); 1881 adapter->rx_buffer_area = NULL; 1882 return (error); 1883} 1884 1885/********************************************************************* 1886 * 1887 * Allocate and initialize receive structures. 1888 * 1889 **********************************************************************/ 1890static int 1891ixgb_setup_receive_structures(struct adapter * adapter) 1892{ 1893 bzero((void *)adapter->rx_desc_base, 1894 (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc); 1895 1896 if (ixgb_allocate_receive_structures(adapter)) 1897 return ENOMEM; 1898 1899 /* Setup our descriptor pointers */ 1900 adapter->next_rx_desc_to_check = 0; 1901 adapter->next_rx_desc_to_use = 0; 1902 return (0); 1903} 1904 1905/********************************************************************* 1906 * 1907 * Enable receive unit. 1908 * 1909 **********************************************************************/ 1910static void 1911ixgb_initialize_receive_unit(struct adapter * adapter) 1912{ 1913 u_int32_t reg_rctl; 1914 u_int32_t reg_rxcsum; 1915 u_int32_t reg_rxdctl; 1916 struct ifnet *ifp; 1917 u_int64_t rdba = adapter->rxdma.dma_paddr; 1918 1919 ifp = adapter->ifp; 1920 1921 /* 1922 * Make sure receives are disabled while setting up the descriptor 1923 * ring 1924 */ 1925 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); 1926 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN); 1927 1928 /* Set the Receive Delay Timer Register */ 1929 IXGB_WRITE_REG(&adapter->hw, RDTR, 1930 adapter->rx_int_delay); 1931 1932 1933 /* Setup the Base and Length of the Rx Descriptor Ring */ 1934 IXGB_WRITE_REG(&adapter->hw, RDBAL, 1935 (rdba & 0x00000000ffffffffULL)); 1936 IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32)); 1937 IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc * 1938 sizeof(struct ixgb_rx_desc)); 1939 1940 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 1941 IXGB_WRITE_REG(&adapter->hw, RDH, 0); 1942 1943 IXGB_WRITE_REG(&adapter->hw, 
RDT, adapter->num_rx_desc - 1); 1944 1945 1946 1947 reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT 1948 | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT 1949 | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT; 1950 IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl); 1951 1952 1953 adapter->raidc = 1; 1954 if (adapter->raidc) { 1955 uint32_t raidc; 1956 uint8_t poll_threshold; 1957#define IXGB_RAIDC_POLL_DEFAULT 120 1958 1959 poll_threshold = ((adapter->num_rx_desc - 1) >> 3); 1960 poll_threshold >>= 1; 1961 poll_threshold &= 0x3F; 1962 raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE | 1963 (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) | 1964 (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) | 1965 poll_threshold; 1966 IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc); 1967 } 1968 /* Enable Receive Checksum Offload for TCP and UDP ? */ 1969 if (ifp->if_capenable & IFCAP_RXCSUM) { 1970 reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM); 1971 reg_rxcsum |= IXGB_RXCSUM_TUOFL; 1972 IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum); 1973 } 1974 /* Setup the Receive Control Register */ 1975 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); 1976 reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); 1977 reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC | 1978 IXGB_RCTL_CFF | 1979 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); 1980 1981 switch (adapter->rx_buffer_len) { 1982 default: 1983 case IXGB_RXBUFFER_2048: 1984 reg_rctl |= IXGB_RCTL_BSIZE_2048; 1985 break; 1986 case IXGB_RXBUFFER_4096: 1987 reg_rctl |= IXGB_RCTL_BSIZE_4096; 1988 break; 1989 case IXGB_RXBUFFER_8192: 1990 reg_rctl |= IXGB_RCTL_BSIZE_8192; 1991 break; 1992 case IXGB_RXBUFFER_16384: 1993 reg_rctl |= IXGB_RCTL_BSIZE_16384; 1994 break; 1995 } 1996 1997 reg_rctl |= IXGB_RCTL_RXEN; 1998 1999 2000 /* Enable Receives */ 2001 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); 2002 2003 return; 2004} 2005 2006/********************************************************************* 2007 * 2008 * Free 
 *  receive related data structures.
 *
 **********************************************************************/
static void
ixgb_free_receive_structures(struct adapter * adapter)
{
	struct ixgb_buffer *rx_buffer;
	int             i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Unload and destroy each descriptor's DMA map and free its mbuf. */
	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->map != NULL) {
				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
			}
			if (rx_buffer->m_head != NULL)
				m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}
	/*
	 * NOTE(review): this NULL test duplicates the one above; the two
	 * blocks could be merged.  Harmless as written.
	 */
	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Called with the adapter lock held (see IXGB_LOCK_ASSERT below);
 *  the lock is dropped around the call into the network stack.
 *
 *********************************************************************/
static void
ixgb_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet   *ifp;
	struct mbuf    *mp;
#if __FreeBSD_version < 500000
	struct ether_header *eh;
#endif
	int             eop = 0;	/* set when the current descriptor ends a packet */
	int             len;
	u_int8_t        accept_frame = 0;
	int             i;		/* index of the descriptor being examined */
	int             next_to_use = 0;
	int             eop_desc;	/* index of the last EOP descriptor seen */
	/* Pointer to the receive descriptor being examined. */
	struct ixgb_rx_desc *current_desc;

	IXGB_LOCK_ASSERT(adapter);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	next_to_use = adapter->next_rx_desc_to_use;
	eop_desc = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];

	/* Nothing to do if hardware has not written back any descriptor. */
	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
#ifdef _SV_
		adapter->no_pkts_avail++;
#endif
		return;
	}
	/* count only decrements on EOP, so it bounds packets, not fragments. */
	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
				BUS_DMASYNC_POSTREAD);
		accept_frame = 1;
		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
			count--;
			eop = 1;
		} else {
			eop = 0;
		}
		len = current_desc->length;

		/* Any hardware-reported error discards the whole frame. */
		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
					    IXGB_RX_DESC_ERRORS_RXE)) {
			accept_frame = 0;
		}
		if (accept_frame) {

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	/* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				eop_desc = i;
				adapter->fmp->m_pkthdr.rcvif = ifp;

#if __FreeBSD_version < 500000
				eh = mtod(adapter->fmp, struct ether_header *);

				/* Remove ethernet header from mbuf */
				m_adj(adapter->fmp, sizeof(struct ether_header));
				ixgb_receive_checksum(adapter, current_desc,
						      adapter->fmp);

				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(eh, adapter->fmp,
						       current_desc->special);
				else
					ether_input(ifp, eh, adapter->fmp);
#else
				ixgb_receive_checksum(adapter, current_desc,
						      adapter->fmp);
				/*
				 * VLAN_INPUT_TAG consumes the chain; its last
				 * argument NULLs fmp so we do not hand the
				 * same mbuf to if_input below.
				 */
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
						       current_desc->special,
						       adapter->fmp = NULL);

				if (adapter->fmp != NULL) {
					/*
					 * Drop the driver lock across the
					 * stack input call — presumably to
					 * avoid lock-order problems with the
					 * network stack (NOTE(review):
					 * rationale inferred, confirm).
					 */
					IXGB_UNLOCK(adapter);
					(*ifp->if_input) (ifp, adapter->fmp);
					IXGB_LOCK(adapter);
				}
#endif
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			/* mbuf now owned by the chain/stack; replenished below. */
			adapter->rx_buffer_area[i].m_head = NULL;
		} else {
			adapter->dropped_pkts++;
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status */
		current_desc->status = 0;

		/* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc) {
			i = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_check = i;

	/* Step back to the most recently written-back descriptor. */
	if (--i < 0)
		i = (adapter->num_rx_desc - 1);

	/*
	 * 82597EX: Workaround for redundent write back in receive descriptor
	 * ring (causes memory corruption).  Avoid using and re-submitting the
	 * most recently received RX descriptor back to hardware.
	 *
	 * if (Last written back descriptor == EOP bit set descriptor)
	 *	then avoid re-submitting the most recently received RX
	 *	descriptor back to hardware.
	 * if (Last written back descriptor != EOP bit set descriptor)
	 *	then avoid re-submitting the most recently received RX
	 *	descriptors till last EOP bit set descriptor.
	 */
	if (eop_desc != i) {
		if (++eop_desc == adapter->num_rx_desc)
			eop_desc = 0;
		i = eop_desc;
	}
	/* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
	while (next_to_use != i) {
		current_desc = &adapter->rx_desc_base[next_to_use];
		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
					     IXGB_RX_DESC_ERRORS_RXE))) {
			/* Errored descriptor: recycle its existing mbuf. */
			mp = adapter->rx_buffer_area[next_to_use].m_head;
			ixgb_get_buf(next_to_use, adapter, mp);
		} else {
			/* Allocate a fresh mbuf; stop on allocation failure. */
			if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
				break;
		}
		/* Advance our pointers to the next descriptor */
		if (++next_to_use == adapter->num_rx_desc) {
			next_to_use = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_use = next_to_use;
	/* Tail must point one behind the next slot to be used. */
	if (--next_to_use < 0)
		next_to_use = (adapter->num_rx_desc - 1);
	/* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
	IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);

	return;
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixgb_receive_checksum(struct adapter * adapter,
		      struct ixgb_rx_desc * rx_desc,
		      struct mbuf * mp)
{
	/* Hardware did not checksum this frame: report nothing to the stack. */
	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}
	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}
	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
			mp->m_pkthdr.csum_flags |=
				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}


/* Turn on hardware VLAN tag stripping/insertion (VME bit in CTRL0). */
static void
ixgb_enable_vlans(struct adapter * adapter)
{
	uint32_t        ctrl;

	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl |= IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

	return;
}


/* Unmask the interrupt causes this driver services. */
static void
ixgb_enable_intr(struct adapter * adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
			    IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
	return;
}

/* Mask (disable) all interrupt causes. */
static void
ixgb_disable_intr(struct adapter * adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	return;
}

/* Write a 16-bit value to the device's PCI config space at offset reg. */
void
ixgb_write_pci_cfg(struct ixgb_hw * hw,
		   uint32_t reg,
		   uint16_t * value)
{
	pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
			 *value, 2);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
ixgb_update_stats_counters(struct adapter * adapter)
{
	struct ifnet   *ifp;

	/*
	 * Accumulate the hardware statistics registers into the softc's
	 * running totals.  NOTE(review): the read order is preserved as-is;
	 * whether these registers are clear-on-read is not visible here —
	 * do not reorder without checking the 82597EX datasheet.
	 */
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);

	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);

	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);

	ifp = adapter->ifp;

	/* Fill out the OS statistics structure */
	ifp->if_ipackets = adapter->stats.gprcl;
	ifp->if_opackets = adapter->stats.gptcl;
	ifp->if_ibytes = adapter->stats.gorcl;
	ifp->if_obytes = adapter->stats.gotcl;
	ifp->if_imcasts = adapter->stats.mprcl;
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_ierrors =
		adapter->dropped_pkts +
		adapter->stats.crcerrs +
		adapter->stats.rnbc +
		adapter->stats.mpc +
		adapter->stats.rlec;

	/* NOTE(review): if_oerrors is never updated here. */
}


/**********************************************************************
 *
 *  This routine is called only when ixgb_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixgb_print_hw_stats(struct adapter * adapter)
{
	char            buf_speed[100], buf_type[100];
	ixgb_bus_speed  bus_speed;
	ixgb_bus_type   bus_type;
	int             unit = adapter->unit;

#ifdef _SV_
	printf("ixgb%d: Packets not Avail = %ld\n", unit,
	       adapter->no_pkts_avail);
	printf("ixgb%d: CleanTxInterrupts = %ld\n", unit,
	       adapter->clean_tx_interrupts);
	printf("ixgb%d: ICR RXDMT0 = %lld\n", unit,
	       (long long)adapter->sv_stats.icr_rxdmt0);
	printf("ixgb%d: ICR RXO = %lld\n", unit,
	       (long long)adapter->sv_stats.icr_rxo);
	printf("ixgb%d: ICR RXT0 = %lld\n", unit,
	       (long long)adapter->sv_stats.icr_rxt0);
	printf("ixgb%d: ICR TXDW = %lld\n", unit,
	       (long long)adapter->sv_stats.icr_TXDW);
#endif				/* _SV_ */

	/* Render the PCI bus speed/type as strings for the report below. */
	bus_speed = adapter->hw.bus.speed;
	bus_type = adapter->hw.bus.type;
	sprintf(buf_speed,
		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
		"UNKNOWN");
	printf("ixgb%d: PCI_Bus_Speed = %s\n", unit,
	       buf_speed);

	sprintf(buf_type,
		bus_type == ixgb_bus_type_pci ? "PCI" :
		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
		"UNKNOWN");
	printf("ixgb%d: PCI_Bus_Type = %s\n", unit,
	       buf_type);

	printf("ixgb%d: Tx Descriptors not Avail1 = %ld\n", unit,
	       adapter->no_tx_desc_avail1);
	printf("ixgb%d: Tx Descriptors not Avail2 = %ld\n", unit,
	       adapter->no_tx_desc_avail2);
	printf("ixgb%d: Std Mbuf Failed = %ld\n", unit,
	       adapter->mbuf_alloc_failed);
	printf("ixgb%d: Std Cluster Failed = %ld\n", unit,
	       adapter->mbuf_cluster_failed);

	printf("ixgb%d: Defer count = %lld\n", unit,
	       (long long)adapter->stats.dc);
	printf("ixgb%d: Missed Packets = %lld\n", unit,
	       (long long)adapter->stats.mpc);
	printf("ixgb%d: Receive No Buffers = %lld\n", unit,
	       (long long)adapter->stats.rnbc);
	printf("ixgb%d: Receive length errors = %lld\n", unit,
	       (long long)adapter->stats.rlec);
	printf("ixgb%d: Crc errors = %lld\n", unit,
	       (long long)adapter->stats.crcerrs);
	printf("ixgb%d: Driver dropped packets = %ld\n", unit,
	       adapter->dropped_pkts);

	printf("ixgb%d: XON Rcvd = %lld\n", unit,
	       (long long)adapter->stats.xonrxc);
	printf("ixgb%d: XON Xmtd = %lld\n", unit,
	       (long long)adapter->stats.xontxc);
	printf("ixgb%d: XOFF Rcvd = %lld\n", unit,
	       (long long)adapter->stats.xoffrxc);
	printf("ixgb%d: XOFF Xmtd = %lld\n", unit,
	       (long long)adapter->stats.xofftxc);

	printf("ixgb%d: Good Packets Rcvd = %lld\n", unit,
	       (long long)adapter->stats.gprcl);
	printf("ixgb%d: Good Packets Xmtd = %lld\n", unit,
	       (long long)adapter->stats.gptcl);

	printf("ixgb%d: Jumbo frames recvd = %lld\n", unit,
	       (long long)adapter->stats.jprcl);
	printf("ixgb%d: Jumbo frames Xmtd = %lld\n", unit,
	       (long long)adapter->stats.jptcl);

	return;

2483} 2484 2485static int 2486ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS) 2487{ 2488 int error; 2489 int result; 2490 struct adapter *adapter; 2491 2492 result = -1; 2493 error = sysctl_handle_int(oidp, &result, 0, req); 2494 2495 if (error || !req->newptr) 2496 return (error); 2497 2498 if (result == 1) { 2499 adapter = (struct adapter *) arg1; 2500 ixgb_print_hw_stats(adapter); 2501 } 2502 return error; 2503} 2504