/*	$OpenBSD: if_igc.c,v 1.15 2024/01/23 08:48:12 kevlo Exp $	*/
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
 * All rights reserved.
 * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/intrmap.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/toeplitz.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/if_igc.h>
#include <dev/pci/igc_hw.h>

const struct pci_matchid igc_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I220_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I221_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_BLANK_NVM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_I },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_IT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LMVP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_BLANK_NVM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_IT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_K },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_V }
};

/*********************************************************************
 *  Function Prototypes
 *********************************************************************/
int	igc_match(struct device *, void *, void *);
void	igc_attach(struct device *, struct device *, void *);
int	igc_detach(struct device *, int);

void	igc_identify_hardware(struct igc_softc *);
int	igc_allocate_pci_resources(struct igc_softc *);
int	igc_allocate_queues(struct igc_softc *);
void	igc_free_pci_resources(struct igc_softc *);
void	igc_reset(struct igc_softc *);
void	igc_init_dmac(struct igc_softc *, uint32_t);
int	igc_allocate_msix(struct igc_softc *);
void	igc_setup_msix(struct igc_softc *);
int	igc_dma_malloc(struct igc_softc *, bus_size_t, struct igc_dma_alloc *);
void	igc_dma_free(struct igc_softc *, struct igc_dma_alloc *);
void	igc_setup_interface(struct igc_softc *);

void	igc_init(void *);
void	igc_start(struct ifqueue *);
int	igc_txeof(struct tx_ring *);
void	igc_stop(struct igc_softc *);
int	igc_ioctl(struct ifnet *, u_long, caddr_t);
int	igc_rxrinfo(struct igc_softc *, struct if_rxrinfo *);
int	igc_rxfill(struct rx_ring *);
void	igc_rxrefill(void *);
int	igc_rxeof(struct rx_ring *);
void	igc_rx_checksum(uint32_t, struct mbuf *, uint32_t);
void	igc_watchdog(struct ifnet *);
void	igc_media_status(struct ifnet *, struct ifmediareq *);
int	igc_media_change(struct ifnet *);
void	igc_iff(struct igc_softc *);
void	igc_update_link_status(struct igc_softc *);
int	igc_get_buf(struct rx_ring *, int);
int	igc_tx_ctx_setup(struct tx_ring *, struct mbuf *, int, uint32_t *);

void	igc_configure_queues(struct igc_softc *);
void	igc_set_queues(struct igc_softc *, uint32_t, uint32_t, int);
void	igc_enable_queue(struct igc_softc *, uint32_t);
void	igc_enable_intr(struct igc_softc *);
void	igc_disable_intr(struct igc_softc *);
int	igc_intr_link(void *);
int	igc_intr_queue(void *);

int	igc_allocate_transmit_buffers(struct tx_ring *);
int	igc_setup_transmit_structures(struct igc_softc *);
int	igc_setup_transmit_ring(struct tx_ring *);
void	igc_initialize_transmit_unit(struct igc_softc *);
void	igc_free_transmit_structures(struct igc_softc *);
void	igc_free_transmit_buffers(struct tx_ring *);
int	igc_allocate_receive_buffers(struct rx_ring *);
int	igc_setup_receive_structures(struct igc_softc *);
int	igc_setup_receive_ring(struct rx_ring *);
void	igc_initialize_receive_unit(struct igc_softc *);
void	igc_free_receive_structures(struct igc_softc *);
void	igc_free_receive_buffers(struct rx_ring *);
void	igc_initialize_rss_mapping(struct igc_softc *);

void	igc_get_hw_control(struct igc_softc *);
void	igc_release_hw_control(struct igc_softc *);
int	igc_is_valid_ether_addr(uint8_t *);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

struct cfdriver igc_cd = {
	NULL, "igc", DV_IFNET
};

const struct cfattach igc_ca = {
	sizeof(struct igc_softc), igc_match, igc_attach, igc_detach
};

/*********************************************************************
 *  Device identification routine
 *
 *  igc_match determines if the driver should be loaded on the
 *  adapter based on the PCI vendor/device id of the adapter.
 *
 *  return nonzero if the adapter is matched, 0 otherwise
 *********************************************************************/
int
igc_match(struct device *parent, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, igc_devices,
	    nitems(igc_devices));
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all
 *  resources and initializes the hardware.
 *********************************************************************/
void
igc_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct igc_softc *sc = (struct igc_softc *)self;
	struct igc_hw *hw = &sc->hw;

	sc->osdep.os_sc = sc;
	sc->osdep.os_pa = *pa;

	/* Determine hardware and mac info */
	igc_identify_hardware(sc);

	sc->num_tx_desc = IGC_DEFAULT_TXD;
	sc->num_rx_desc = IGC_DEFAULT_RXD;

	/* Setup PCI resources */
	if (igc_allocate_pci_resources(sc))
		goto err_pci;

	/* Allocate TX/RX queues */
	if (igc_allocate_queues(sc))
		goto err_pci;

	/* Do shared code initialization */
	if (igc_setup_init_funcs(hw, true)) {
		printf(": Setup of shared code failed\n");
		goto err_pci;
	}

	hw->mac.autoneg = DO_AUTO_NEG;
	hw->phy.autoneg_wait_to_complete = false;
	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options. */
	if (hw->phy.media_type == igc_media_type_copper)
		hw->phy.mdix = AUTO_ALL_MODES;

	/* Set the max frame size. */
	sc->hw.mac.max_frame_size = 9234;

	/* Allocate multicast array memory. */
	sc->mta = mallocarray(ETHER_ADDR_LEN, MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_NOWAIT);
	if (sc->mta == NULL) {
		printf(": Can not allocate multicast setup array\n");
		goto err_late;
	}

	/* Check SOL/IDER usage. */
	if (igc_check_reset_block(hw))
		printf(": PHY reset is blocked due to SOL/IDER session\n");

	/* Disable Energy Efficient Ethernet. */
	sc->hw.dev_spec._i225.eee_disable = true;

	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it. */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			printf(": The EEPROM checksum is not valid\n");
			goto err_late;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM. */
	if (igc_read_mac_addr(hw) < 0) {
		printf(": EEPROM read error while reading MAC address\n");
		goto err_late;
	}

	if (!igc_is_valid_ether_addr(hw->mac.addr)) {
		printf(": Invalid MAC address\n");
		goto err_late;
	}

	memcpy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);

	if (igc_allocate_msix(sc))
		goto err_late;

	/* Setup OS specific network interface. */
	igc_setup_interface(sc);

	igc_reset(sc);
	hw->mac.get_link_status = true;
	igc_update_link_status(sc);

	/* The driver can now take control from firmware. */
	igc_get_hw_control(sc);

	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
	return;

err_late:
	igc_release_hw_control(sc);
err_pci:
	igc_free_pci_resources(sc);
	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
int
igc_detach(struct device *self, int flags)
{
	struct igc_softc *sc = (struct igc_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	igc_stop(sc);

	igc_phy_hw_reset(&sc->hw);
	igc_release_hw_control(sc);

	ether_ifdetach(ifp);
	if_detach(ifp);

	igc_free_pci_resources(sc);

	igc_free_transmit_structures(sc);
	igc_free_receive_structures(sc);
	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	return 0;
}

void
igc_identify_hardware(struct igc_softc *sc)
{
	struct igc_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;

	/* Save off the information about this board. */
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	/* Do shared code init and setup. */
	if (igc_set_mac_type(&sc->hw)) {
		printf(": Setup init failure\n");
		return;
	}
}

int
igc_allocate_pci_resources(struct igc_softc *sc)
{
	struct igc_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IGC_PCIREG);
	if (pci_mapreg_map(pa, IGC_PCIREG, memtype, 0, &os->os_memt,
	    &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
		printf(": unable to map registers\n");
		return ENXIO;
	}
	sc->hw.hw_addr = (uint8_t *)os->os_membase;
	sc->hw.back = os;

	igc_setup_msix(sc);

	return 0;
}

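/*********************************************************************
 *
 *  Allocate the top level queue structs and one TX/RX ring pair per
 *  queue, along with DMA'able memory for each descriptor ring.
 *  (Descriptive summary of the function below.)
 *
 **********************************************************************/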
int
igc_allocate_queues(struct igc_softc *sc)
{
	struct igc_queue *iq;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	int i, rsize, rxconf, tsize, txconf;

	/* Allocate the top level queue structs. */
	sc->queues = mallocarray(sc->sc_nqueues, sizeof(struct igc_queue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		printf("%s: unable to allocate queue\n", DEVNAME(sc));
		goto fail;
	}

	/* Allocate the TX ring. */
	sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->tx_rings == NULL) {
		printf("%s: unable to allocate TX ring\n", DEVNAME(sc));
		goto fail;
	}

	/* Allocate the RX ring. */
	sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct rx_ring),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->rx_rings == NULL) {
		printf("%s: unable to allocate RX ring\n", DEVNAME(sc));
		goto rx_fail;
	}

	txconf = rxconf = 0;

	/* Set up the TX queues. */
	tsize = roundup2(sc->num_tx_desc * sizeof(union igc_adv_tx_desc),
	    IGC_DBA_ALIGN);
	for (i = 0; i < sc->sc_nqueues; i++, txconf++) {
		txr = &sc->tx_rings[i];
		txr->sc = sc;
		txr->me = i;

		if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
			printf("%s: unable to allocate TX descriptor\n",
			    DEVNAME(sc));
			goto err_tx_desc;
		}
		txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);
	}

	/* Set up the RX queues. */
	rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc),
	    IGC_DBA_ALIGN);
	for (i = 0; i < sc->sc_nqueues; i++, rxconf++) {
		rxr = &sc->rx_rings[i];
		rxr->sc = sc;
		rxr->me = i;
		timeout_set(&rxr->rx_refill, igc_rxrefill, rxr);

		if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) {
			printf("%s: unable to allocate RX descriptor\n",
			    DEVNAME(sc));
			goto err_rx_desc;
		}
		rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);
	}

	/* Set up the queue holding structs. */
	for (i = 0; i < sc->sc_nqueues; i++) {
		iq = &sc->queues[i];
		iq->sc = sc;
		iq->txr = &sc->tx_rings[i];
		iq->rxr = &sc->rx_rings[i];
		snprintf(iq->name, sizeof(iq->name), "%s:%d", DEVNAME(sc), i);
	}

	return 0;

err_rx_desc:
	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
		igc_dma_free(sc, &rxr->rxdma);
err_tx_desc:
	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
		igc_dma_free(sc, &txr->txdma);
	free(sc->rx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct rx_ring));
	sc->rx_rings = NULL;
rx_fail:
	free(sc->tx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct tx_ring));
	sc->tx_rings = NULL;
fail:
	return ENOMEM;
}

void
igc_free_pci_resources(struct igc_softc *sc)
{
	struct igc_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	struct igc_queue *iq = sc->queues;
	int i;

	/* Release all msix queue resources. */
	for (i = 0; i < sc->sc_nqueues; i++, iq++) {
		if (iq->tag)
			pci_intr_disestablish(pa->pa_pc, iq->tag);
		iq->tag = NULL;
	}

	if (sc->tag)
		pci_intr_disestablish(pa->pa_pc, sc->tag);
	sc->tag = NULL;
	if (os->os_membase != 0)
		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
	os->os_membase = 0;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure.
 *
 **********************************************************************/
void
igc_reset(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t pba;
	uint16_t rx_buffer_size;

	/* Let the firmware know the OS is in control */
	igc_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	pba = IGC_PBA_34K;

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = (pba & 0xffff) << 10;
	hw->fc.high_water = rx_buffer_size -
	    roundup2(sc->hw.mac.max_frame_size, 1024);
	/* 16-byte granularity */
	hw->fc.low_water = hw->fc.high_water - 16;

	if (sc->fc) /* locally set flow control value? */
		hw->fc.requested_mode = sc->fc;
	else
		hw->fc.requested_mode = igc_fc_full;

	hw->fc.pause_time = IGC_FC_PAUSE_TIME;

	hw->fc.send_xon = true;

	/* Issue a global reset */
	igc_reset_hw(hw);
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* and a re-init */
	if (igc_init_hw(hw) < 0) {
		printf(": Hardware Initialization Failed\n");
		return;
	}

	/* Setup DMA Coalescing */
	igc_init_dmac(sc, pba);

	IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
	igc_get_phy_info(hw);
	igc_check_for_link(hw);
}

/*********************************************************************
 *
 *  Initialize the DMA Coalescing feature
 *
 **********************************************************************/
void
igc_init_dmac(struct igc_softc *sc, uint32_t pba)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t dmac, reg = ~IGC_DMACR_DMAC_EN;
	uint16_t hwm, max_frame_size;
	int status;

	max_frame_size = sc->hw.mac.max_frame_size;

	if (sc->dmac == 0) { /* Disabling it */
		IGC_WRITE_REG(hw, IGC_DMACR, reg);
		return;
	} else
		printf(": DMA Coalescing enabled\n");

	/* Set starting threshold */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);

	hwm = 64 * pba - max_frame_size / 16;
	if (hwm < 64 * (pba - 6))
		hwm = 64 * (pba - 6);
	reg = IGC_READ_REG(hw, IGC_FCRTC);
	reg &= ~IGC_FCRTC_RTH_COAL_MASK;
	reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT)
	    & IGC_FCRTC_RTH_COAL_MASK);
	IGC_WRITE_REG(hw, IGC_FCRTC, reg);

	dmac = pba - max_frame_size / 512;
	if (dmac < pba - 10)
		dmac = pba - 10;
	reg = IGC_READ_REG(hw, IGC_DMACR);
	reg &= ~IGC_DMACR_DMACTHR_MASK;
	reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT)
	    & IGC_DMACR_DMACTHR_MASK);

	/* transition to L0x or L1 if available..*/
	reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK);

	/*
	 * Check if status is 2.5Gb backplane connection
	 * before configuration of watchdog timer, which is
	 * in msec values in 12.8usec intervals
	 * watchdog timer = msec values in 32usec intervals
	 * for non 2.5Gb connection
	 */
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
		reg |= ((sc->dmac * 5) >> 6);
	else
		reg |= (sc->dmac >> 5);

	IGC_WRITE_REG(hw, IGC_DMACR, reg);

	IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);

	/* Set the interval before transition */
	reg = IGC_READ_REG(hw, IGC_DMCTLX);
	reg |= IGC_DMCTLX_DCFLUSH_DIS;

	/*
	 * in 2.5Gb connection, TTLX unit is 0.4 usec
	 * which is 0x4*2 = 0xA. But delay is still 4 usec
	 */
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
		reg |= 0xA;
	else
		reg |= 0x4;

	IGC_WRITE_REG(hw, IGC_DMCTLX, reg);

	/* free space in tx packet buffer to wake from DMA coal */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE -
	    (2 * max_frame_size)) >> 6);

	/* make low power state decision controlled by DMA coal */
	reg = IGC_READ_REG(hw, IGC_PCIEMISC);
	reg &= ~IGC_PCIEMISC_LX_DECISION;
	IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
}

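/*********************************************************************
 *
 *  Set up one MSI-X vector per queue for TX/RX completions, each
 *  bound to a CPU from the interrupt map, plus one final vector for
 *  link status changes.  (Descriptive summary of the function below.)
 *
 **********************************************************************/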
int
igc_allocate_msix(struct igc_softc *sc)
{
	struct igc_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	struct igc_queue *iq;
	pci_intr_handle_t ih;
	int i, error = 0;

	for (i = 0, iq = sc->queues; i < sc->sc_nqueues; i++, iq++) {
		if (pci_intr_map_msix(pa, i, &ih)) {
			printf("%s: unable to map msi-x vector %d\n",
			    DEVNAME(sc), i);
			error = ENOMEM;
			goto fail;
		}

		iq->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
		    igc_intr_queue, iq, iq->name);
		if (iq->tag == NULL) {
			printf("%s: unable to establish interrupt %d\n",
			    DEVNAME(sc), i);
			error = ENOMEM;
			goto fail;
		}

		iq->msix = i;
		iq->eims = 1 << i;
	}

	/* Now the link status/control last MSI-X vector. */
	if (pci_intr_map_msix(pa, i, &ih)) {
		printf("%s: unable to map link vector\n", DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}

	sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
	    igc_intr_link, sc, sc->sc_dev.dv_xname);
	if (sc->tag == NULL) {
		printf("%s: unable to establish link interrupt\n", DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}

	sc->linkvec = i;
	printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih),
	    i, (i > 1) ? "s" : "");

	return 0;
fail:
	for (iq = sc->queues; i > 0; i--, iq++) {
		if (iq->tag == NULL)
			continue;
		pci_intr_disestablish(pa->pa_pc, iq->tag);
		iq->tag = NULL;
	}

	return error;
}

void
igc_setup_msix(struct igc_softc *sc)
{
	struct igc_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	int nmsix;

	nmsix = pci_intr_msix_count(pa);
	if (nmsix <= 1)
		printf(": not enough msi-x vectors\n");

	/* Give one vector to events. */
	nmsix--;

	sc->sc_intrmap = intrmap_create(&sc->sc_dev, nmsix, IGC_MAX_VECTORS,
	    INTRMAP_POWEROF2);
	sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
}

int
igc_dma_malloc(struct igc_softc *sc, bus_size_t size, struct igc_dma_alloc *dma)
{
	struct igc_osdep *os = &sc->osdep;

	dma->dma_tag = os->os_pa.pa_dmat;

	if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->dma_map))
		return 1;
	if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
	    1, &dma->dma_nseg, BUS_DMA_NOWAIT))
		goto destroy;
	if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
	    &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))
		goto free;
	if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
	    NULL, BUS_DMA_NOWAIT))
		goto unmap;

	dma->dma_size = size;

	return 0;
unmap:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
free:
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
destroy:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return 1;
}

void
igc_dma_free(struct igc_softc *sc, struct igc_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;

	if (dma->dma_map != NULL) {
		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
		    dma->dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
		dma->dma_map = NULL;
	}
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
void
igc_setup_interface(struct igc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = igc_ioctl;
	ifp->if_qstart = igc_start;
	ifp->if_watchdog = igc_watchdog;
	ifp->if_hardmtu = sc->hw.mac.max_frame_size - ETHER_HDR_LEN -
	    ETHER_CRC_LEN;
	ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef notyet
#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
#endif

	ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->media, IFM_IMASK, igc_media_change, igc_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	if_attach_queues(ifp, sc->sc_nqueues);
	if_attach_iqueues(ifp, sc->sc_nqueues);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifqueue *ifq = ifp->if_ifqs[i];
		struct ifiqueue *ifiq = ifp->if_iqs[i];
		struct tx_ring *txr = &sc->tx_rings[i];
		struct rx_ring *rxr = &sc->rx_rings[i];

		ifq->ifq_softc = txr;
		txr->ifq = ifq;

		ifiq->ifiq_softc = rxr;
		rxr->ifiq = ifiq;
	}
}

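/*********************************************************************
 *
 *  Bring the interface up: reset the hardware, program the unicast
 *  and multicast filters, set up the TX/RX rings and the MSI-X queue
 *  mappings, fill the RX rings and enable interrupts.  (Descriptive
 *  summary of the function below.)
 *
 **********************************************************************/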
void
igc_init(void *arg)
{
	struct igc_softc *sc = (struct igc_softc *)arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct rx_ring *rxr;
	uint32_t ctrl = 0;
	int i, s;

	s = splnet();

	igc_stop(sc);

	/* Get the latest mac address, user can use a LAA. */
	bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the receive address array. */
	igc_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/* Initialize the hardware. */
	igc_reset(sc);
	igc_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available. */
	IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN);

	/* Prepare transmit descriptors and buffers. */
	if (igc_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		    DEVNAME(sc));
		igc_stop(sc);
		splx(s);
		return;
	}
	igc_initialize_transmit_unit(sc);

	sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
	/* Prepare receive descriptors and buffers. */
	if (igc_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		    DEVNAME(sc));
		igc_stop(sc);
		splx(s);
		return;
	}
	igc_initialize_receive_unit(sc);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) {
		ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL);
		ctrl |= IGC_CTRL_VME;
		IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl);
	}

	/* Setup multicast table. */
	igc_iff(sc);

	igc_clear_hw_cntrs_base_generic(&sc->hw);

	igc_configure_queues(sc);

	/* This clears any pending interrupts */
	IGC_READ_REG(&sc->hw, IGC_ICR);
	IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC);

	/* The driver can now take control from firmware. */
	igc_get_hw_control(sc);

	/* Set Energy Efficient Ethernet. */
	igc_set_eee_i225(&sc->hw, true, true, true);

	for (i = 0; i < sc->sc_nqueues; i++) {
		rxr = &sc->rx_rings[i];
		igc_rxfill(rxr);
		if (if_rxr_inuse(&rxr->rx_ring) == 0) {
			printf("%s: Unable to fill any rx descriptors\n",
			    DEVNAME(sc));
			igc_stop(sc);
			splx(s);
		}
		IGC_WRITE_REG(&sc->hw, IGC_RDT(i),
		    (rxr->last_desc_filled + 1) % sc->num_rx_desc);
	}

	igc_enable_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->sc_nqueues; i++)
		ifq_clr_oactive(ifp->if_ifqs[i]);

	splx(s);
}

static inline int
igc_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
{
	int error;

	/* Try to load the mbuf chain as-is; EFBIG means too many segments. */
	error = bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
	if (error != EFBIG)
		return (error);

	/* Compact the chain into fewer mbufs and retry once. */
	error = m_defrag(m, M_DONTWAIT);
	if (error != 0)
		return (error);

	return (bus_dmamap_load_mbuf(dmat, map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
}

void
igc_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct igc_softc *sc = ifp->if_softc;
	struct tx_ring *txr = ifq->ifq_softc;
	union igc_adv_tx_desc *txdesc;
	struct igc_tx_buf *txbuf;
	bus_dmamap_t map;
	struct mbuf *m;
	unsigned int prod, free, last, i;
	unsigned int mask;
	uint32_t cmd_type_len;
	uint32_t olinfo_status;
	int post = 0;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif

	if (!sc->link_active) {
		ifq_purge(ifq);
		return;
	}

	prod = txr->next_avail_desc;
	free = txr->next_to_clean;
	if (free <= prod)
		free += sc->num_tx_desc;
	free -= prod;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	mask = sc->num_tx_desc - 1;

	for (;;) {
		if (free <= IGC_MAX_SCATTER + 1) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		txbuf = &txr->tx_buffers[prod];
		map = txbuf->map;

		if (igc_load_mbuf(txr->txdma.dma_tag, map, m) != 0) {
			ifq->ifq_errors++;
			m_freem(m);
			continue;
		}

		olinfo_status = m->m_pkthdr.len << IGC_ADVTXD_PAYLEN_SHIFT;

		bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		if (igc_tx_ctx_setup(txr, m, prod, &olinfo_status)) {
			/* Consume the first descriptor */
			prod++;
			prod &= mask;
			free--;
		}

		for (i = 0; i < map->dm_nsegs; i++) {
			txdesc = &txr->tx_base[prod];

			cmd_type_len = IGC_ADVTXD_DCMD_IFCS |
			    IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
			    map->dm_segs[i].ds_len;
			if (i == map->dm_nsegs - 1)
				cmd_type_len |= IGC_ADVTXD_DCMD_EOP |
				    IGC_ADVTXD_DCMD_RS;

			htolem64(&txdesc->read.buffer_addr,
			    map->dm_segs[i].ds_addr);
			htolem32(&txdesc->read.cmd_type_len, cmd_type_len);
			htolem32(&txdesc->read.olinfo_status, olinfo_status);

			last = prod;

			prod++;
			prod &= mask;
		}

		txbuf->m_head = m;
		txbuf->eop_index = last;

#if NBPFILTER > 0
		if_bpf = ifp->if_bpf;
		if (if_bpf)
			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
#endif

		free -= i;
		post = 1;
	}

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	if (post) {
		txr->next_avail_desc = prod;
		IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
	}
}

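/*********************************************************************
 *
 *  Walk the ring from next_to_clean and, for each packet whose EOP
 *  descriptor has been written back with the DD bit set, unload the
 *  DMA map and free the transmitted mbuf, then restart the ifqueue
 *  if it had been marked oactive.  (Descriptive summary of the
 *  function below.)
 *
 **********************************************************************/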
int
igc_txeof(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;
	struct ifqueue *ifq = txr->ifq;
	union igc_adv_tx_desc *txdesc;
	struct igc_tx_buf *txbuf;
	bus_dmamap_t map;
	unsigned int cons, prod, last;
	unsigned int mask;
	int done = 0;

	prod = txr->next_avail_desc;
	cons = txr->next_to_clean;

	if (cons == prod)
		return (0);

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	mask = sc->num_tx_desc - 1;

	do {
		txbuf = &txr->tx_buffers[cons];
		last = txbuf->eop_index;
		txdesc = &txr->tx_base[last];

		if (!(txdesc->wb.status & htole32(IGC_TXD_STAT_DD)))
			break;

		map = txbuf->map;

		bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->txdma.dma_tag, map);
		m_freem(txbuf->m_head);

		txbuf->m_head = NULL;
		txbuf->eop_index = -1;

		cons = last + 1;
		cons &= mask;

		done = 1;
	} while (cons != prod);

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	txr->next_to_clean = cons;

	if (ifq_is_oactive(ifq))
		ifq_restart(ifq);

	return (done);
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
void
igc_stop(struct igc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	/* Tell the stack that the interface is no longer active. */
	ifp->if_flags &= ~IFF_RUNNING;

	igc_disable_intr(sc);

	igc_reset_hw(&sc->hw);
	IGC_WRITE_REG(&sc->hw, IGC_WUC, 0);

	intr_barrier(sc->tag);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifqueue *ifq = ifp->if_ifqs[i];
		ifq_barrier(ifq);
		ifq_clr_oactive(ifq);

		if (sc->queues[i].tag != NULL)
			intr_barrier(sc->queues[i].tag);
		timeout_del(&sc->rx_rings[i].rx_refill);
	}

	igc_free_transmit_structures(sc);
	igc_free_receive_structures(sc);

	igc_update_link_status(sc);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  igc_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
int
igc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct igc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			igc_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				igc_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				igc_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;
	case SIOCGIFRXR:
		error = igc_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			igc_disable_intr(sc);
			igc_iff(sc);
			igc_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return error;
}

int
igc_rxrinfo(struct igc_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr;
	struct rx_ring *rxr;
	int error, i, n = 0;

	ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_nqueues; i++) {
		rxr = &sc->rx_rings[i];
		ifr[n].ifr_size = MCLBYTES;
		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
		ifr[n].ifr_info = rxr->rx_ring;
		n++;
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
	free(ifr, M_DEVBUF, sc->sc_nqueues * sizeof(*ifr));

	return error;
}

int
igc_rxfill(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	int i, post = 0;
	u_int slots;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	i = rxr->last_desc_filled;
	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc); slots > 0;
	    slots--) {
		if (++i == sc->num_rx_desc)
			i = 0;

		if (igc_get_buf(rxr, i) != 0)
			break;

		rxr->last_desc_filled = i;
		post = 1;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	if_rxr_put(&rxr->rx_ring, slots);

	return post;
}

void
igc_rxrefill(void *xrxr)
{
	struct rx_ring *rxr = xrxr;
	struct igc_softc *sc = rxr->sc;

	if (igc_rxfill(rxr)) {
		IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me),
		    (rxr->last_desc_filled + 1) % sc->num_rx_desc);
	} else if (if_rxr_inuse(&rxr->rx_ring) == 0)
		timeout_add(&rxr->rx_refill, 1);
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and sends data which has been
 *  dma'ed into host memory to the upper layer.
 *
 *********************************************************************/
int
igc_rxeof(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	struct igc_rx_buf *rxbuf, *nxbuf;
	union igc_adv_rx_desc *rxdesc;
	uint32_t ptype, staterr = 0;
	uint16_t len, vtag;
	uint8_t eop = 0;
	int i, nextp;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return 0;

	i = rxr->next_to_check;
	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
		uint32_t hash;
		uint16_t hashtype;

		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    i * sizeof(union igc_adv_rx_desc),
		    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_POSTREAD);

		rxdesc = &rxr->rx_base[i];
		staterr = letoh32(rxdesc->wb.upper.status_error);
		if (!ISSET(staterr, IGC_RXD_STAT_DD)) {
			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			    i * sizeof(union igc_adv_rx_desc),
			    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD);
			break;
		}

		/* Zero out the receive descriptors status. */
		rxdesc->wb.upper.status_error = 0;
		rxbuf = &rxr->rx_buffers[i];

		/* Pull the mbuf off the ring. */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);

		mp = rxbuf->buf;
		len = letoh16(rxdesc->wb.upper.length);
		vtag = letoh16(rxdesc->wb.upper.vlan);
		eop = ((staterr & IGC_RXD_STAT_EOP) == IGC_RXD_STAT_EOP);
		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
		    IGC_PKTTYPE_MASK;
		hash = letoh32(rxdesc->wb.lower.hi_dword.rss);
		hashtype = le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
		    IGC_RXDADV_RSSTYPE_MASK;

		if (staterr & IGC_RXDEXT_STATERR_RXE) {
			if (rxbuf->fmp) {
				m_freem(rxbuf->fmp);
				rxbuf->fmp = NULL;
			}

			m_freem(mp);
			rxbuf->buf = NULL;
			goto next_desc;
		}

		if (mp == NULL) {
			panic("%s: igc_rxeof: NULL mbuf in slot %d "
			    "(nrx %d, filled %d)", DEVNAME(sc), i,
			    if_rxr_inuse(&rxr->rx_ring), rxr->last_desc_filled);
		}

		if (!eop) {
			/*
			 * Figure out the next descriptor of this frame.
			 */
			nextp = i + 1;
			if (nextp == sc->num_rx_desc)
				nextp = 0;
			nxbuf = &rxr->rx_buffers[nextp];
			/* prefetch(nxbuf); */
		}

		mp->m_len = len;

		m = rxbuf->fmp;
		rxbuf->buf = rxbuf->fmp = NULL;

		if (m != NULL)
			m->m_pkthdr.len += mp->m_len;
		else {
			m = mp;
			m->m_pkthdr.len = mp->m_len;
#if NVLAN > 0
			if (staterr & IGC_RXD_STAT_VP) {
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
#endif
		}

		/* Pass the head pointer on */
		if (eop == 0) {
			nxbuf->fmp = m;
			m = NULL;
			mp->m_next = nxbuf->buf;
		} else {
			igc_rx_checksum(staterr, m, ptype);

			if (hashtype != IGC_RXDADV_RSSTYPE_NONE) {
				m->m_pkthdr.ph_flowid = hash;
				SET(m->m_pkthdr.csum_flags, M_FLOWID);
			}

			ml_enqueue(&ml, m);
		}
next_desc:
		if_rxr_put(&rxr->rx_ring, 1);
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    i * sizeof(union igc_adv_rx_desc),
		    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD);

		/* Advance our pointers to the next descriptor. */
		if (++i == sc->num_rx_desc)
			i = 0;
	}
	rxr->next_to_check = i;

	if (ifiq_input(rxr->ifiq, &ml))
		if_rxr_livelocked(&rxr->rx_ring);

	if (!(staterr & IGC_RXD_STAT_DD))
		return 0;

	return 1;
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
void
igc_rx_checksum(uint32_t staterr, struct mbuf *m, uint32_t ptype)
{
	uint16_t status = (uint16_t)staterr;
	uint8_t errors = (uint8_t)(staterr >> 24);

	if (status & IGC_RXD_STAT_IPCS) {
		if (!(errors & IGC_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
		} else
			m->m_pkthdr.csum_flags = 0;
	}

	if (status & (IGC_RXD_STAT_TCPCS | IGC_RXD_STAT_UDPCS)) {
		if (!(errors & IGC_RXD_ERR_TCPE))
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}

void
igc_watchdog(struct ifnet *ifp)
{
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
void
igc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igc_softc *sc = ifp->if_softc;

	igc_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case 100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case 1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case 2500:
		ifmr->ifm_active |= IFM_2500_T;
		break;
	}

	if (sc->link_duplex == FULL_DUPLEX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
int
igc_media_change(struct ifnet *ifp)
{
	struct igc_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	sc->hw.mac.autoneg = DO_AUTO_NEG;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_2500_T:
		sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
		break;
	case IFM_1000_T:
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
		else
			sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
		break;
	case IFM_10_T:
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
		else
			sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
		break;
	default:
		return EINVAL;
	}

	igc_init(sc);

	return 0;
}

void
igc_iff(struct igc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct arpcom *ac = &sc->sc_ac;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, sizeof(uint8_t) * ETHER_ADDR_LEN *
	    MAX_NUM_MULTICAST_ADDRESSES);

	reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
	reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg_rctl |= IGC_RCTL_MPE;
		if (ifp->if_flags & IFF_PROMISC)
			reg_rctl |= IGC_RCTL_UPE;
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo,
			    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
			mcnt++;

			ETHER_NEXT_MULTI(step, enm);
		}

		igc_update_mc_addr_list(&sc->hw, mta, mcnt);
	}

	IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
}

void
igc_update_link_status(struct igc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct igc_hw *hw = &sc->hw;
	int link_state;

	if (IGC_READ_REG(&sc->hw, IGC_STATUS) & IGC_STATUS_LU) {
		if (sc->link_active == 0) {
			igc_get_speed_and_duplex(hw, &sc->link_speed,
			    &sc->link_duplex);
			sc->link_active = 1;
			ifp->if_baudrate = IF_Mbps(sc->link_speed);
		}
		link_state = (sc->link_duplex == FULL_DUPLEX) ?
		    LINK_STATE_FULL_DUPLEX : LINK_STATE_HALF_DUPLEX;
	} else {
		if (sc->link_active == 1) {
			ifp->if_baudrate = sc->link_speed = 0;
			sc->link_duplex = 0;
			sc->link_active = 0;
		}
		link_state = LINK_STATE_DOWN;
	}
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
 *
 **********************************************************************/
int
igc_get_buf(struct rx_ring *rxr, int i)
{
	struct igc_softc *sc = rxr->sc;
	struct igc_rx_buf *rxbuf;
	struct mbuf *m;
	union igc_adv_rx_desc *rxdesc;
	int error;

	rxbuf = &rxr->rx_buffers[i];
	rxdesc = &rxr->rx_base[i];
	if (rxbuf->buf) {
		printf("%s: slot %d already has an mbuf\n", DEVNAME(sc), i);
		return ENOBUFS;
	}

	m = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz);
	if (!m)
		return ENOBUFS;

	m->m_data += (m->m_ext.ext_size - sc->rx_mbuf_sz);
	m->m_len = m->m_pkthdr.len = sc->rx_mbuf_sz;

	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map, m,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return error;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
	    rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxbuf->buf = m;

	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);

	return 0;
}

void
igc_configure_queues(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	struct igc_queue *iq = sc->queues;
	uint32_t ivar, newitr = 0;
	int i;

	/* First turn on RSS capability */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME |
	    IGC_GPIE_PBA | IGC_GPIE_NSICR);

	/* Set the starting interrupt rate */
	newitr = (4000000 / MAX_INTS_PER_SEC) & 0x7FFC;

	newitr |= IGC_EITR_CNT_IGNR;

	/* Turn on MSI-X */
	for (i = 0; i < sc->sc_nqueues; i++, iq++) {
		/* RX entries */
		igc_set_queues(sc, i, iq->msix, 0);
		/* TX entries */
		igc_set_queues(sc, i, iq->msix, 1);
		sc->msix_queuesmask |= iq->eims;
		IGC_WRITE_REG(hw, IGC_EITR(iq->msix), newitr);
	}

	/* And for the link interrupt */
	ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
	sc->msix_linkmask = 1 << sc->linkvec;
	IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);
}

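/*
 * Program the IVAR table entry for a queue.  As the masks and shifts
 * below imply, each 32-bit IVAR register covers two queues: the RX
 * vector lives in the low byte and the TX vector in the second byte
 * of each 16-bit half, with IGC_IVAR_VALID (bit 7) marking an entry
 * as active.
 */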
void
igc_set_queues(struct igc_softc *sc, uint32_t entry, uint32_t vector, int type)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t ivar, index;

	index = entry >> 1;
	ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
	if (type) {
		if (entry & 1) {
			ivar &= 0x00FFFFFF;
			ivar |= (vector | IGC_IVAR_VALID) << 24;
		} else {
			ivar &= 0xFFFF00FF;
			ivar |= (vector | IGC_IVAR_VALID) << 8;
		}
	} else {
		if (entry & 1) {
			ivar &= 0xFF00FFFF;
			ivar |= (vector | IGC_IVAR_VALID) << 16;
		} else {
			ivar &= 0xFFFFFF00;
			ivar |= vector | IGC_IVAR_VALID;
		}
	}
	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
}

void
igc_enable_queue(struct igc_softc *sc, uint32_t eims)
{
	IGC_WRITE_REG(&sc->hw, IGC_EIMS, eims);
}

void
igc_enable_intr(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t mask;

	mask = (sc->msix_queuesmask | sc->msix_linkmask);
	IGC_WRITE_REG(hw, IGC_EIAC, mask);
	IGC_WRITE_REG(hw, IGC_EIAM, mask);
	IGC_WRITE_REG(hw, IGC_EIMS, mask);
	IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC);
	IGC_WRITE_FLUSH(hw);
}

void
igc_disable_intr(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;

	IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
	IGC_WRITE_REG(hw, IGC_EIAC, 0);
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
	IGC_WRITE_FLUSH(hw);
}

int
igc_intr_link(void *arg)
{
	struct igc_softc *sc = (struct igc_softc *)arg;
	uint32_t reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);

	if (reg_icr & IGC_ICR_LSC) {
		KERNEL_LOCK();
		sc->hw.mac.get_link_status = true;
		igc_update_link_status(sc);
		KERNEL_UNLOCK();
	}

	IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
	IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->msix_linkmask);

	return 1;
}

int
igc_intr_queue(void *arg)
{
	struct igc_queue *iq = arg;
	struct igc_softc *sc = iq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct rx_ring *rxr = iq->rxr;
	struct tx_ring *txr = iq->txr;

	if (ifp->if_flags & IFF_RUNNING) {
		igc_txeof(txr);
		igc_rxeof(rxr);
		igc_rxrefill(rxr);
	}

	igc_enable_queue(sc, iq->eims);

	return 1;
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire.
 *
 **********************************************************************/
int
igc_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;
	struct igc_tx_buf *txbuf;
	int error, i;

	txr->tx_buffers = mallocarray(sc->num_tx_desc,
	    sizeof(struct igc_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txr->tx_buffers == NULL) {
		printf("%s: Unable to allocate tx_buffer memory\n",
		    DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}
	txr->txtag = txr->txdma.dma_tag;

	/* Create the descriptor buffer dma maps. */
	for (i = 0; i < sc->num_tx_desc; i++) {
		txbuf = &txr->tx_buffers[i];
		error = bus_dmamap_create(txr->txdma.dma_tag, IGC_TSO_SIZE,
		    IGC_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT, &txbuf->map);
		if (error != 0) {
			printf("%s: Unable to create TX DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}

	return 0;
fail:
	return error;
}

/*********************************************************************
 *
 *  Allocate and initialize transmit structures.
 *
 **********************************************************************/
int
igc_setup_transmit_structures(struct igc_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, txr++) {
		if (igc_setup_transmit_ring(txr))
			goto fail;
	}

	return 0;
fail:
	igc_free_transmit_structures(sc);
	return ENOBUFS;
}

*/ 1874 txr->next_avail_desc = 0; 1875 txr->next_to_clean = 0; 1876 1877 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0, 1878 txr->txdma.dma_map->dm_mapsize, 1879 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1880 1881 return 0; 1882} 1883 1884/********************************************************************* 1885 * 1886 * Enable transmit unit. 1887 * 1888 **********************************************************************/ 1889void 1890igc_initialize_transmit_unit(struct igc_softc *sc) 1891{ 1892 struct ifnet *ifp = &sc->sc_ac.ac_if; 1893 struct tx_ring *txr; 1894 struct igc_hw *hw = &sc->hw; 1895 uint64_t bus_addr; 1896 uint32_t tctl, txdctl = 0; 1897 int i; 1898 1899 /* Setup the Base and Length of the TX descriptor ring. */ 1900 for (i = 0; i < sc->sc_nqueues; i++) { 1901 txr = &sc->tx_rings[i]; 1902 1903 bus_addr = txr->txdma.dma_map->dm_segs[0].ds_addr; 1904 1905 /* Base and len of TX ring */ 1906 IGC_WRITE_REG(hw, IGC_TDLEN(i), 1907 sc->num_tx_desc * sizeof(union igc_adv_tx_desc)); 1908 IGC_WRITE_REG(hw, IGC_TDBAH(i), (uint32_t)(bus_addr >> 32)); 1909 IGC_WRITE_REG(hw, IGC_TDBAL(i), (uint32_t)bus_addr); 1910 1911 /* Init the HEAD/TAIL indices */ 1912 IGC_WRITE_REG(hw, IGC_TDT(i), 0); 1913 IGC_WRITE_REG(hw, IGC_TDH(i), 0); 1914 1915 txr->watchdog_timer = 0; 1916 1917 txdctl = 0; /* Clear txdctl */ 1918 txdctl |= 0x1f; /* PTHRESH */ 1919 txdctl |= 1 << 8; /* HTHRESH */ 1920 txdctl |= 1 << 16; /* WTHRESH */ 1921 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ 1922 txdctl |= IGC_TXDCTL_GRAN; 1923 txdctl |= 1 << 25; /* LWTHRESH */ 1924 1925 IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl); 1926 } 1927 ifp->if_timer = 0; 1928 1929 /* Program the Transmit Control Register */ 1930 tctl = IGC_READ_REG(&sc->hw, IGC_TCTL); 1931 tctl &= ~IGC_TCTL_CT; 1932 tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN | 1933 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT)); 1934 1935 /* This write will effectively turn on the transmit unit. */ 1936 IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl); 1937} 1938 1939/********************************************************************* 1940 * 1941 * Free all transmit rings. 1942 * 1943 **********************************************************************/ 1944void 1945igc_free_transmit_structures(struct igc_softc *sc) 1946{ 1947 struct tx_ring *txr = sc->tx_rings; 1948 int i; 1949 1950 for (i = 0; i < sc->sc_nqueues; i++, txr++) 1951 igc_free_transmit_buffers(txr); 1952} 1953 1954/********************************************************************* 1955 * 1956 * Free transmit ring related data structures. 
1957 * 1958 **********************************************************************/ 1959void 1960igc_free_transmit_buffers(struct tx_ring *txr) 1961{ 1962 struct igc_softc *sc = txr->sc; 1963 struct igc_tx_buf *txbuf; 1964 int i; 1965 1966 if (txr->tx_buffers == NULL) 1967 return; 1968 1969 txbuf = txr->tx_buffers; 1970 for (i = 0; i < sc->num_tx_desc; i++, txbuf++) { 1971 if (txbuf->map != NULL && txbuf->map->dm_nsegs > 0) { 1972 bus_dmamap_sync(txr->txdma.dma_tag, txbuf->map, 1973 0, txbuf->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1974 bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map); 1975 } 1976 if (txbuf->m_head != NULL) { 1977 m_freem(txbuf->m_head); 1978 txbuf->m_head = NULL; 1979 } 1980 if (txbuf->map != NULL) { 1981 bus_dmamap_destroy(txr->txdma.dma_tag, txbuf->map); 1982 txbuf->map = NULL; 1983 } 1984 } 1985 1986 if (txr->tx_buffers != NULL) 1987 free(txr->tx_buffers, M_DEVBUF, 1988 sc->num_tx_desc * sizeof(struct igc_tx_buf)); 1989 txr->tx_buffers = NULL; 1990 txr->txtag = NULL; 1991} 1992 1993 1994/********************************************************************* 1995 * 1996 * Advanced Context Descriptor setup for VLAN, CSUM or TSO 1997 * 1998 **********************************************************************/ 1999 2000int 2001igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod, 2002 uint32_t *olinfo_status) 2003{ 2004 struct ether_extracted ext; 2005 struct igc_adv_tx_context_desc *txdesc; 2006 uint32_t type_tucmd_mlhl = 0; 2007 uint32_t vlan_macip_lens = 0; 2008 uint32_t iphlen; 2009 int off = 0; 2010 2011 vlan_macip_lens |= (sizeof(*ext.eh) << IGC_ADVTXD_MACLEN_SHIFT); 2012 2013 /* 2014 * In advanced descriptors the vlan tag must 2015 * be placed into the context descriptor. Hence 2016 * we need to make one even if not doing offloads. 
2017 */ 2018#ifdef notyet 2019#if NVLAN > 0 2020 if (ISSET(mp->m_flags, M_VLANTAG)) { 2021 uint32_t vtag = mp->m_pkthdr.ether_vtag; 2022 vlan_macip_lens |= (vtag << IGC_ADVTXD_VLAN_SHIFT); 2023 off = 1; 2024 } 2025#endif 2026#endif 2027 2028 ether_extract_headers(mp, &ext); 2029 2030 if (ext.ip4) { 2031 iphlen = ext.ip4->ip_hl << 2; 2032 2033 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4; 2034 if (ISSET(mp->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT)) { 2035 *olinfo_status |= IGC_TXD_POPTS_IXSM << 8; 2036 off = 1; 2037 } 2038#ifdef INET6 2039 } else if (ext.ip6) { 2040 iphlen = sizeof(*ext.ip6); 2041 2042 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6; 2043#endif 2044 } else { 2045 return 0; 2046 } 2047 2048 vlan_macip_lens |= iphlen; 2049 type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT; 2050 2051 if (ext.tcp) { 2052 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP; 2053 if (ISSET(mp->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) { 2054 *olinfo_status |= IGC_TXD_POPTS_TXSM << 8; 2055 off = 1; 2056 } 2057 } else if (ext.udp) { 2058 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP; 2059 if (ISSET(mp->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) { 2060 *olinfo_status |= IGC_TXD_POPTS_TXSM << 8; 2061 off = 1; 2062 } 2063 } 2064 2065 if (off == 0) 2066 return 0; 2067 2068 /* Now ready a context descriptor */ 2069 txdesc = (struct igc_adv_tx_context_desc *)&txr->tx_base[prod]; 2070 2071 /* Now copy bits into descriptor */ 2072 htolem32(&txdesc->vlan_macip_lens, vlan_macip_lens); 2073 htolem32(&txdesc->type_tucmd_mlhl, type_tucmd_mlhl); 2074 htolem32(&txdesc->seqnum_seed, 0); 2075 htolem32(&txdesc->mss_l4len_idx, 0); 2076 2077 return 1; 2078} 2079 2080/********************************************************************* 2081 * 2082 * Allocate memory for rx_buffer structures. Since we use one 2083 * rx_buffer per received packet, the maximum number of rx_buffer's 2084 * that we'll need is equal to the number of receive descriptors 2085 * that we've allocated. 2086 * 2087 **********************************************************************/ 2088int 2089igc_allocate_receive_buffers(struct rx_ring *rxr) 2090{ 2091 struct igc_softc *sc = rxr->sc; 2092 struct igc_rx_buf *rxbuf; 2093 int i, error; 2094 2095 rxr->rx_buffers = mallocarray(sc->num_rx_desc, 2096 sizeof(struct igc_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO); 2097 if (rxr->rx_buffers == NULL) { 2098 printf("%s: Unable to allocate rx_buffer memory\n", 2099 DEVNAME(sc)); 2100 error = ENOMEM; 2101 goto fail; 2102 } 2103 2104 rxbuf = rxr->rx_buffers; 2105 for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) { 2106 error = bus_dmamap_create(rxr->rxdma.dma_tag, 2107 MAX_JUMBO_FRAME_SIZE, 1, MAX_JUMBO_FRAME_SIZE, 0, 2108 BUS_DMA_NOWAIT, &rxbuf->map); 2109 if (error) { 2110 printf("%s: Unable to create RX DMA map\n", 2111 DEVNAME(sc)); 2112 goto fail; 2113 } 2114 } 2115 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0, 2116 rxr->rxdma.dma_map->dm_mapsize, 2117 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2118 2119 return 0; 2120fail: 2121 return error; 2122} 2123 2124/********************************************************************* 2125 * 2126 * Allocate and initialize receive structures. 
2127 * 2128 **********************************************************************/ 2129int 2130igc_setup_receive_structures(struct igc_softc *sc) 2131{ 2132 struct rx_ring *rxr = sc->rx_rings; 2133 int i; 2134 2135 for (i = 0; i < sc->sc_nqueues; i++, rxr++) { 2136 if (igc_setup_receive_ring(rxr)) 2137 goto fail; 2138 } 2139 2140 return 0; 2141fail: 2142 igc_free_receive_structures(sc); 2143 return ENOBUFS; 2144} 2145 2146/********************************************************************* 2147 * 2148 * Initialize a receive ring and its buffers. 2149 * 2150 **********************************************************************/ 2151int 2152igc_setup_receive_ring(struct rx_ring *rxr) 2153{ 2154 struct igc_softc *sc = rxr->sc; 2155 struct ifnet *ifp = &sc->sc_ac.ac_if; 2156 int rsize; 2157 2158 rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc), 2159 IGC_DBA_ALIGN); 2160 2161 /* Clear the ring contents. */ 2162 bzero((void *)rxr->rx_base, rsize); 2163 2164 if (igc_allocate_receive_buffers(rxr)) 2165 return ENOMEM; 2166 2167 /* Setup our descriptor indices. */ 2168 rxr->next_to_check = 0; 2169 rxr->last_desc_filled = sc->num_rx_desc - 1; 2170 2171 if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1), 2172 sc->num_rx_desc - 1); 2173 2174 return 0; 2175} 2176 2177/********************************************************************* 2178 * 2179 * Enable receive unit. 2180 * 2181 **********************************************************************/ 2182#define BSIZEPKT_ROUNDUP ((1 << IGC_SRRCTL_BSIZEPKT_SHIFT) - 1) 2183 2184void 2185igc_initialize_receive_unit(struct igc_softc *sc) 2186{ 2187 struct rx_ring *rxr = sc->rx_rings; 2188 struct igc_hw *hw = &sc->hw; 2189 uint32_t rctl, rxcsum, srrctl = 0; 2190 int i; 2191 2192 /* 2193 * Make sure receives are disabled while setting 2194 * up the descriptor ring. 2195 */ 2196 rctl = IGC_READ_REG(hw, IGC_RCTL); 2197 IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN); 2198 2199 /* Setup the Receive Control Register */ 2200 rctl &= ~(3 << IGC_RCTL_MO_SHIFT); 2201 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO | 2202 IGC_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); 2203 2204 /* Do not store bad packets */ 2205 rctl &= ~IGC_RCTL_SBP; 2206 2207 /* Enable Long Packet receive */ 2208 if (sc->hw.mac.max_frame_size != ETHER_MAX_LEN) 2209 rctl |= IGC_RCTL_LPE; 2210 2211 /* Strip the CRC */ 2212 rctl |= IGC_RCTL_SECRC; 2213 2214 /* 2215 * Set the interrupt throttling rate. Value is calculated 2216 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) 2217 */ 2218 IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR); 2219 2220 rxcsum = IGC_READ_REG(hw, IGC_RXCSUM); 2221 rxcsum &= ~IGC_RXCSUM_PCSD; 2222 2223 if (sc->sc_nqueues > 1) 2224 rxcsum |= IGC_RXCSUM_PCSD; 2225 2226 IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum); 2227 2228 if (sc->sc_nqueues > 1) 2229 igc_initialize_rss_mapping(sc); 2230 2231 /* Set maximum packet buffer len */ 2232 srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> 2233 IGC_SRRCTL_BSIZEPKT_SHIFT; 2234 /* srrctl above overrides this but set the register to a sane value */ 2235 rctl |= IGC_RCTL_SZ_2048; 2236 2237 /* 2238 * If TX flow control is disabled and there's > 1 queue defined, 2239 * enable DROP. 2240 * 2241 * This drops frames rather than hanging the RX MAC for all queues. 2242 */ 2243 if ((sc->sc_nqueues > 1) && (sc->fc == igc_fc_none || 2244 sc->fc == igc_fc_rx_pause)) { 2245 srrctl |= IGC_SRRCTL_DROP_EN; 2246 } 2247 2248 /* Setup the Base and Length of the RX descriptor rings. 
*/ 2249 for (i = 0; i < sc->sc_nqueues; i++, rxr++) { 2250 IGC_WRITE_REG(hw, IGC_RXDCTL(i), 0); 2251 uint64_t bus_addr = rxr->rxdma.dma_map->dm_segs[0].ds_addr; 2252 uint32_t rxdctl; 2253 2254 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; 2255 2256 IGC_WRITE_REG(hw, IGC_RDLEN(i), 2257 sc->num_rx_desc * sizeof(union igc_adv_rx_desc)); 2258 IGC_WRITE_REG(hw, IGC_RDBAH(i), (uint32_t)(bus_addr >> 32)); 2259 IGC_WRITE_REG(hw, IGC_RDBAL(i), (uint32_t)bus_addr); 2260 IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl); 2261 2262 /* Setup the Head and Tail Descriptor Pointers */ 2263 IGC_WRITE_REG(hw, IGC_RDH(i), 0); 2264 IGC_WRITE_REG(hw, IGC_RDT(i), 0); 2265 2266 /* Enable this Queue */ 2267 rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i)); 2268 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; 2269 rxdctl &= 0xFFF00000; 2270 rxdctl |= IGC_RX_PTHRESH; 2271 rxdctl |= IGC_RX_HTHRESH << 8; 2272 rxdctl |= IGC_RX_WTHRESH << 16; 2273 IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl); 2274 } 2275 2276 /* Make sure VLAN Filters are off */ 2277 rctl &= ~IGC_RCTL_VFE; 2278 2279 /* Write out the settings */ 2280 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 2281} 2282 2283/********************************************************************* 2284 * 2285 * Free all receive rings. 2286 * 2287 **********************************************************************/ 2288void 2289igc_free_receive_structures(struct igc_softc *sc) 2290{ 2291 struct rx_ring *rxr; 2292 int i; 2293 2294 for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++) 2295 if_rxr_init(&rxr->rx_ring, 0, 0); 2296 2297 for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++) 2298 igc_free_receive_buffers(rxr); 2299} 2300 2301/********************************************************************* 2302 * 2303 * Free receive ring data structures 2304 * 2305 **********************************************************************/ 2306void 2307igc_free_receive_buffers(struct rx_ring *rxr) 2308{ 2309 struct igc_softc *sc = rxr->sc; 2310 struct igc_rx_buf *rxbuf; 2311 int i; 2312 2313 if (rxr->rx_buffers != NULL) { 2314 for (i = 0; i < sc->num_rx_desc; i++) { 2315 rxbuf = &rxr->rx_buffers[i]; 2316 if (rxbuf->buf != NULL) { 2317 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 2318 0, rxbuf->map->dm_mapsize, 2319 BUS_DMASYNC_POSTREAD); 2320 bus_dmamap_unload(rxr->rxdma.dma_tag, 2321 rxbuf->map); 2322 m_freem(rxbuf->buf); 2323 rxbuf->buf = NULL; 2324 } 2325 bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map); 2326 rxbuf->map = NULL; 2327 } 2328 free(rxr->rx_buffers, M_DEVBUF, 2329 sc->num_rx_desc * sizeof(struct igc_rx_buf)); 2330 rxr->rx_buffers = NULL; 2331 } 2332} 2333 2334/* 2335 * Initialise the RSS mapping for NICs that support multiple transmit/ 2336 * receive rings. 2337 */ 2338void 2339igc_initialize_rss_mapping(struct igc_softc *sc) 2340{ 2341 struct igc_hw *hw = &sc->hw; 2342 uint32_t rss_key[10], mrqc, reta, shift = 0; 2343 int i, queue_id; 2344 2345 /* 2346 * The redirection table controls which destination 2347 * queue each bucket redirects traffic to. 2348 * Each DWORD represents four queues, with the LSB 2349 * being the first queue in the DWORD. 2350 * 2351 * This just allocates buckets to queues using round-robin 2352 * allocation. 2353 * 2354 * NOTE: It Just Happens to line up with the default 2355 * RSS allocation method. 
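 *
 * With sc_nqueues == 2, for example, buckets alternate 0,1,0,1,...
 * and every RETA dword written below ends up as 0x01000100: queue 0
 * in the low byte for bucket n+0, queue 1 in the next byte for
 * bucket n+1, and so on across all 128 buckets.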
2356 */ 2357 2358 /* Warning FM follows */ 2359 reta = 0; 2360 for (i = 0; i < 128; i++) { 2361 queue_id = (i % sc->sc_nqueues); 2362 /* Adjust if required */ 2363 queue_id = queue_id << shift; 2364 2365 /* 2366 * The low 8 bits are for hash value (n+0); 2367 * The next 8 bits are for hash value (n+1), etc. 2368 */ 2369 reta = reta >> 8; 2370 reta = reta | ( ((uint32_t) queue_id) << 24); 2371 if ((i & 3) == 3) { 2372 IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta); 2373 reta = 0; 2374 } 2375 } 2376 2377 /* 2378 * MRQC: Multiple Receive Queues Command 2379 * Set queuing to RSS control, number depends on the device. 2380 */ 2381 mrqc = IGC_MRQC_ENABLE_RSS_4Q; 2382 2383 /* Set up random bits */ 2384 stoeplitz_to_key(&rss_key, sizeof(rss_key)); 2385 2386 /* Now fill our hash function seeds */ 2387 for (i = 0; i < 10; i++) 2388 IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]); 2389 2390 /* 2391 * Configure the RSS fields to hash upon. 2392 */ 2393 mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 | IGC_MRQC_RSS_FIELD_IPV4_TCP); 2394 mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 | IGC_MRQC_RSS_FIELD_IPV6_TCP); 2395 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; 2396 2397 IGC_WRITE_REG(hw, IGC_MRQC, mrqc); 2398} 2399 2400/* 2401 * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. 2402 * For ASF and Pass Through versions of f/w this means 2403 * that the driver is loaded. For AMT version type f/w 2404 * this means that the network i/f is open. 2405 */ 2406void 2407igc_get_hw_control(struct igc_softc *sc) 2408{ 2409 uint32_t ctrl_ext; 2410 2411 ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT); 2412 IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 2413} 2414 2415/* 2416 * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 2417 * For ASF and Pass Through versions of f/w this means that 2418 * the driver is no longer loaded. For AMT versions of the 2419 * f/w this means that the network i/f is closed. 2420 */ 2421void 2422igc_release_hw_control(struct igc_softc *sc) 2423{ 2424 uint32_t ctrl_ext; 2425 2426 ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT); 2427 IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 2428} 2429 2430int 2431igc_is_valid_ether_addr(uint8_t *addr) 2432{ 2433 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; 2434 2435 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { 2436 return 0; 2437 } 2438 2439 return 1; 2440} 2441