if_igc.c revision 1.10
1/* $OpenBSD: if_igc.c,v 1.10 2022/11/11 16:41:44 mbuhl Exp $ */ 2/*- 3 * SPDX-License-Identifier: BSD-2-Clause 4 * 5 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org> 6 * All rights reserved. 7 * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 */ 30 31#include "bpfilter.h" 32#include "vlan.h" 33 34#include <sys/param.h> 35#include <sys/systm.h> 36#include <sys/sockio.h> 37#include <sys/mbuf.h> 38#include <sys/malloc.h> 39#include <sys/kernel.h> 40#include <sys/socket.h> 41#include <sys/device.h> 42#include <sys/endian.h> 43#include <sys/intrmap.h> 44 45#include <net/if.h> 46#include <net/if_media.h> 47#include <net/toeplitz.h> 48 49#include <netinet/in.h> 50#include <netinet/if_ether.h> 51#include <netinet/ip.h> 52#include <netinet/ip6.h> 53 54#if NBPFILTER > 0 55#include <net/bpf.h> 56#endif 57 58#include <machine/bus.h> 59#include <machine/intr.h> 60 61#include <dev/pci/pcivar.h> 62#include <dev/pci/pcireg.h> 63#include <dev/pci/pcidevs.h> 64#include <dev/pci/if_igc.h> 65#include <dev/pci/igc_hw.h> 66 67const struct pci_matchid igc_devices[] = { 68 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I220_V }, 69 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I221_V }, 70 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_BLANK_NVM }, 71 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_I }, 72 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_IT }, 73 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K }, 74 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K2 }, 75 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LM }, 76 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LMVP }, 77 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_V }, 78 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_BLANK_NVM }, 79 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_IT }, 80 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_LM }, 81 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_K }, 82 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_V } 83}; 84 85/********************************************************************* 86 * Function Prototypes 87 *********************************************************************/ 88int igc_match(struct device *, void *, void *); 89void igc_attach(struct device *, struct device *, void *); 90int igc_detach(struct device *, int); 91 92void igc_identify_hardware(struct igc_softc *); 
93int igc_allocate_pci_resources(struct igc_softc *); 94int igc_allocate_queues(struct igc_softc *); 95void igc_free_pci_resources(struct igc_softc *); 96void igc_reset(struct igc_softc *); 97void igc_init_dmac(struct igc_softc *, uint32_t); 98int igc_allocate_msix(struct igc_softc *); 99void igc_setup_msix(struct igc_softc *); 100int igc_dma_malloc(struct igc_softc *, bus_size_t, struct igc_dma_alloc *); 101void igc_dma_free(struct igc_softc *, struct igc_dma_alloc *); 102void igc_setup_interface(struct igc_softc *); 103 104void igc_init(void *); 105void igc_start(struct ifqueue *); 106int igc_txeof(struct tx_ring *); 107void igc_stop(struct igc_softc *); 108int igc_ioctl(struct ifnet *, u_long, caddr_t); 109int igc_rxrinfo(struct igc_softc *, struct if_rxrinfo *); 110int igc_rxfill(struct rx_ring *); 111void igc_rxrefill(void *); 112int igc_rxeof(struct rx_ring *); 113void igc_rx_checksum(uint32_t, struct mbuf *, uint32_t); 114void igc_watchdog(struct ifnet *); 115void igc_media_status(struct ifnet *, struct ifmediareq *); 116int igc_media_change(struct ifnet *); 117void igc_iff(struct igc_softc *); 118void igc_update_link_status(struct igc_softc *); 119int igc_get_buf(struct rx_ring *, int); 120int igc_tx_ctx_setup(struct tx_ring *, struct mbuf *, int, uint32_t *); 121 122void igc_configure_queues(struct igc_softc *); 123void igc_set_queues(struct igc_softc *, uint32_t, uint32_t, int); 124void igc_enable_queue(struct igc_softc *, uint32_t); 125void igc_enable_intr(struct igc_softc *); 126void igc_disable_intr(struct igc_softc *); 127int igc_intr_link(void *); 128int igc_intr_queue(void *); 129 130int igc_allocate_transmit_buffers(struct tx_ring *); 131int igc_setup_transmit_structures(struct igc_softc *); 132int igc_setup_transmit_ring(struct tx_ring *); 133void igc_initialize_transmit_unit(struct igc_softc *); 134void igc_free_transmit_structures(struct igc_softc *); 135void igc_free_transmit_buffers(struct tx_ring *); 136int igc_allocate_receive_buffers(struct 
rx_ring *); 137int igc_setup_receive_structures(struct igc_softc *); 138int igc_setup_receive_ring(struct rx_ring *); 139void igc_initialize_receive_unit(struct igc_softc *); 140void igc_free_receive_structures(struct igc_softc *); 141void igc_free_receive_buffers(struct rx_ring *); 142void igc_initialize_rss_mapping(struct igc_softc *); 143 144void igc_get_hw_control(struct igc_softc *); 145void igc_release_hw_control(struct igc_softc *); 146int igc_is_valid_ether_addr(uint8_t *); 147 148/********************************************************************* 149 * OpenBSD Device Interface Entry Points 150 *********************************************************************/ 151 152struct cfdriver igc_cd = { 153 NULL, "igc", DV_IFNET 154}; 155 156const struct cfattach igc_ca = { 157 sizeof(struct igc_softc), igc_match, igc_attach, igc_detach 158}; 159 160/********************************************************************* 161 * Device identification routine 162 * 163 * igc_match determines if the driver should be loaded on 164 * adapter based on PCI vendor/device id of the adapter. 165 * 166 * return 0 on success, positive on failure 167 *********************************************************************/ 168int 169igc_match(struct device *parent, void *match, void *aux) 170{ 171 return pci_matchbyid((struct pci_attach_args *)aux, igc_devices, 172 nitems(igc_devices)); 173} 174 175/********************************************************************* 176 * Device initialization routine 177 * 178 * The attach entry point is called when the driver is being loaded. 179 * This routine identifies the type of hardware, allocates all resources 180 * and initializes the hardware. 
181 * 182 * return 0 on success, positive on failure 183 *********************************************************************/ 184void 185igc_attach(struct device *parent, struct device *self, void *aux) 186{ 187 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 188 struct igc_softc *sc = (struct igc_softc *)self; 189 struct igc_hw *hw = &sc->hw; 190 191 sc->osdep.os_sc = sc; 192 sc->osdep.os_pa = *pa; 193 194 /* Determine hardware and mac info */ 195 igc_identify_hardware(sc); 196 197 sc->num_tx_desc = IGC_DEFAULT_TXD; 198 sc->num_rx_desc = IGC_DEFAULT_RXD; 199 200 /* Setup PCI resources */ 201 if (igc_allocate_pci_resources(sc)) 202 goto err_pci; 203 204 /* Allocate TX/RX queues */ 205 if (igc_allocate_queues(sc)) 206 goto err_pci; 207 208 /* Do shared code initialization */ 209 if (igc_setup_init_funcs(hw, true)) { 210 printf(": Setup of shared code failed\n"); 211 goto err_pci; 212 } 213 214 hw->mac.autoneg = DO_AUTO_NEG; 215 hw->phy.autoneg_wait_to_complete = false; 216 hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 217 218 /* Copper options. */ 219 if (hw->phy.media_type == igc_media_type_copper) 220 hw->phy.mdix = AUTO_ALL_MODES; 221 222 /* Set the max frame size. */ 223 sc->hw.mac.max_frame_size = 9234; 224 225 /* Allocate multicast array memory. */ 226 sc->mta = mallocarray(ETHER_ADDR_LEN, MAX_NUM_MULTICAST_ADDRESSES, 227 M_DEVBUF, M_NOWAIT); 228 if (sc->mta == NULL) { 229 printf(": Can not allocate multicast setup array\n"); 230 goto err_late; 231 } 232 233 /* Check SOL/IDER usage. */ 234 if (igc_check_reset_block(hw)) 235 printf(": PHY reset is blocked due to SOL/IDER session\n"); 236 237 /* Enable Energy Efficient Ethernet. */ 238 sc->hw.dev_spec._i225.eee_disable = true; 239 240 igc_reset_hw(hw); 241 242 /* Make sure we have a good EEPROM before we read from it. 
*/ 243 if (igc_validate_nvm_checksum(hw) < 0) { 244 /* 245 * Some PCI-E parts fail the first check due to 246 * the link being in sleep state, call it again, 247 * if it fails a second time its a real issue. 248 */ 249 if (igc_validate_nvm_checksum(hw) < 0) { 250 printf(": The EEPROM checksum is not valid\n"); 251 goto err_late; 252 } 253 } 254 255 /* Copy the permanent MAC address out of the EEPROM. */ 256 if (igc_read_mac_addr(hw) < 0) { 257 printf(": EEPROM read error while reading MAC address\n"); 258 goto err_late; 259 } 260 261 if (!igc_is_valid_ether_addr(hw->mac.addr)) { 262 printf(": Invalid MAC address\n"); 263 goto err_late; 264 } 265 266 memcpy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN); 267 268 if (igc_allocate_msix(sc)) 269 goto err_late; 270 271 /* Setup OS specific network interface. */ 272 igc_setup_interface(sc); 273 274 igc_reset(sc); 275 hw->mac.get_link_status = true; 276 igc_update_link_status(sc); 277 278 /* The driver can now take control from firmware. */ 279 igc_get_hw_control(sc); 280 281 printf(", address %s\n", ether_sprintf(sc->hw.mac.addr)); 282 return; 283 284err_late: 285 igc_release_hw_control(sc); 286err_pci: 287 igc_free_pci_resources(sc); 288 free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 289} 290 291/********************************************************************* 292 * Device removal routine 293 * 294 * The detach entry point is called when the driver is being removed. 295 * This routine stops the adapter and deallocates all the resources 296 * that were allocated for driver operation. 
297 * 298 * return 0 on success, positive on failure 299 *********************************************************************/ 300int 301igc_detach(struct device *self, int flags) 302{ 303 struct igc_softc *sc = (struct igc_softc *)self; 304 struct ifnet *ifp = &sc->sc_ac.ac_if; 305 306 igc_stop(sc); 307 308 igc_phy_hw_reset(&sc->hw); 309 igc_release_hw_control(sc); 310 311 ether_ifdetach(ifp); 312 if_detach(ifp); 313 314 igc_free_pci_resources(sc); 315 316 igc_free_transmit_structures(sc); 317 igc_free_receive_structures(sc); 318 free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 319 320 return 0; 321} 322 323void 324igc_identify_hardware(struct igc_softc *sc) 325{ 326 struct igc_osdep *os = &sc->osdep; 327 struct pci_attach_args *pa = &os->os_pa; 328 329 /* Save off the information about this board. */ 330 sc->hw.device_id = PCI_PRODUCT(pa->pa_id); 331 332 /* Do shared code init and setup. */ 333 if (igc_set_mac_type(&sc->hw)) { 334 printf(": Setup init failure\n"); 335 return; 336 } 337} 338 339int 340igc_allocate_pci_resources(struct igc_softc *sc) 341{ 342 struct igc_osdep *os = &sc->osdep; 343 struct pci_attach_args *pa = &os->os_pa; 344 pcireg_t memtype; 345 346 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IGC_PCIREG); 347 if (pci_mapreg_map(pa, IGC_PCIREG, memtype, 0, &os->os_memt, 348 &os->os_memh, &os->os_membase, &os->os_memsize, 0)) { 349 printf(": unable to map registers\n"); 350 return ENXIO; 351 } 352 sc->hw.hw_addr = (uint8_t *)os->os_membase; 353 sc->hw.back = os; 354 355 igc_setup_msix(sc); 356 357 return 0; 358} 359 360int 361igc_allocate_queues(struct igc_softc *sc) 362{ 363 struct igc_queue *iq; 364 struct tx_ring *txr; 365 struct rx_ring *rxr; 366 int i, rsize, rxconf, tsize, txconf; 367 368 /* Allocate the top level queue structs. 
*/ 369 sc->queues = mallocarray(sc->sc_nqueues, sizeof(struct igc_queue), 370 M_DEVBUF, M_NOWAIT | M_ZERO); 371 if (sc->queues == NULL) { 372 printf("%s: unable to allocate queue\n", DEVNAME(sc)); 373 goto fail; 374 } 375 376 /* Allocate the TX ring. */ 377 sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring), 378 M_DEVBUF, M_NOWAIT | M_ZERO); 379 if (sc->tx_rings == NULL) { 380 printf("%s: unable to allocate TX ring\n", DEVNAME(sc)); 381 goto fail; 382 } 383 384 /* Allocate the RX ring. */ 385 sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct rx_ring), 386 M_DEVBUF, M_NOWAIT | M_ZERO); 387 if (sc->rx_rings == NULL) { 388 printf("%s: unable to allocate RX ring\n", DEVNAME(sc)); 389 goto rx_fail; 390 } 391 392 txconf = rxconf = 0; 393 394 /* Set up the TX queues. */ 395 tsize = roundup2(sc->num_tx_desc * sizeof(union igc_adv_tx_desc), 396 IGC_DBA_ALIGN); 397 for (i = 0; i < sc->sc_nqueues; i++, txconf++) { 398 txr = &sc->tx_rings[i]; 399 txr->sc = sc; 400 txr->me = i; 401 402 if (igc_dma_malloc(sc, tsize, &txr->txdma)) { 403 printf("%s: unable to allocate TX descriptor\n", 404 DEVNAME(sc)); 405 goto err_tx_desc; 406 } 407 txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr; 408 bzero((void *)txr->tx_base, tsize); 409 } 410 411 /* Set up the RX queues. */ 412 rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc), 413 IGC_DBA_ALIGN); 414 for (i = 0; i < sc->sc_nqueues; i++, rxconf++) { 415 rxr = &sc->rx_rings[i]; 416 rxr->sc = sc; 417 rxr->me = i; 418 timeout_set(&rxr->rx_refill, igc_rxrefill, rxr); 419 420 if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) { 421 printf("%s: unable to allocate RX descriptor\n", 422 DEVNAME(sc)); 423 goto err_rx_desc; 424 } 425 rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr; 426 bzero((void *)rxr->rx_base, rsize); 427 } 428 429 /* Set up the queue holding structs. 
*/ 430 for (i = 0; i < sc->sc_nqueues; i++) { 431 iq = &sc->queues[i]; 432 iq->sc = sc; 433 iq->txr = &sc->tx_rings[i]; 434 iq->rxr = &sc->rx_rings[i]; 435 snprintf(iq->name, sizeof(iq->name), "%s:%d", DEVNAME(sc), i); 436 } 437 438 return 0; 439 440err_rx_desc: 441 for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--) 442 igc_dma_free(sc, &rxr->rxdma); 443err_tx_desc: 444 for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) 445 igc_dma_free(sc, &txr->txdma); 446 free(sc->rx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct rx_ring)); 447 sc->rx_rings = NULL; 448rx_fail: 449 free(sc->tx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct tx_ring)); 450 sc->tx_rings = NULL; 451fail: 452 return ENOMEM; 453} 454 455void 456igc_free_pci_resources(struct igc_softc *sc) 457{ 458 struct igc_osdep *os = &sc->osdep; 459 struct pci_attach_args *pa = &os->os_pa; 460 struct igc_queue *iq = sc->queues; 461 int i; 462 463 /* Release all msix queue resources. */ 464 for (i = 0; i < sc->sc_nqueues; i++, iq++) { 465 if (iq->tag) 466 pci_intr_disestablish(pa->pa_pc, iq->tag); 467 iq->tag = NULL; 468 } 469 470 if (sc->tag) 471 pci_intr_disestablish(pa->pa_pc, sc->tag); 472 sc->tag = NULL; 473 if (os->os_membase != 0) 474 bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize); 475 os->os_membase = 0; 476} 477 478/********************************************************************* 479 * 480 * Initialize the hardware to a configuration as specified by the 481 * adapter structure. 482 * 483 **********************************************************************/ 484void 485igc_reset(struct igc_softc *sc) 486{ 487 struct igc_hw *hw = &sc->hw; 488 uint32_t pba; 489 uint16_t rx_buffer_size; 490 491 /* Let the firmware know the OS is in control */ 492 igc_get_hw_control(sc); 493 494 /* 495 * Packet Buffer Allocation (PBA) 496 * Writing PBA sets the receive portion of the buffer 497 * the remainder is used for the transmit buffer. 
498 */ 499 pba = IGC_PBA_34K; 500 501 /* 502 * These parameters control the automatic generation (Tx) and 503 * response (Rx) to Ethernet PAUSE frames. 504 * - High water mark should allow for at least two frames to be 505 * received after sending an XOFF. 506 * - Low water mark works best when it is very near the high water mark. 507 * This allows the receiver to restart by sending XON when it has 508 * drained a bit. Here we use an arbitrary value of 1500 which will 509 * restart after one full frame is pulled from the buffer. There 510 * could be several smaller frames in the buffer and if so they will 511 * not trigger the XON until their total number reduces the buffer 512 * by 1500. 513 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 514 */ 515 rx_buffer_size = (pba & 0xffff) << 10; 516 hw->fc.high_water = rx_buffer_size - 517 roundup2(sc->hw.mac.max_frame_size, 1024); 518 /* 16-byte granularity */ 519 hw->fc.low_water = hw->fc.high_water - 16; 520 521 if (sc->fc) /* locally set flow control value? 
*/ 522 hw->fc.requested_mode = sc->fc; 523 else 524 hw->fc.requested_mode = igc_fc_full; 525 526 hw->fc.pause_time = IGC_FC_PAUSE_TIME; 527 528 hw->fc.send_xon = true; 529 530 /* Issue a global reset */ 531 igc_reset_hw(hw); 532 IGC_WRITE_REG(hw, IGC_WUC, 0); 533 534 /* and a re-init */ 535 if (igc_init_hw(hw) < 0) { 536 printf(": Hardware Initialization Failed\n"); 537 return; 538 } 539 540 /* Setup DMA Coalescing */ 541 igc_init_dmac(sc, pba); 542 543 IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN); 544 igc_get_phy_info(hw); 545 igc_check_for_link(hw); 546} 547 548/********************************************************************* 549 * 550 * Initialize the DMA Coalescing feature 551 * 552 **********************************************************************/ 553void 554igc_init_dmac(struct igc_softc *sc, uint32_t pba) 555{ 556 struct igc_hw *hw = &sc->hw; 557 uint32_t dmac, reg = ~IGC_DMACR_DMAC_EN; 558 uint16_t hwm, max_frame_size; 559 int status; 560 561 max_frame_size = sc->hw.mac.max_frame_size; 562 563 if (sc->dmac == 0) { /* Disabling it */ 564 IGC_WRITE_REG(hw, IGC_DMACR, reg); 565 return; 566 } else 567 printf(": DMA Coalescing enabled\n"); 568 569 /* Set starting threshold */ 570 IGC_WRITE_REG(hw, IGC_DMCTXTH, 0); 571 572 hwm = 64 * pba - max_frame_size / 16; 573 if (hwm < 64 * (pba - 6)) 574 hwm = 64 * (pba - 6); 575 reg = IGC_READ_REG(hw, IGC_FCRTC); 576 reg &= ~IGC_FCRTC_RTH_COAL_MASK; 577 reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT) 578 & IGC_FCRTC_RTH_COAL_MASK); 579 IGC_WRITE_REG(hw, IGC_FCRTC, reg); 580 581 dmac = pba - max_frame_size / 512; 582 if (dmac < pba - 10) 583 dmac = pba - 10; 584 reg = IGC_READ_REG(hw, IGC_DMACR); 585 reg &= ~IGC_DMACR_DMACTHR_MASK; 586 reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT) 587 & IGC_DMACR_DMACTHR_MASK); 588 589 /* transition to L0x or L1 if available..*/ 590 reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK); 591 592 /* Check if status is 2.5Gb backplane connection 593 * before configuration of watchdog timer, which 
is 594 * in msec values in 12.8usec intervals 595 * watchdog timer= msec values in 32usec intervals 596 * for non 2.5Gb connection 597 */ 598 status = IGC_READ_REG(hw, IGC_STATUS); 599 if ((status & IGC_STATUS_2P5_SKU) && 600 (!(status & IGC_STATUS_2P5_SKU_OVER))) 601 reg |= ((sc->dmac * 5) >> 6); 602 else 603 reg |= (sc->dmac >> 5); 604 605 IGC_WRITE_REG(hw, IGC_DMACR, reg); 606 607 IGC_WRITE_REG(hw, IGC_DMCRTRH, 0); 608 609 /* Set the interval before transition */ 610 reg = IGC_READ_REG(hw, IGC_DMCTLX); 611 reg |= IGC_DMCTLX_DCFLUSH_DIS; 612 613 /* 614 ** in 2.5Gb connection, TTLX unit is 0.4 usec 615 ** which is 0x4*2 = 0xA. But delay is still 4 usec 616 */ 617 status = IGC_READ_REG(hw, IGC_STATUS); 618 if ((status & IGC_STATUS_2P5_SKU) && 619 (!(status & IGC_STATUS_2P5_SKU_OVER))) 620 reg |= 0xA; 621 else 622 reg |= 0x4; 623 624 IGC_WRITE_REG(hw, IGC_DMCTLX, reg); 625 626 /* free space in tx packet buffer to wake from DMA coal */ 627 IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE - 628 (2 * max_frame_size)) >> 6); 629 630 /* make low power state decision controlled by DMA coal */ 631 reg = IGC_READ_REG(hw, IGC_PCIEMISC); 632 reg &= ~IGC_PCIEMISC_LX_DECISION; 633 IGC_WRITE_REG(hw, IGC_PCIEMISC, reg); 634} 635 636int 637igc_allocate_msix(struct igc_softc *sc) 638{ 639 struct igc_osdep *os = &sc->osdep; 640 struct pci_attach_args *pa = &os->os_pa; 641 struct igc_queue *iq; 642 pci_intr_handle_t ih; 643 int i, error = 0; 644 645 for (i = 0, iq = sc->queues; i < sc->sc_nqueues; i++, iq++) { 646 if (pci_intr_map_msix(pa, i, &ih)) { 647 printf("%s: unable to map msi-x vector %d\n", 648 DEVNAME(sc), i); 649 error = ENOMEM; 650 goto fail; 651 } 652 653 iq->tag = pci_intr_establish_cpu(pa->pa_pc, ih, 654 IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i), 655 igc_intr_queue, iq, iq->name); 656 if (iq->tag == NULL) { 657 printf("%s: unable to establish interrupt %d\n", 658 DEVNAME(sc), i); 659 error = ENOMEM; 660 goto fail; 661 } 662 663 iq->msix = i; 664 iq->eims = 1 << 
i; 665 } 666 667 /* Now the link status/control last MSI-X vector. */ 668 if (pci_intr_map_msix(pa, i, &ih)) { 669 printf("%s: unable to map link vector\n", DEVNAME(sc)); 670 error = ENOMEM; 671 goto fail; 672 } 673 674 sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE, 675 igc_intr_link, sc, sc->sc_dev.dv_xname); 676 if (sc->tag == NULL) { 677 printf("%s: unable to establish link interrupt\n", DEVNAME(sc)); 678 error = ENOMEM; 679 goto fail; 680 } 681 682 sc->linkvec = i; 683 printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih), 684 i, (i > 1) ? "s" : ""); 685 686 return 0; 687fail: 688 for (iq = sc->queues; i > 0; i--, iq++) { 689 if (iq->tag == NULL) 690 continue; 691 pci_intr_disestablish(pa->pa_pc, iq->tag); 692 iq->tag = NULL; 693 } 694 695 return error; 696} 697 698void 699igc_setup_msix(struct igc_softc *sc) 700{ 701 struct igc_osdep *os = &sc->osdep; 702 struct pci_attach_args *pa = &os->os_pa; 703 int nmsix; 704 705 nmsix = pci_intr_msix_count(pa); 706 if (nmsix <= 1) 707 printf(": not enough msi-x vectors\n"); 708 709 /* Give one vector to events. 
*/ 710 nmsix--; 711 712 sc->sc_intrmap = intrmap_create(&sc->sc_dev, nmsix, IGC_MAX_VECTORS, 713 INTRMAP_POWEROF2); 714 sc->sc_nqueues = intrmap_count(sc->sc_intrmap); 715} 716 717int 718igc_dma_malloc(struct igc_softc *sc, bus_size_t size, struct igc_dma_alloc *dma) 719{ 720 struct igc_osdep *os = &sc->osdep; 721 722 dma->dma_tag = os->os_pa.pa_dmat; 723 724 if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0, BUS_DMA_NOWAIT, 725 &dma->dma_map)) 726 return 1; 727 if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg, 728 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) 729 goto destroy; 730 if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size, 731 &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) 732 goto free; 733 if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, 734 NULL, BUS_DMA_NOWAIT)) 735 goto unmap; 736 737 dma->dma_size = size; 738 739 return 0; 740unmap: 741 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size); 742free: 743 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg); 744destroy: 745 bus_dmamap_destroy(dma->dma_tag, dma->dma_map); 746 dma->dma_map = NULL; 747 dma->dma_tag = NULL; 748 return 1; 749} 750 751void 752igc_dma_free(struct igc_softc *sc, struct igc_dma_alloc *dma) 753{ 754 if (dma->dma_tag == NULL) 755 return; 756 757 if (dma->dma_map != NULL) { 758 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, 759 dma->dma_map->dm_mapsize, 760 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 761 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 762 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size); 763 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg); 764 bus_dmamap_destroy(dma->dma_tag, dma->dma_map); 765 dma->dma_map = NULL; 766 } 767} 768 769/********************************************************************* 770 * 771 * Setup networking device structure and register an interface. 
772 * 773 **********************************************************************/ 774void 775igc_setup_interface(struct igc_softc *sc) 776{ 777 struct ifnet *ifp = &sc->sc_ac.ac_if; 778 int i; 779 780 ifp->if_softc = sc; 781 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ); 782 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 783 ifp->if_xflags = IFXF_MPSAFE; 784 ifp->if_ioctl = igc_ioctl; 785 ifp->if_qstart = igc_start; 786 ifp->if_watchdog = igc_watchdog; 787 ifp->if_hardmtu = sc->hw.mac.max_frame_size - ETHER_HDR_LEN - 788 ETHER_CRC_LEN; 789 ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1); 790 791 ifp->if_capabilities = IFCAP_VLAN_MTU; 792 793#ifdef notyet 794#if NVLAN > 0 795 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 796#endif 797#endif 798 799 ifp->if_capabilities |= IFCAP_CSUM_IPv4; 800 ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 801 ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6; 802 803 /* Initialize ifmedia structures. */ 804 ifmedia_init(&sc->media, IFM_IMASK, igc_media_change, igc_media_status); 805 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 806 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); 807 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 808 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); 809 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 810 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); 811 ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_T, 0, NULL); 812 813 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 814 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 815 816 if_attach(ifp); 817 ether_ifattach(ifp); 818 819 if_attach_queues(ifp, sc->sc_nqueues); 820 if_attach_iqueues(ifp, sc->sc_nqueues); 821 for (i = 0; i < sc->sc_nqueues; i++) { 822 struct ifqueue *ifq = ifp->if_ifqs[i]; 823 struct ifiqueue *ifiq = ifp->if_iqs[i]; 824 struct tx_ring *txr = &sc->tx_rings[i]; 825 struct rx_ring *rxr = &sc->rx_rings[i]; 
826 827 ifq->ifq_softc = txr; 828 txr->ifq = ifq; 829 830 ifiq->ifiq_softc = rxr; 831 rxr->ifiq = ifiq; 832 } 833} 834 835void 836igc_init(void *arg) 837{ 838 struct igc_softc *sc = (struct igc_softc *)arg; 839 struct ifnet *ifp = &sc->sc_ac.ac_if; 840 struct rx_ring *rxr; 841 uint32_t ctrl = 0; 842 int i, s; 843 844 s = splnet(); 845 846 igc_stop(sc); 847 848 /* Get the latest mac address, user can use a LAA. */ 849 bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN); 850 851 /* Put the address into the receive address array. */ 852 igc_rar_set(&sc->hw, sc->hw.mac.addr, 0); 853 854 /* Initialize the hardware. */ 855 igc_reset(sc); 856 igc_update_link_status(sc); 857 858 /* Setup VLAN support, basic and offload if available. */ 859 IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN); 860 861 /* Prepare transmit descriptors and buffers. */ 862 if (igc_setup_transmit_structures(sc)) { 863 printf("%s: Could not setup transmit structures\n", 864 DEVNAME(sc)); 865 igc_stop(sc); 866 splx(s); 867 return; 868 } 869 igc_initialize_transmit_unit(sc); 870 871 sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN; 872 /* Prepare receive descriptors and buffers. */ 873 if (igc_setup_receive_structures(sc)) { 874 printf("%s: Could not setup receive structures\n", 875 DEVNAME(sc)); 876 igc_stop(sc); 877 splx(s); 878 return; 879 } 880 igc_initialize_receive_unit(sc); 881 882 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) { 883 ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL); 884 ctrl |= IGC_CTRL_VME; 885 IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl); 886 } 887 888 /* Setup multicast table. */ 889 igc_iff(sc); 890 891 igc_clear_hw_cntrs_base_generic(&sc->hw); 892 893 igc_configure_queues(sc); 894 895 /* This clears any pending interrupts */ 896 IGC_READ_REG(&sc->hw, IGC_ICR); 897 IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC); 898 899 /* The driver can now take control from firmware. */ 900 igc_get_hw_control(sc); 901 902 /* Set Energy Efficient Ethernet. 
*/ 903 igc_set_eee_i225(&sc->hw, true, true, true); 904 905 for (i = 0; i < sc->sc_nqueues; i++) { 906 rxr = &sc->rx_rings[i]; 907 igc_rxfill(rxr); 908 if (if_rxr_inuse(&rxr->rx_ring) == 0) { 909 printf("%s: Unable to fill any rx descriptors\n", 910 DEVNAME(sc)); 911 igc_stop(sc); 912 splx(s); 913 } 914 IGC_WRITE_REG(&sc->hw, IGC_RDT(i), 915 (rxr->last_desc_filled + 1) % sc->num_rx_desc); 916 } 917 918 igc_enable_intr(sc); 919 920 ifp->if_flags |= IFF_RUNNING; 921 for (i = 0; i < sc->sc_nqueues; i++) 922 ifq_clr_oactive(ifp->if_ifqs[i]); 923 924 splx(s); 925} 926 927static inline int 928igc_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m) 929{ 930 int error; 931 932 error = bus_dmamap_load_mbuf(dmat, map, m, 933 BUS_DMA_STREAMING | BUS_DMA_NOWAIT); 934 if (error != EFBIG) 935 return (error); 936 937 error = m_defrag(m, M_DONTWAIT); 938 if (error != 0) 939 return (error); 940 941 return (bus_dmamap_load_mbuf(dmat, map, m, 942 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)); 943} 944 945void 946igc_start(struct ifqueue *ifq) 947{ 948 struct ifnet *ifp = ifq->ifq_if; 949 struct igc_softc *sc = ifp->if_softc; 950 struct tx_ring *txr = ifq->ifq_softc; 951 union igc_adv_tx_desc *txdesc; 952 struct igc_tx_buf *txbuf; 953 bus_dmamap_t map; 954 struct mbuf *m; 955 unsigned int prod, free, last, i; 956 unsigned int mask; 957 uint32_t cmd_type_len; 958 uint32_t olinfo_status; 959 int post = 0; 960#if NBPFILTER > 0 961 caddr_t if_bpf; 962#endif 963 964 if (!sc->link_active) { 965 ifq_purge(ifq); 966 return; 967 } 968 969 prod = txr->next_avail_desc; 970 free = txr->next_to_clean; 971 if (free <= prod) 972 free += sc->num_tx_desc; 973 free -= prod; 974 975 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0, 976 txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 977 978 mask = sc->num_tx_desc - 1; 979 980 for (;;) { 981 if (free <= IGC_MAX_SCATTER + 1) { 982 ifq_set_oactive(ifq); 983 break; 984 } 985 986 m = ifq_dequeue(ifq); 987 if (m == NULL) 988 break; 989 990 
		txbuf = &txr->tx_buffers[prod];
		map = txbuf->map;

		if (igc_load_mbuf(txr->txdma.dma_tag, map, m) != 0) {
			ifq->ifq_errors++;
			m_freem(m);
			continue;
		}

		olinfo_status = m->m_pkthdr.len << IGC_ADVTXD_PAYLEN_SHIFT;

		bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* Offload setup may emit a context descriptor at prod. */
		if (igc_tx_ctx_setup(txr, m, prod, &olinfo_status)) {
			/* Consume the first descriptor */
			prod++;
			prod &= mask;
			free--;
		}

		/* One data descriptor per DMA segment; EOP|RS on the last. */
		for (i = 0; i < map->dm_nsegs; i++) {
			txdesc = &txr->tx_base[prod];

			cmd_type_len = IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DTYP_DATA |
			    IGC_ADVTXD_DCMD_DEXT | map->dm_segs[i].ds_len;
			if (i == map->dm_nsegs - 1)
				cmd_type_len |= IGC_ADVTXD_DCMD_EOP |
				    IGC_ADVTXD_DCMD_RS;

			htolem64(&txdesc->read.buffer_addr, map->dm_segs[i].ds_addr);
			htolem32(&txdesc->read.cmd_type_len, cmd_type_len);
			htolem32(&txdesc->read.olinfo_status, olinfo_status);

			last = prod;

			prod++;
			prod &= mask;
		}

		/* Remember the mbuf and its EOP slot for igc_txeof(). */
		txbuf->m_head = m;
		txbuf->eop_index = last;

#if NBPFILTER > 0
		if_bpf = ifp->if_bpf;
		if (if_bpf)
			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
#endif

		free -= i;
		post = 1;
	}

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Tell the hardware about the new producer index. */
	if (post) {
		txr->next_avail_desc = prod;
		IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
	}
}

/*
 * Reclaim completed TX descriptors: for every packet whose EOP
 * descriptor has the DD (descriptor done) bit set, unload the DMA map
 * and free the mbuf, then restart the ifqueue if it was flow
 * controlled.  Returns nonzero if any descriptor was reclaimed.
 */
int
igc_txeof(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;
	struct ifqueue *ifq = txr->ifq;
	union igc_adv_tx_desc *txdesc;
	struct igc_tx_buf *txbuf;
	bus_dmamap_t map;
	unsigned int cons, prod, last;
	unsigned int mask;
	int done = 0;

	prod = txr->next_avail_desc;
	cons = txr->next_to_clean;

	/* Ring empty: nothing to reclaim. */
	if (cons == prod)
		return (0);

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	mask = sc->num_tx_desc - 1;

	do {
		txbuf = &txr->tx_buffers[cons];
		last = txbuf->eop_index;
		txdesc = &txr->tx_base[last];

		/* Stop at the first packet the hardware hasn't finished. */
		if (!(txdesc->wb.status & htole32(IGC_TXD_STAT_DD)))
			break;

		map = txbuf->map;

		bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->txdma.dma_tag, map);
		m_freem(txbuf->m_head);

		txbuf->m_head = NULL;
		txbuf->eop_index = -1;

		/* Skip past this packet's descriptors to the next head. */
		cons = last + 1;
		cons &= mask;

		done = 1;
	} while (cons != prod);

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	txr->next_to_clean = cons;

	if (ifq_is_oactive(ifq))
		ifq_restart(ifq);

	return (done);
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
void
igc_stop(struct igc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	/* Tell the stack that the interface is no longer active.
 */
	ifp->if_flags &= ~IFF_RUNNING;

	igc_disable_intr(sc);

	igc_reset_hw(&sc->hw);
	IGC_WRITE_REG(&sc->hw, IGC_WUC, 0);

	/* Wait for in-flight interrupt handlers and queued work. */
	intr_barrier(sc->tag);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifqueue *ifq = ifp->if_ifqs[i];
		ifq_barrier(ifq);
		ifq_clr_oactive(ifq);

		if (sc->queues[i].tag != NULL)
			intr_barrier(sc->queues[i].tag);
		timeout_del(&sc->rx_rings[i].rx_refill);
	}

	igc_free_transmit_structures(sc);
	igc_free_receive_structures(sc);

	igc_update_link_status(sc);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  igc_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
int
igc_ioctl(struct ifnet * ifp, u_long cmd, caddr_t data)
{
	struct igc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			igc_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				igc_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				igc_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;
	case SIOCGIFRXR:
		error = igc_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	/* ENETRESET: only the RX filters need reprogramming. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			igc_disable_intr(sc);
			igc_iff(sc);
			igc_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return error;
}

/*
 * SIOCGIFRXR handler: report the per-queue RX ring occupancy to
 * userland (used by e.g. systat).
 */
int
igc_rxrinfo(struct igc_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr;
	struct rx_ring *rxr;
	int error, i, n = 0;

	/*
	 * NOTE(review): with M_WAITOK (and no M_CANFAIL) mallocarray
	 * should not return NULL; the check is defensive.
	 */
	if ((ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF,
	    M_WAITOK | M_ZERO)) == NULL)
		return ENOMEM;

	for (i = 0; i < sc->sc_nqueues; i++) {
		rxr = &sc->rx_rings[i];
		ifr[n].ifr_size = MCLBYTES;
		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
		ifr[n].ifr_info = rxr->rx_ring;
		n++;
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
	free(ifr, M_DEVBUF, sc->sc_nqueues * sizeof(*ifr));

	return error;
}

/*
 * Refill empty RX descriptors with fresh mbufs, starting after the
 * last filled slot.  Returns nonzero if at least one descriptor was
 * posted (caller then updates the RDT register).
 */
int
igc_rxfill(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	int i, post = 0;
	u_int slots;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	i = rxr->last_desc_filled;
	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc); slots > 0;
	    slots--) {
		if (++i == sc->num_rx_desc)
			i = 0;

		if (igc_get_buf(rxr, i) != 0)
			break;

		rxr->last_desc_filled = i;
		post = 1;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Return the slots we could not fill to the rxr accounting. */
	if_rxr_put(&rxr->rx_ring, slots);

	return post;
}

/*
 * Timeout handler: retry filling an RX ring that ran dry and, on
 * success, advance the tail register; reschedule itself while the
 * ring remains empty.
 */
void
igc_rxrefill(void *xrxr)
{
	struct rx_ring *rxr = xrxr;
	struct igc_softc *sc = rxr->sc;

	if (igc_rxfill(rxr)) {
		IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me),
		    (rxr->last_desc_filled + 1) % sc->num_rx_desc);
	}
	else if (if_rxr_inuse(&rxr->rx_ring) == 0)
		timeout_add(&rxr->rx_refill, 1);
}

/*********************************************************************
 *
 *  This routine executes in interrupt context.
 It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *********************************************************************/
int
igc_rxeof(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	struct igc_rx_buf *rxbuf, *nxbuf;
	union igc_adv_rx_desc *rxdesc;
	uint32_t ptype, staterr = 0;
	uint16_t len, vtag;
	uint8_t eop = 0;
	int i, nextp;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return 0;

	i = rxr->next_to_check;
	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
		uint32_t hash;
		uint16_t hashtype;

		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    i * sizeof(union igc_adv_rx_desc),
		    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_POSTREAD);

		rxdesc = &rxr->rx_base[i];
		staterr = letoh32(rxdesc->wb.upper.status_error);
		/* DD clear: hardware has not written this slot back yet. */
		if (!ISSET(staterr, IGC_RXD_STAT_DD)) {
			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
			    i * sizeof(union igc_adv_rx_desc),
			    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD);
			break;
		}

		/* Zero out the receive descriptors status. */
		rxdesc->wb.upper.status_error = 0;
		rxbuf = &rxr->rx_buffers[i];

		/* Pull the mbuf off the ring.
 */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);

		/* Extract length, VLAN tag, packet type and RSS hash. */
		mp = rxbuf->buf;
		len = letoh16(rxdesc->wb.upper.length);
		vtag = letoh16(rxdesc->wb.upper.vlan);
		eop = ((staterr & IGC_RXD_STAT_EOP) == IGC_RXD_STAT_EOP);
		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
		    IGC_PKTTYPE_MASK;
		hash = letoh32(rxdesc->wb.lower.hi_dword.rss);
		hashtype = le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
		    IGC_RXDADV_RSSTYPE_MASK;

		/* RX error: drop this fragment and any accumulated head. */
		if (staterr & IGC_RXDEXT_STATERR_RXE) {
			if (rxbuf->fmp) {
				m_freem(rxbuf->fmp);
				rxbuf->fmp = NULL;
			}

			m_freem(mp);
			rxbuf->buf = NULL;
			goto next_desc;
		}

		if (mp == NULL) {
			panic("%s: igc_rxeof: NULL mbuf in slot %d "
			    "(nrx %d, filled %d)", DEVNAME(sc), i,
			    if_rxr_inuse(&rxr->rx_ring), rxr->last_desc_filled);
		}

		if (!eop) {
			/*
			 * Figure out the next descriptor of this frame.
			 */
			nextp = i + 1;
			if (nextp == sc->num_rx_desc)
				nextp = 0;
			nxbuf = &rxr->rx_buffers[nextp];
			/* prefetch(nxbuf); */
		}

		mp->m_len = len;

		/* fmp holds the head of a multi-descriptor frame. */
		m = rxbuf->fmp;
		rxbuf->buf = rxbuf->fmp = NULL;

		if (m != NULL)
			m->m_pkthdr.len += mp->m_len;
		else {
			m = mp;
			m->m_pkthdr.len = mp->m_len;
#if NVLAN > 0
			if (staterr & IGC_RXD_STAT_VP) {
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
#endif
		}

		/* Pass the head pointer on */
		if (eop == 0) {
			nxbuf->fmp = m;
			m = NULL;
			mp->m_next = nxbuf->buf;
		} else {
			igc_rx_checksum(staterr, m, ptype);

			if (hashtype != IGC_RXDADV_RSSTYPE_NONE) {
				m->m_pkthdr.ph_flowid = hash;
				SET(m->m_pkthdr.csum_flags, M_FLOWID);
			}

			ml_enqueue(&ml, m);
		}
next_desc:
		if_rxr_put(&rxr->rx_ring, 1);
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    i * sizeof(union igc_adv_rx_desc),
		    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD);

		/* Advance our pointers to the next descriptor. */
		if (++i == sc->num_rx_desc)
			i = 0;
	}
	rxr->next_to_check = i;

	if (ifiq_input(rxr->ifiq, &ml))
		if_rxr_livelocked(&rxr->rx_ring);

	/* Nonzero return means we stopped on an unprocessed descriptor. */
	if (!(staterr & IGC_RXD_STAT_DD))
		return 0;

	return 1;
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
1423 * 1424 *********************************************************************/ 1425void 1426igc_rx_checksum(uint32_t staterr, struct mbuf *m, uint32_t ptype) 1427{ 1428 uint16_t status = (uint16_t)staterr; 1429 uint8_t errors = (uint8_t)(staterr >> 24); 1430 1431 if (status & IGC_RXD_STAT_IPCS) { 1432 if (!(errors & IGC_RXD_ERR_IPE)) { 1433 /* IP Checksum Good */ 1434 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK; 1435 } else 1436 m->m_pkthdr.csum_flags = 0; 1437 } 1438 1439 if (status & (IGC_RXD_STAT_TCPCS | IGC_RXD_STAT_UDPCS)) { 1440 if (!(errors & IGC_RXD_ERR_TCPE)) 1441 m->m_pkthdr.csum_flags |= 1442 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK; 1443 } 1444} 1445 1446void 1447igc_watchdog(struct ifnet * ifp) 1448{ 1449} 1450 1451/********************************************************************* 1452 * 1453 * Media Ioctl callback 1454 * 1455 * This routine is called whenever the user queries the status of 1456 * the interface using ifconfig. 1457 * 1458 **********************************************************************/ 1459void 1460igc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1461{ 1462 struct igc_softc *sc = ifp->if_softc; 1463 1464 igc_update_link_status(sc); 1465 1466 ifmr->ifm_status = IFM_AVALID; 1467 ifmr->ifm_active = IFM_ETHER; 1468 1469 if (!sc->link_active) { 1470 ifmr->ifm_active |= IFM_NONE; 1471 return; 1472 } 1473 1474 ifmr->ifm_status |= IFM_ACTIVE; 1475 1476 switch (sc->link_speed) { 1477 case 10: 1478 ifmr->ifm_active |= IFM_10_T; 1479 break; 1480 case 100: 1481 ifmr->ifm_active |= IFM_100_TX; 1482 break; 1483 case 1000: 1484 ifmr->ifm_active |= IFM_1000_T; 1485 break; 1486 case 2500: 1487 ifmr->ifm_active |= IFM_2500_T; 1488 break; 1489 } 1490 1491 if (sc->link_duplex == FULL_DUPLEX) 1492 ifmr->ifm_active |= IFM_FDX; 1493 else 1494 ifmr->ifm_active |= IFM_HDX; 1495} 1496 1497/********************************************************************* 1498 * 1499 * Media Ioctl callback 1500 * 1501 * This routine is called when the 
user changes speed/duplex using 1502 * media/mediopt option with ifconfig. 1503 * 1504 **********************************************************************/ 1505int 1506igc_media_change(struct ifnet *ifp) 1507{ 1508 struct igc_softc *sc = ifp->if_softc; 1509 struct ifmedia *ifm = &sc->media; 1510 1511 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1512 return (EINVAL); 1513 1514 sc->hw.mac.autoneg = DO_AUTO_NEG; 1515 1516 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1517 case IFM_AUTO: 1518 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1519 break; 1520 case IFM_2500_T: 1521 sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; 1522 break; 1523 case IFM_1000_T: 1524 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1525 break; 1526 case IFM_100_TX: 1527 if ((ifm->ifm_media & IFM_GMASK) == IFM_HDX) 1528 sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF; 1529 else 1530 sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL; 1531 break; 1532 case IFM_10_T: 1533 if ((ifm->ifm_media & IFM_GMASK) == IFM_HDX) 1534 sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF; 1535 else 1536 sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL; 1537 break; 1538 default: 1539 return EINVAL; 1540 } 1541 1542 igc_init(sc); 1543 1544 return 0; 1545} 1546 1547void 1548igc_iff(struct igc_softc *sc) 1549{ 1550 struct ifnet *ifp = &sc->sc_ac.ac_if; 1551 struct arpcom *ac = &sc->sc_ac; 1552 struct ether_multi *enm; 1553 struct ether_multistep step; 1554 uint32_t reg_rctl = 0; 1555 uint8_t *mta; 1556 int mcnt = 0; 1557 1558 mta = sc->mta; 1559 bzero(mta, sizeof(uint8_t) * ETHER_ADDR_LEN * 1560 MAX_NUM_MULTICAST_ADDRESSES); 1561 1562 reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL); 1563 reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE); 1564 ifp->if_flags &= ~IFF_ALLMULTI; 1565 1566 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 || 1567 ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) { 1568 ifp->if_flags |= IFF_ALLMULTI; 1569 reg_rctl |= IGC_RCTL_MPE; 1570 if (ifp->if_flags & IFF_PROMISC) 1571 
reg_rctl |= IGC_RCTL_UPE; 1572 } else { 1573 ETHER_FIRST_MULTI(step, ac, enm); 1574 while (enm != NULL) { 1575 bcopy(enm->enm_addrlo, 1576 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1577 mcnt++; 1578 1579 ETHER_NEXT_MULTI(step, enm); 1580 } 1581 1582 igc_update_mc_addr_list(&sc->hw, mta, mcnt); 1583 } 1584 1585 IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl); 1586} 1587 1588void 1589igc_update_link_status(struct igc_softc *sc) 1590{ 1591 struct ifnet *ifp = &sc->sc_ac.ac_if; 1592 struct igc_hw *hw = &sc->hw; 1593 int link_state; 1594 1595 if (IGC_READ_REG(&sc->hw, IGC_STATUS) & IGC_STATUS_LU) { 1596 if (sc->link_active == 0) { 1597 igc_get_speed_and_duplex(hw, &sc->link_speed, 1598 &sc->link_duplex); 1599 sc->link_active = 1; 1600 ifp->if_baudrate = IF_Mbps(sc->link_speed); 1601 } 1602 link_state = (sc->link_duplex == FULL_DUPLEX) ? 1603 LINK_STATE_FULL_DUPLEX : LINK_STATE_HALF_DUPLEX; 1604 } else { 1605 if (sc->link_active == 1) { 1606 ifp->if_baudrate = sc->link_speed = 0; 1607 sc->link_duplex = 0; 1608 sc->link_active = 0; 1609 } 1610 link_state = LINK_STATE_DOWN; 1611 } 1612 if (ifp->if_link_state != link_state) { 1613 ifp->if_link_state = link_state; 1614 if_link_state_change(ifp); 1615 } 1616} 1617 1618/********************************************************************* 1619 * 1620 * Get a buffer from system mbuf buffer pool. 
 *
 **********************************************************************/
int
igc_get_buf(struct rx_ring *rxr, int i)
{
	struct igc_softc *sc = rxr->sc;
	struct igc_rx_buf *rxbuf;
	struct mbuf *m;
	union igc_adv_rx_desc *rxdesc;
	int error;

	rxbuf = &rxr->rx_buffers[i];
	rxdesc = &rxr->rx_base[i];
	if (rxbuf->buf) {
		printf("%s: slot %d already has an mbuf\n", DEVNAME(sc), i);
		return ENOBUFS;
	}

	m = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz);
	if (!m)
		return ENOBUFS;

	/* Align the payload to the end of the cluster. */
	m->m_data += (m->m_ext.ext_size - sc->rx_mbuf_sz);
	m->m_len = m->m_pkthdr.len = sc->rx_mbuf_sz;

	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map, m,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return error;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
	    rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxbuf->buf = m;

	/* Hand the buffer's DMA address to the hardware. */
	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);

	return 0;
}

/*
 * MSI-X setup: route every queue's RX and TX interrupts to its
 * vector, program the initial interrupt throttling rate, and wire up
 * the link vector.
 */
void
igc_configure_queues(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	struct igc_queue *iq = sc->queues;
	uint32_t ivar, newitr = 0;
	int i;

	/* First turn on RSS capability */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME |
	    IGC_GPIE_PBA | IGC_GPIE_NSICR);

	/* Set the starting interrupt rate */
	newitr = (4000000 / MAX_INTS_PER_SEC) & 0x7FFC;

	newitr |= IGC_EITR_CNT_IGNR;

	/* Turn on MSI-X */
	for (i = 0; i < sc->sc_nqueues; i++, iq++) {
		/* RX entries */
		igc_set_queues(sc, i, iq->msix, 0);
		/* TX entries */
		igc_set_queues(sc, i, iq->msix, 1);
		sc->msix_queuesmask |= iq->eims;
		IGC_WRITE_REG(hw, IGC_EITR(iq->msix), newitr);
	}

	/* And for the link interrupt */
	ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
	sc->msix_linkmask = 1 << sc->linkvec;
	IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);
}

/*
 * Program one entry of the IVAR table, which maps a queue's RX
 * (type 0) or TX (type 1) interrupt to an MSI-X vector.  Each IVAR
 * register holds the entries for two queues.
 */
void
igc_set_queues(struct igc_softc *sc, uint32_t entry, uint32_t vector, int type)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t ivar, index;

	index = entry >> 1;
	ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
	if (type) {
		if (entry & 1) {
			ivar &= 0x00FFFFFF;
			ivar |= (vector | IGC_IVAR_VALID) << 24;
		} else {
			ivar &= 0xFFFF00FF;
			ivar |= (vector | IGC_IVAR_VALID) << 8;
		}
	} else {
		if (entry & 1) {
			ivar &= 0xFF00FFFF;
			ivar |= (vector | IGC_IVAR_VALID) << 16;
		} else {
			ivar &= 0xFFFFFF00;
			ivar |= vector | IGC_IVAR_VALID;
		}
	}
	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
}

/* Re-arm the MSI-X interrupt for one queue. */
void
igc_enable_queue(struct igc_softc *sc, uint32_t eims)
{
	IGC_WRITE_REG(&sc->hw, IGC_EIMS, eims);
}

/* Unmask all configured queue interrupts plus the link interrupt. */
void
igc_enable_intr(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t mask;

	mask = (sc->msix_queuesmask | sc->msix_linkmask);
	IGC_WRITE_REG(hw, IGC_EIAC, mask);
	IGC_WRITE_REG(hw, IGC_EIAM, mask);
	IGC_WRITE_REG(hw, IGC_EIMS, mask);
	IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC);
	IGC_WRITE_FLUSH(hw);
}

/* Mask all interrupts. */
void
igc_disable_intr(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;

	IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
	IGC_WRITE_REG(hw, IGC_EIAC, 0);
	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
	IGC_WRITE_FLUSH(hw);
}

/* MSI-X link vector handler: refresh link state on LSC and re-arm. */
int
igc_intr_link(void *arg)
{
	struct igc_softc *sc = (struct igc_softc *)arg;
	uint32_t reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);

	if (reg_icr & IGC_ICR_LSC) {
		KERNEL_LOCK();
		sc->hw.mac.get_link_status = true;
		igc_update_link_status(sc);
		KERNEL_UNLOCK();
	}

	IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
	IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->msix_linkmask);

	return 1;
}

/*
 * MSI-X per-queue interrupt handler: reclaim TX completions, process
 * received packets, refill the RX ring and re-arm the vector.
 */
int
igc_intr_queue(void *arg)
{
	struct igc_queue *iq = arg;
	struct igc_softc *sc = iq->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct rx_ring *rxr = iq->rxr;
	struct tx_ring *txr = iq->txr;

	if (ifp->if_flags & IFF_RUNNING) {
		igc_txeof(txr);
		igc_rxeof(rxr);
		igc_rxrefill(rxr);
	}

	igc_enable_queue(sc, iq->eims);

	return 1;
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire.
 *
 **********************************************************************/
int
igc_allocate_transmit_buffers(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;
	struct igc_tx_buf *txbuf;
	int error, i;

	txr->tx_buffers = mallocarray(sc->num_tx_desc,
	    sizeof(struct igc_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txr->tx_buffers == NULL) {
		printf("%s: Unable to allocate tx_buffer memory\n",
		    DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}
	txr->txtag = txr->txdma.dma_tag;

	/* Create the descriptor buffer dma maps. */
	for (i = 0; i < sc->num_tx_desc; i++) {
		txbuf = &txr->tx_buffers[i];
		error = bus_dmamap_create(txr->txdma.dma_tag, IGC_TSO_SIZE,
		    IGC_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT, &txbuf->map);
		if (error != 0) {
			printf("%s: Unable to create TX DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}

	return 0;
fail:
	return error;
}


/*********************************************************************
 *
 *  Allocate and initialize transmit structures.
 *
 **********************************************************************/
int
igc_setup_transmit_structures(struct igc_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, txr++) {
		if (igc_setup_transmit_ring(txr))
			goto fail;
	}

	return 0;
fail:
	/* Unwind any rings set up before the failure. */
	igc_free_transmit_structures(sc);
	return ENOBUFS;
}

/*********************************************************************
 *
 *  Initialize a transmit ring.
 *
 **********************************************************************/
int
igc_setup_transmit_ring(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;

	/* Now allocate transmit buffers for the ring. */
	if (igc_allocate_transmit_buffers(txr))
		return ENOMEM;

	/* Clear the old ring contents */
	bzero((void *)txr->tx_base,
	    (sizeof(union igc_adv_tx_desc)) * sc->num_tx_desc);

	/* Reset indices. */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
	    txr->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
igc_initialize_transmit_unit(struct igc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct tx_ring *txr;
	struct igc_hw *hw = &sc->hw;
	uint64_t bus_addr;
	uint32_t tctl, txdctl = 0;
	int i;

	/* Setup the Base and Length of the TX descriptor ring.
 */
	for (i = 0; i < sc->sc_nqueues; i++) {
		txr = &sc->tx_rings[i];

		bus_addr = txr->txdma.dma_map->dm_segs[0].ds_addr;

		/* Base and len of TX ring */
		IGC_WRITE_REG(hw, IGC_TDLEN(i),
		    sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
		IGC_WRITE_REG(hw, IGC_TDBAH(i), (uint32_t)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_TDBAL(i), (uint32_t)bus_addr);

		/* Init the HEAD/TAIL indices */
		IGC_WRITE_REG(hw, IGC_TDT(i), 0);
		IGC_WRITE_REG(hw, IGC_TDH(i), 0);

		txr->watchdog_timer = 0;

		txdctl = 0;		/* Clear txdctl */
		txdctl |= 0x1f;		/* PTHRESH */
		txdctl |= 1 << 8;	/* HTHRESH */
		txdctl |= 1 << 16;	/* WTHRESH */
		txdctl |= 1 << 22;	/* Reserved bit 22 must always be 1 */
		txdctl |= IGC_TXDCTL_GRAN;
		txdctl |= 1 << 25;	/* LWTHRESH */

		IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl);
	}
	ifp->if_timer = 0;

	/* Program the Transmit Control Register */
	tctl = IGC_READ_REG(&sc->hw, IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
	    (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl);
}

/*********************************************************************
 *
 *  Free all transmit rings.
 *
 **********************************************************************/
void
igc_free_transmit_structures(struct igc_softc *sc)
{
	struct tx_ring *txr = sc->tx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, txr++)
		igc_free_transmit_buffers(txr);
}

/*********************************************************************
 *
 *  Free transmit ring related data structures.
 *
 **********************************************************************/
void
igc_free_transmit_buffers(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;
	struct igc_tx_buf *txbuf;
	int i;

	if (txr->tx_buffers == NULL)
		return;

	/* Unload, free and destroy any buffers still on the ring. */
	txbuf = txr->tx_buffers;
	for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
		if (txbuf->map != NULL && txbuf->map->dm_nsegs > 0) {
			bus_dmamap_sync(txr->txdma.dma_tag, txbuf->map,
			    0, txbuf->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
		}
		if (txbuf->m_head != NULL) {
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		if (txbuf->map != NULL) {
			bus_dmamap_destroy(txr->txdma.dma_tag, txbuf->map);
			txbuf->map = NULL;
		}
	}

	if (txr->tx_buffers != NULL)
		free(txr->tx_buffers, M_DEVBUF,
		    sc->num_tx_desc * sizeof(struct igc_tx_buf));
	txr->tx_buffers = NULL;
	txr->txtag = NULL;
}


/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/

/*
 * Returns 1 if a context descriptor was written at slot 'prod' (the
 * caller must then consume that descriptor), 0 if no offload applies.
 * *olinfo_status is updated with the checksum-insert bits.
 */
int
igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
    uint32_t *olinfo_status)
{
	struct igc_adv_tx_context_desc *txdesc;
	struct ether_header *eh = mtod(mp, struct ether_header *);
	struct mbuf *m;
	uint32_t type_tucmd_mlhl = 0;
	uint32_t vlan_macip_lens = 0;
	uint32_t iphlen;
	int hoff;
	int off = 0;
	uint8_t ipproto;

	vlan_macip_lens |= (sizeof(*eh) << IGC_ADVTXD_MACLEN_SHIFT);

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
#ifdef notyet
#if NVLAN > 0
	if (ISSET(mp->m_flags, M_VLANTAG)) {
		uint32_t vtag = mp->m_pkthdr.ether_vtag;
		vlan_macip_lens |= (vtag << IGC_ADVTXD_VLAN_SHIFT);
		off = 1;
	}
#endif
#endif

	/* Determine the L3 header length and protocol. */
	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_IP: {
		struct ip *ip;

		m = m_getptr(mp, sizeof(*eh), &hoff);
		KASSERT(m != NULL && m->m_len - hoff >= sizeof(*ip));
		ip = (struct ip *)(mtod(m, caddr_t) + hoff);

		iphlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;

		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
		if (ISSET(mp->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT)) {
			*olinfo_status |= IGC_TXD_POPTS_IXSM << 8;
			off = 1;
		}

		break;
	}
#ifdef INET6
	case ETHERTYPE_IPV6: {
		struct ip6_hdr *ip6;

		m = m_getptr(mp, sizeof(*eh), &hoff);
		KASSERT(m != NULL && m->m_len - hoff >= sizeof(*ip6));
		ip6 = (struct ip6_hdr *)(mtod(m, caddr_t) + hoff);

		iphlen = sizeof(*ip6);
		ipproto = ip6->ip6_nxt;

		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
		break;
	}
#endif
	default:
		return 0;
	}

	vlan_macip_lens |= iphlen;
	type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* Request L4 checksum insertion when the stack asked for it. */
	switch (ipproto) {
	case IPPROTO_TCP:
		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
		if (ISSET(mp->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
			off = 1;
		}
		break;
	case IPPROTO_UDP:
		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
		if (ISSET(mp->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
			off = 1;
		}
		break;
	}

	if (off == 0)
		return 0;

	/* Now ready a context descriptor */
	txdesc = (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];

	/* Now copy bits into descriptor */
	htolem32(&txdesc->vlan_macip_lens, vlan_macip_lens);
	htolem32(&txdesc->type_tucmd_mlhl, type_tucmd_mlhl);
	htolem32(&txdesc->seqnum_seed, 0);
	htolem32(&txdesc->mss_l4len_idx, 0);

	return 1;
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
int
igc_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	struct igc_rx_buf *rxbuf;
	int i, error;

	rxr->rx_buffers = mallocarray(sc->num_rx_desc,
	    sizeof(struct igc_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rxr->rx_buffers == NULL) {
		printf("%s: Unable to allocate rx_buffer memory\n",
		    DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}

	/* One single-segment DMA map per descriptor slot. */
	rxbuf = rxr->rx_buffers;
	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
		error = bus_dmamap_create(rxr->rxdma.dma_tag,
		    MAX_JUMBO_FRAME_SIZE, 1, MAX_JUMBO_FRAME_SIZE, 0,
		    BUS_DMA_NOWAIT, &rxbuf->map);
		if (error) {
			printf("%s: Unable to create RX DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	return error;
}

/*********************************************************************
 *
 *  Allocate and initialize receive structures.
 *
 **********************************************************************/
int
igc_setup_receive_structures(struct igc_softc *sc)
{
	struct rx_ring *rxr = sc->rx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
		if (igc_setup_receive_ring(rxr))
			goto fail;
	}

	return 0;
fail:
	/* Unwind any rings set up before the failure. */
	igc_free_receive_structures(sc);
	return ENOBUFS;
}

/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
int
igc_setup_receive_ring(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int rsize;

	rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc),
	    IGC_DBA_ALIGN);

	/* Clear the ring contents. */
	bzero((void *)rxr->rx_base, rsize);

	if (igc_allocate_receive_buffers(rxr))
		return ENOMEM;

	/* Setup our descriptor indices. */
	rxr->next_to_check = 0;
	rxr->last_desc_filled = sc->num_rx_desc - 1;

	/* Low watermark scales with how many clusters one frame needs. */
	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
	    sc->num_rx_desc - 1);

	return 0;
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
void
igc_initialize_receive_unit(struct igc_softc *sc)
{
	struct rx_ring *rxr = sc->rx_rings;
	struct igc_hw *hw = &sc->hw;
	uint32_t rctl, rxcsum, srrctl = 0;
	int i;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring.
	 */
	rctl = IGC_READ_REG(hw, IGC_RCTL);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO |
	    IGC_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* Do not store bad packets */
	rctl &= ~IGC_RCTL_SBP;

	/* Enable Long Packet receive */
	if (sc->hw.mac.max_frame_size != ETHER_MAX_LEN)
		rctl |= IGC_RCTL_LPE;

	/* Strip the CRC */
	rctl |= IGC_RCTL_SECRC;

	/*
	 * Set the interrupt throttling rate. Value is calculated
	 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
	 */
	IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR);

	rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
	rxcsum &= ~IGC_RXCSUM_PCSD;

	/* RSS requires packet-checksum disable (PCSD) to be set. */
	if (sc->sc_nqueues > 1)
		rxcsum |= IGC_RXCSUM_PCSD;

	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);

	if (sc->sc_nqueues > 1)
		igc_initialize_rss_mapping(sc);

#if 0
	srrctl |= 4096 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	rctl |= IGC_RCTL_SZ_4096 | IGC_RCTL_BSEX;
#endif

	srrctl |= 2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	rctl |= IGC_RCTL_SZ_2048;

	/*
	 * If TX flow control is disabled and there's > 1 queue defined,
	 * enable DROP.
	 *
	 * This drops frames rather than hanging the RX MAC for all queues.
	 */
	if ((sc->sc_nqueues > 1) && (sc->fc == igc_fc_none ||
	    sc->fc == igc_fc_rx_pause)) {
		srrctl |= IGC_SRRCTL_DROP_EN;
	}

	/* Setup the Base and Length of the RX descriptor rings.
 */
	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), 0);
		uint64_t bus_addr = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
		uint32_t rxdctl;

		srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

		IGC_WRITE_REG(hw, IGC_RDLEN(i),
		    sc->num_rx_desc * sizeof(union igc_adv_rx_desc));
		IGC_WRITE_REG(hw, IGC_RDBAH(i), (uint32_t)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_RDBAL(i), (uint32_t)bus_addr);
		IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl);

		/* Setup the Head and Tail Descriptor Pointers */
		IGC_WRITE_REG(hw, IGC_RDH(i), 0);
		IGC_WRITE_REG(hw, IGC_RDT(i), 0);

		/* Enable this Queue */
		rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i));
		rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGC_RX_PTHRESH;
		rxdctl |= IGC_RX_HTHRESH << 8;
		rxdctl |= IGC_RX_WTHRESH << 16;
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl);
	}

	/* Make sure VLAN Filters are off */
	rctl &= ~IGC_RCTL_VFE;

	/* Write out the settings */
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
}

/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
void
igc_free_receive_structures(struct igc_softc *sc)
{
	struct rx_ring *rxr;
	int i;

	/* First mark every ring empty, then release the buffers. */
	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
		if_rxr_init(&rxr->rx_ring, 0, 0);

	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
		igc_free_receive_buffers(rxr);
}

/*********************************************************************
 *
 * Free receive ring data structures
 *
 **********************************************************************/
void
igc_free_receive_buffers(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	struct igc_rx_buf *rxbuf;
	int i;

	if (rxr->rx_buffers != NULL) {
		for (i = 0; i < sc->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->buf != NULL) {
				/* Sync and unload before freeing the mbuf. */
				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
				    0, rxbuf->map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->rxdma.dma_tag,
				    rxbuf->map);
				m_freem(rxbuf->buf);
				rxbuf->buf = NULL;
			}
			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
			rxbuf->map = NULL;
		}
		free(rxr->rx_buffers, M_DEVBUF,
		    sc->num_rx_desc * sizeof(struct igc_rx_buf));
		rxr->rx_buffers = NULL;
	}
}

/*
 * Initialise the RSS mapping for NICs that support multiple transmit/
 * receive rings.
 */
void
igc_initialize_rss_mapping(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t rss_key[10], mrqc, reta, shift = 0;
	int i, queue_id;

	/*
	 * The redirection table controls which destination
	 * queue each bucket redirects traffic to.
	 * Each DWORD represents four queues, with the LSB
	 * being the first queue in the DWORD.
	 *
	 * This just allocates buckets to queues using round-robin
	 * allocation.
	 *
	 * NOTE: It Just Happens to line up with the default
	 * RSS allocation method.
	 */

	/* Warning FM follows */
	reta = 0;
	for (i = 0; i < 128; i++) {
		queue_id = (i % sc->sc_nqueues);
		/* Adjust if required (shift is 0 here, so a no-op). */
		queue_id = queue_id << shift;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | ( ((uint32_t) queue_id) << 24);
		/* Flush one 32-bit RETA entry per four buckets. */
		if ((i & 3) == 3) {
			IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta);
			reta = 0;
		}
	}

	/*
	 * MRQC: Multiple Receive Queues Command
	 * Set queuing to RSS control, number depends on the device.
	 */
	mrqc = IGC_MRQC_ENABLE_RSS_4Q;

	/* Set up random bits */
	stoeplitz_to_key(&rss_key, sizeof(rss_key));

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]);

	/*
	 * Configure the RSS fields to hash upon:
	 * IPv4/IPv6 headers plus TCP ports (incl. IPv6 extension headers).
	 */
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 | IGC_MRQC_RSS_FIELD_IPV4_TCP);
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 | IGC_MRQC_RSS_FIELD_IPV6_TCP);
	mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
}

/*
 * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
 */
void
igc_get_hw_control(struct igc_softc *sc)
{
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is no longer loaded.
For AMT versions of the 2444 * f/w this means that the network i/f is closed. 2445 */ 2446void 2447igc_release_hw_control(struct igc_softc *sc) 2448{ 2449 uint32_t ctrl_ext; 2450 2451 ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT); 2452 IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 2453} 2454 2455int 2456igc_is_valid_ether_addr(uint8_t *addr) 2457{ 2458 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; 2459 2460 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { 2461 return 0; 2462 } 2463 2464 return 1; 2465} 2466