/* if_igc.c revision 1.2 */
/*	$OpenBSD: if_igc.c,v 1.2 2021/10/31 15:02:25 patrick Exp $	*/
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
 * All rights reserved.
 * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/intrmap.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/toeplitz.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/if_igc.h>
#include <dev/pci/igc_hw.h>

/*
 * PCI vendor/product pairs this driver matches on: Intel I220/I221/I225/I226
 * family adapters, including blank-NVM variants.
 */
const struct pci_matchid igc_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I220_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I221_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_BLANK_NVM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_I },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_IT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LMVP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_BLANK_NVM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_IT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_K },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_V }
};

/*********************************************************************
 *  Function Prototypes
 *********************************************************************/
int	igc_match(struct device *, void *, void *);
void	igc_attach(struct device *, struct device *, void *);
int	igc_detach(struct device *, int);

void	igc_identify_hardware(struct igc_softc *);
int	igc_allocate_pci_resources(struct igc_softc *);
int	igc_allocate_queues(struct igc_softc *);
void	igc_free_pci_resources(struct igc_softc *);
void	igc_reset(struct igc_softc *);
void	igc_init_dmac(struct igc_softc *, uint32_t);
int	igc_allocate_msix(struct igc_softc *);
void	igc_setup_msix(struct igc_softc *);
int	igc_dma_malloc(struct igc_softc *, bus_size_t, struct igc_dma_alloc *);
void	igc_dma_free(struct igc_softc *, struct igc_dma_alloc *);
void	igc_setup_interface(struct igc_softc *);

void	igc_init(void *);
void	igc_start(struct ifqueue *);
void	igc_stop(struct igc_softc *);
int	igc_ioctl(struct ifnet *, u_long, caddr_t);
int	igc_rxrinfo(struct igc_softc *, struct if_rxrinfo *);
int	igc_rxfill(struct rx_ring *);
void	igc_rxrefill(void *);
int	igc_rxeof(struct rx_ring *);
void	igc_rx_checksum(uint32_t, struct mbuf *, uint32_t);
void	igc_watchdog(struct ifnet *);
void	igc_media_status(struct ifnet *, struct ifmediareq *);
int	igc_media_change(struct ifnet *);
void	igc_iff(struct igc_softc *);
void	igc_update_link_status(struct igc_softc *);
int	igc_get_buf(struct rx_ring *, int);

void	igc_configure_queues(struct igc_softc *);
void	igc_set_queues(struct igc_softc *, uint32_t, uint32_t, int);
void	igc_enable_queue(struct igc_softc *, uint32_t);
void	igc_enable_intr(struct igc_softc *);
void	igc_disable_intr(struct igc_softc *);
int	igc_intr_link(void *);
int	igc_intr_queue(void *);

int	igc_allocate_transmit_buffers(struct tx_ring *);
int	igc_setup_transmit_structures(struct igc_softc *);
int	igc_setup_transmit_ring(struct tx_ring *);
void	igc_initialize_transmit_unit(struct igc_softc *);
void	igc_free_transmit_structures(struct igc_softc *);
void	igc_free_transmit_buffers(struct tx_ring *);
int	igc_allocate_receive_buffers(struct rx_ring *);
int	igc_setup_receive_structures(struct igc_softc *);
int	igc_setup_receive_ring(struct rx_ring *);
void	igc_initialize_receive_unit(struct igc_softc *);
void	igc_free_receive_structures(struct igc_softc *);
void	igc_free_receive_buffers(struct rx_ring *);
void	igc_initialize_rss_mapping(struct igc_softc *);

void	igc_get_hw_control(struct igc_softc *);
void	igc_release_hw_control(struct igc_softc *);
int	igc_is_valid_ether_addr(uint8_t *);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

/* autoconf(9) glue: driver name and class. */
struct cfdriver igc_cd = {
	NULL, "igc", DV_IFNET
};

struct cfattach igc_ca = {
	sizeof(struct igc_softc), igc_match, igc_attach, igc_detach
};

/*********************************************************************
 *  Device identification routine
 *
 *  igc_match determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
int
igc_match(struct device *parent, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, igc_devices,
	    nitems(igc_devices));
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
void
igc_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct igc_softc *sc = (struct igc_softc *)self;
	struct igc_hw *hw = &sc->hw;

	sc->osdep.os_sc = sc;
	sc->osdep.os_pa = *pa;

	/* Determine hardware and mac info */
	igc_identify_hardware(sc);

	sc->num_tx_desc = IGC_DEFAULT_TXD;
	sc->num_rx_desc = IGC_DEFAULT_RXD;

	/* Setup PCI resources */
	if (igc_allocate_pci_resources(sc))
		goto err_pci;

	/* Allocate TX/RX queues */
	if (igc_allocate_queues(sc))
		goto err_pci;

	/* Do shared code initialization */
	if (igc_setup_init_funcs(hw, true)) {
		printf(": Setup of shared code failed\n");
		goto err_pci;
	}

	hw->mac.autoneg = DO_AUTO_NEG;
	hw->phy.autoneg_wait_to_complete = false;
	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options. */
	if (hw->phy.media_type == igc_media_type_copper)
		hw->phy.mdix = AUTO_ALL_MODES;

	/* Set the max frame size. */
	/* 9234 allows jumbo frames; presumably the I225 hardware limit —
	 * TODO confirm against the datasheet. */
	sc->hw.mac.max_frame_size = 9234;

	/* Allocate multicast array memory. */
	sc->mta = mallocarray(ETHER_ADDR_LEN, MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_NOWAIT);
	if (sc->mta == NULL) {
		printf(": Can not allocate multicast setup array\n");
		goto err_late;
	}

	/* Check SOL/IDER usage. */
	if (igc_check_reset_block(hw))
		printf(": PHY reset is blocked due to SOL/IDER session\n");

	/* Enable Energy Efficient Ethernet. */
	/* NOTE(review): despite the comment above, this sets eee_disable
	 * to true, i.e. EEE is turned off here. */
	sc->hw.dev_spec._i225.eee_disable = true;

	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it. */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			printf(": The EEPROM checksum is not valid\n");
			goto err_late;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM. */
	if (igc_read_mac_addr(hw) < 0) {
		printf(": EEPROM read error while reading MAC address\n");
		goto err_late;
	}

	if (!igc_is_valid_ether_addr(hw->mac.addr)) {
		printf(": Invalid MAC address\n");
		goto err_late;
	}

	memcpy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);

	if (igc_allocate_msix(sc))
		goto err_late;

	/* Setup OS specific network interface. */
	igc_setup_interface(sc);

	igc_reset(sc);
	hw->mac.get_link_status = true;
	igc_update_link_status(sc);

	/* The driver can now take control from firmware. */
	igc_get_hw_control(sc);

	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
	return;

err_late:
	igc_release_hw_control(sc);
err_pci:
	igc_free_pci_resources(sc);
	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
int
igc_detach(struct device *self, int flags)
{
	struct igc_softc *sc = (struct igc_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	igc_stop(sc);

	igc_phy_hw_reset(&sc->hw);
	igc_release_hw_control(sc);

	ether_ifdetach(ifp);
	if_detach(ifp);

	igc_free_pci_resources(sc);

	igc_free_transmit_structures(sc);
	igc_free_receive_structures(sc);
	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	return 0;
}

/* Record the PCI device id and let shared code derive the mac type. */
void
igc_identify_hardware(struct igc_softc *sc)
{
	struct igc_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;

	/* Save off the information about this board. */
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	/* Do shared code init and setup. */
	if (igc_set_mac_type(&sc->hw)) {
		printf(": Setup init failure\n");
		return;
	}
}

/* Map the register BAR and set up MSI-X vector accounting. */
int
igc_allocate_pci_resources(struct igc_softc *sc)
{
	struct igc_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IGC_PCIREG);
	if (pci_mapreg_map(pa, IGC_PCIREG, memtype, 0, &os->os_memt,
	    &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
		printf(": unable to map registers\n");
		return ENXIO;
	}
	sc->hw.hw_addr = (uint8_t *)os->os_membase;
	sc->hw.back = os;

	igc_setup_msix(sc);

	return 0;
}

/*
 * Allocate the per-queue structs plus TX/RX rings and their descriptor
 * DMA memory.  On failure every partially built ring is unwound in
 * reverse order (rxconf/txconf count how many descriptors were
 * allocated so far).
 */
int
igc_allocate_queues(struct igc_softc *sc)
{
	struct igc_queue *iq;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	int i, rsize, rxconf, tsize, txconf;

	/* Allocate the top level queue structs. */
	sc->queues = mallocarray(sc->sc_nqueues, sizeof(struct igc_queue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		printf("%s: unable to allocate queue\n", DEVNAME(sc));
		goto fail;
	}

	/* Allocate the TX ring. */
	sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->tx_rings == NULL) {
		printf("%s: unable to allocate TX ring\n", DEVNAME(sc));
		goto fail;
	}

	/* Allocate the RX ring. */
	sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct rx_ring),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->rx_rings == NULL) {
		printf("%s: unable to allocate RX ring\n", DEVNAME(sc));
		goto rx_fail;
	}

	txconf = rxconf = 0;

	/* Set up the TX queues. */
	tsize = roundup2(sc->num_tx_desc * sizeof(union igc_adv_tx_desc),
	    IGC_DBA_ALIGN);
	for (i = 0; i < sc->sc_nqueues; i++, txconf++) {
		txr = &sc->tx_rings[i];
		txr->sc = sc;
		txr->me = i;

		if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
			printf("%s: unable to allocate TX descriptor\n",
			    DEVNAME(sc));
			goto err_tx_desc;
		}
		txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);
	}

	/* Set up the RX queues. */
	rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc),
	    IGC_DBA_ALIGN);
	for (i = 0; i < sc->sc_nqueues; i++, rxconf++) {
		rxr = &sc->rx_rings[i];
		rxr->sc = sc;
		rxr->me = i;
		timeout_set(&rxr->rx_refill, igc_rxrefill, rxr);

		if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) {
			printf("%s: unable to allocate RX descriptor\n",
			    DEVNAME(sc));
			goto err_rx_desc;
		}
		rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);
	}

	/* Set up the queue holding structs. */
	for (i = 0; i < sc->sc_nqueues; i++) {
		iq = &sc->queues[i];
		iq->sc = sc;
		iq->txr = &sc->tx_rings[i];
		iq->rxr = &sc->rx_rings[i];
		snprintf(iq->name, sizeof(iq->name), "%s:%d", DEVNAME(sc), i);
	}

	return 0;

err_rx_desc:
	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
		igc_dma_free(sc, &rxr->rxdma);
err_tx_desc:
	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
		igc_dma_free(sc, &txr->txdma);
	free(sc->rx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct rx_ring));
	sc->rx_rings = NULL;
rx_fail:
	free(sc->tx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct tx_ring));
	sc->tx_rings = NULL;
fail:
	return ENOMEM;
}

/* Tear down interrupt handlers and unmap the register BAR. */
void
igc_free_pci_resources(struct igc_softc *sc)
{
	struct igc_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	struct igc_queue *iq = sc->queues;
	int i;

	/* Release all msix queue resources. */
	for (i = 0; i < sc->sc_nqueues; i++, iq++) {
		if (iq->tag)
			pci_intr_disestablish(pa->pa_pc, iq->tag);
		iq->tag = NULL;
	}

	if (sc->tag)
		pci_intr_disestablish(pa->pa_pc, sc->tag);
	sc->tag = NULL;
	if (os->os_membase != 0)
		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
	os->os_membase = 0;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure.
 *
 **********************************************************************/
void
igc_reset(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t pba;
	uint16_t rx_buffer_size;

	/* Let the firmware know the OS is in control */
	igc_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	pba = IGC_PBA_34K;

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = (pba & 0xffff) << 10;
	hw->fc.high_water = rx_buffer_size -
	    roundup2(sc->hw.mac.max_frame_size, 1024);
	/* 16-byte granularity */
	hw->fc.low_water = hw->fc.high_water - 16;

	if (sc->fc) /* locally set flow control value? */
		hw->fc.requested_mode = sc->fc;
	else
		hw->fc.requested_mode = igc_fc_full;

	hw->fc.pause_time = IGC_FC_PAUSE_TIME;

	hw->fc.send_xon = true;

	/* Issue a global reset */
	igc_reset_hw(hw);
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* and a re-init */
	if (igc_init_hw(hw) < 0) {
		printf(": Hardware Initialization Failed\n");
		return;
	}

	/* Setup DMA Coalescing */
	igc_init_dmac(sc, pba);

	IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
	igc_get_phy_info(hw);
	igc_check_for_link(hw);
}

/*********************************************************************
 *
 *  Initialize the DMA Coalescing feature
 *
 **********************************************************************/
void
igc_init_dmac(struct igc_softc *sc, uint32_t pba)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t dmac, reg = ~IGC_DMACR_DMAC_EN;
	uint16_t hwm, max_frame_size;
	int status;

	max_frame_size = sc->hw.mac.max_frame_size;

	if (sc->dmac == 0) {	/* Disabling it */
		IGC_WRITE_REG(hw, IGC_DMACR, reg);
		return;
	} else
		printf(": DMA Coalescing enabled\n");

	/* Set starting threshold */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);

	hwm = 64 * pba - max_frame_size / 16;
	if (hwm < 64 * (pba - 6))
		hwm = 64 * (pba - 6);
	reg = IGC_READ_REG(hw, IGC_FCRTC);
	reg &= ~IGC_FCRTC_RTH_COAL_MASK;
	reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT)
	    & IGC_FCRTC_RTH_COAL_MASK);
	IGC_WRITE_REG(hw, IGC_FCRTC, reg);

	dmac = pba - max_frame_size / 512;
	if (dmac < pba - 10)
		dmac = pba - 10;
	reg = IGC_READ_REG(hw, IGC_DMACR);
	reg &= ~IGC_DMACR_DMACTHR_MASK;
	reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT)
	    & IGC_DMACR_DMACTHR_MASK);

	/* transition to L0x or L1 if available..*/
	reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK);

	/* Check if status is 2.5Gb backplane connection
	 * before configuration of watchdog timer, which
	 * is in msec values in 12.8usec intervals
	 * watchdog timer= msec values in 32usec intervals
	 * for non 2.5Gb connection
	 */
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
		reg |= ((sc->dmac * 5) >> 6);
	else
		reg |= (sc->dmac >> 5);

	IGC_WRITE_REG(hw, IGC_DMACR, reg);

	IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);

	/* Set the interval before transition */
	reg = IGC_READ_REG(hw, IGC_DMCTLX);
	reg |= IGC_DMCTLX_DCFLUSH_DIS;

	/*
	** in 2.5Gb connection, TTLX unit is 0.4 usec
	** which is 0x4*2 = 0xA. But delay is still 4 usec
	*/
	status = IGC_READ_REG(hw, IGC_STATUS);
	if ((status & IGC_STATUS_2P5_SKU) &&
	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
		reg |= 0xA;
	else
		reg |= 0x4;

	IGC_WRITE_REG(hw, IGC_DMCTLX, reg);

	/* free space in tx packet buffer to wake from DMA coal */
	IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE -
	    (2 * max_frame_size)) >> 6);

	/* make low power state decision controlled by DMA coal */
	reg = IGC_READ_REG(hw, IGC_PCIEMISC);
	reg &= ~IGC_PCIEMISC_LX_DECISION;
	IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
}

/*
 * Establish one MSI-X vector per queue plus a final vector for
 * link status changes.  Queue vectors are spread over CPUs via the
 * intrmap built in igc_setup_msix().  Returns 0 or ENOMEM; on failure
 * any vectors established so far are torn down.
 */
int
igc_allocate_msix(struct igc_softc *sc)
{
	struct igc_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	struct igc_queue *iq;
	pci_intr_handle_t ih;
	int i, error = 0;

	for (i = 0, iq = sc->queues; i < sc->sc_nqueues; i++, iq++) {
		if (pci_intr_map_msix(pa, i, &ih)) {
			printf("%s: unable to map msi-x vector %d\n",
			    DEVNAME(sc), i);
			error = ENOMEM;
			goto fail;
		}

		iq->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
		    igc_intr_queue, iq, iq->name);
		if (iq->tag == NULL) {
			printf("%s: unable to establish interrupt %d\n",
			    DEVNAME(sc), i);
			error = ENOMEM;
			goto fail;
		}

		iq->msix = i;
		/* Per-queue bit in the interrupt mask registers. */
		iq->eims = 1 << i;
	}

	/* Now the link status/control last MSI-X vector. */
	if (pci_intr_map_msix(pa, i, &ih)) {
		printf("%s: unable to map link vector\n", DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}

	sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
	    igc_intr_link, sc, sc->sc_dev.dv_xname);
	if (sc->tag == NULL) {
		printf("%s: unable to establish link interrupt\n", DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}

	sc->linkvec = i;
	printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih),
	    i, (i > 1) ? "s" : "");

	return 0;
fail:
	for (iq = sc->queues; i > 0; i--, iq++) {
		if (iq->tag == NULL)
			continue;
		pci_intr_disestablish(pa->pa_pc, iq->tag);
		iq->tag = NULL;
	}

	return error;
}

/*
 * Size the queue count from the available MSI-X vectors, reserving
 * one vector for link events.
 */
void
igc_setup_msix(struct igc_softc *sc)
{
	struct igc_osdep *os = &sc->osdep;
	struct pci_attach_args *pa = &os->os_pa;
	int nmsix;

	nmsix = pci_intr_msix_count(pa);
	/* NOTE(review): when nmsix <= 1 this only prints a warning and
	 * continues with nmsix - 1 vectors — verify intrmap_create()
	 * copes with a zero/negative count, or fail the attach here. */
	if (nmsix <= 1)
		printf(": not enough msi-x vectors\n");

	/* Give one vector to events. */
	nmsix--;

	sc->sc_intrmap = intrmap_create(&sc->sc_dev, nmsix, IGC_MAX_VECTORS,
	    INTRMAP_POWEROF2);
	sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
}

/*
 * Allocate, map and load a contiguous chunk of DMA-safe memory.
 * Returns 0 on success, 1 on failure with everything unwound.
 */
int
igc_dma_malloc(struct igc_softc *sc, bus_size_t size, struct igc_dma_alloc *dma)
{
	struct igc_osdep *os = &sc->osdep;

	dma->dma_tag = os->os_pa.pa_dmat;

	if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->dma_map))
		return 1;
	if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
	    1, &dma->dma_nseg, BUS_DMA_NOWAIT))
		goto destroy;
	if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
	    &dma->dma_vaddr, BUS_DMA_NOWAIT))
		goto free;
	if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
	    NULL, BUS_DMA_NOWAIT))
		goto unmap;

	dma->dma_size = size;

	return 0;
unmap:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
free:
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
destroy:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return 1;
}

/* Release DMA memory allocated by igc_dma_malloc(); safe to call twice. */
void
igc_dma_free(struct igc_softc *sc, struct igc_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;

	if (dma->dma_map != NULL) {
		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
		    dma->dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
		dma->dma_map = NULL;
	}
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
768 * 769 **********************************************************************/ 770void 771igc_setup_interface(struct igc_softc *sc) 772{ 773 struct ifnet *ifp = &sc->sc_ac.ac_if; 774 int i; 775 776 ifp->if_softc = sc; 777 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ); 778 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 779 ifp->if_xflags = IFXF_MPSAFE; 780 ifp->if_ioctl = igc_ioctl; 781 ifp->if_qstart = igc_start; 782 ifp->if_watchdog = igc_watchdog; 783 ifp->if_hardmtu = sc->hw.mac.max_frame_size - ETHER_HDR_LEN - 784 ETHER_CRC_LEN; 785 ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1); 786 787 ifp->if_capabilities = IFCAP_VLAN_MTU; 788 789#if NVLAN > 0 790 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 791#endif 792 793 ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 794 795 /* Initialize ifmedia structures. */ 796 ifmedia_init(&sc->media, IFM_IMASK, igc_media_change, igc_media_status); 797 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 798 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); 799 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 800 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); 801 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 802 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); 803 ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_T, 0, NULL); 804 805 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 806 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 807 808 if_attach(ifp); 809 ether_ifattach(ifp); 810 811 if_attach_queues(ifp, sc->sc_nqueues); 812 if_attach_iqueues(ifp, sc->sc_nqueues); 813 for (i = 0; i < sc->sc_nqueues; i++) { 814 struct ifqueue *ifq = ifp->if_ifqs[i]; 815 struct ifiqueue *ifiq = ifp->if_iqs[i]; 816 struct tx_ring *txr = &sc->tx_rings[i]; 817 struct rx_ring *rxr = &sc->rx_rings[i]; 818 819 ifq->ifq_softc = txr; 820 txr->ifq = ifq; 821 822 ifiq->ifiq_softc = rxr; 823 rxr->ifiq = ifiq; 824 } 825} 826 827void 
828igc_init(void *arg) 829{ 830 struct igc_softc *sc = (struct igc_softc *)arg; 831 struct ifnet *ifp = &sc->sc_ac.ac_if; 832 struct rx_ring *rxr; 833 uint32_t ctrl = 0; 834 int i, s; 835 836 s = splnet(); 837 838 igc_stop(sc); 839 840 /* Get the latest mac address, user can use a LAA. */ 841 bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN); 842 843 /* Put the address into the receive address array. */ 844 igc_rar_set(&sc->hw, sc->hw.mac.addr, 0); 845 846 /* Initialize the hardware. */ 847 igc_reset(sc); 848 igc_update_link_status(sc); 849 850 /* Setup VLAN support, basic and offload if available. */ 851 IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN); 852 853 /* Prepare transmit descriptors and buffers. */ 854 if (igc_setup_transmit_structures(sc)) { 855 printf("%s: Could not setup transmit structures\n", 856 DEVNAME(sc)); 857 igc_stop(sc); 858 splx(s); 859 return; 860 } 861 igc_initialize_transmit_unit(sc); 862 863 sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN; 864 /* Prepare receive descriptors and buffers. */ 865 if (igc_setup_receive_structures(sc)) { 866 printf("%s: Could not setup receive structures\n", 867 DEVNAME(sc)); 868 igc_stop(sc); 869 splx(s); 870 return; 871 } 872 igc_initialize_receive_unit(sc); 873 874 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) { 875 ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL); 876 ctrl |= IGC_CTRL_VME; 877 IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl); 878 } 879 880 /* Setup multicast table. */ 881 igc_iff(sc); 882 883 igc_clear_hw_cntrs_base_generic(&sc->hw); 884 885 igc_configure_queues(sc); 886 887 /* This clears any pending interrupts */ 888 IGC_READ_REG(&sc->hw, IGC_ICR); 889 IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC); 890 891 /* The driver can now take control from firmware. */ 892 igc_get_hw_control(sc); 893 894 /* Set Energy Efficient Ethernet. 
*/ 895 igc_set_eee_i225(&sc->hw, true, true, true); 896 897 for (i = 0; i < sc->sc_nqueues; i++) { 898 rxr = &sc->rx_rings[i]; 899 igc_rxfill(rxr); 900 if (if_rxr_inuse(&rxr->rx_ring) == 0) { 901 printf("%s: Unable to fill any rx descriptors\n", 902 DEVNAME(sc)); 903 igc_stop(sc); 904 splx(s); 905 } 906 IGC_WRITE_REG(&sc->hw, IGC_RDT(i), 907 (rxr->last_desc_filled + 1) % sc->num_rx_desc); 908 } 909 910 igc_enable_intr(sc); 911 912 ifp->if_flags |= IFF_RUNNING; 913 for (i = 0; i < sc->sc_nqueues; i++) 914 ifq_clr_oactive(ifp->if_ifqs[i]); 915 916 splx(s); 917} 918 919void 920igc_start(struct ifqueue *ifq) 921{ 922 struct ifnet *ifp = ifq->ifq_if; 923 struct igc_softc *sc = ifp->if_softc; 924 925 if (!sc->link_active) 926 return; 927} 928 929/********************************************************************* 930 * 931 * This routine disables all traffic on the adapter by issuing a 932 * global reset on the MAC. 933 * 934 **********************************************************************/ 935void 936igc_stop(struct igc_softc *sc) 937{ 938 struct ifnet *ifp = &sc->sc_ac.ac_if; 939 int i; 940 941 /* Tell the stack that the interface is no longer active. */ 942 ifp->if_flags &= ~IFF_RUNNING; 943 944 igc_disable_intr(sc); 945 946 igc_reset_hw(&sc->hw); 947 IGC_WRITE_REG(&sc->hw, IGC_WUC, 0); 948 949 intr_barrier(sc->tag); 950 for (i = 0; i < sc->sc_nqueues; i++) { 951 struct ifqueue *ifq = ifp->if_ifqs[i]; 952 ifq_barrier(ifq); 953 ifq_clr_oactive(ifq); 954 955 if (sc->queues[i].tag != NULL) 956 intr_barrier(sc->queues[i].tag); 957 timeout_del(&sc->rx_rings[i].rx_refill); 958 } 959 960 igc_free_transmit_structures(sc); 961 igc_free_receive_structures(sc); 962 963 igc_update_link_status(sc); 964} 965 966/********************************************************************* 967 * Ioctl entry point 968 * 969 * igc_ioctl is called when the user wants to configure the 970 * interface. 
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
int
igc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct igc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			igc_init(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				/* Already up: just reprogram rx filters. */
				error = ENETRESET;
			else
				igc_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				igc_stop(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;
	case SIOCGIFRXR:
		error = igc_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			igc_disable_intr(sc);
			igc_iff(sc);
			igc_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return error;
}

/* Report per-ring rx buffer usage for SIOCGIFRXR (systat/netstat). */
int
igc_rxrinfo(struct igc_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr, ifr1;
	struct rx_ring *rxr;
	int error, i, n = 0;

	if (sc->sc_nqueues > 1) {
		/* NOTE(review): mallocarray with M_WAITOK cannot return
		 * NULL, so this check is dead (but harmless). */
		if ((ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF,
		    M_WAITOK | M_ZERO)) == NULL)
			return ENOMEM;
	} else
		ifr = &ifr1;

	for (i = 0; i < sc->sc_nqueues; i++) {
		rxr = &sc->rx_rings[i];
		ifr[n].ifr_size = MCLBYTES;
		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
		ifr[n].ifr_info = rxr->rx_ring;
		n++;
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
	if (sc->sc_nqueues > 1)
		free(ifr, M_DEVBUF, sc->sc_nqueues * sizeof(*ifr));

	return error;
}

/*
 * Attach fresh mbufs to empty rx descriptors, starting after the last
 * filled slot.  Returns nonzero if at least one descriptor was posted
 * (caller then kicks RDT).
 */
int
igc_rxfill(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	int i, post = 0;
	u_int slots;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	i = rxr->last_desc_filled;
	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc); slots > 0;
	    slots--) {
		if (++i == sc->num_rx_desc)
			i = 0;

		if (igc_get_buf(rxr, i) != 0)
			break;

		rxr->last_desc_filled = i;
		post = 1;
	}

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Return the slots we did not manage to fill. */
	if_rxr_put(&rxr->rx_ring, slots);

	return post;
}

/*
 * Timeout handler: retry filling a ring that ran completely dry
 * (e.g. under mbuf shortage), rearming itself until it succeeds.
 */
void
igc_rxrefill(void *xrxr)
{
	struct rx_ring *rxr = xrxr;
	struct igc_softc *sc = rxr->sc;

	if (igc_rxfill(rxr)) {
		IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me),
		    (rxr->last_desc_filled + 1) % sc->num_rx_desc);
	}
	else if (if_rxr_inuse(&rxr->rx_ring) == 0)
		timeout_add(&rxr->rx_refill, 1);
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
1103 * 1104 *********************************************************************/ 1105int 1106igc_rxeof(struct rx_ring *rxr) 1107{ 1108 struct igc_softc *sc = rxr->sc; 1109 struct ifnet *ifp = &sc->sc_ac.ac_if; 1110 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 1111 struct mbuf *mp, *m; 1112 struct igc_rx_buf *rxbuf, *nxbuf; 1113 union igc_adv_rx_desc *rxdesc; 1114 uint32_t ptype, staterr = 0; 1115 uint16_t len, vtag; 1116 uint8_t eop = 0; 1117 int i; 1118 1119 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 1120 return 0; 1121 1122 i = rxr->next_to_check; 1123 while (if_rxr_inuse(&rxr->rx_ring) > 0) { 1124 uint32_t hash; 1125 uint16_t hashtype; 1126 1127 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 1128 i * sizeof(union igc_adv_rx_desc), 1129 sizeof(union igc_adv_rx_desc), BUS_DMASYNC_POSTREAD); 1130 1131 rxdesc = &rxr->rx_base[i]; 1132 staterr = letoh32(rxdesc->wb.upper.status_error); 1133 if (!ISSET(staterr, IGC_RXD_STAT_DD)) { 1134 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 1135 i * sizeof(union igc_adv_rx_desc), 1136 sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD); 1137 break; 1138 } 1139 1140 /* Zero out the receive descriptors status. */ 1141 rxdesc->wb.upper.status_error = 0; 1142 rxbuf = &rxr->rx_buffers[i]; 1143 1144 /* Pull the mbuf off the ring. 
*/ 1145 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0, 1146 rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1147 bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map); 1148 1149 mp = rxbuf->buf; 1150 len = letoh16(rxdesc->wb.upper.length); 1151 vtag = letoh16(rxdesc->wb.upper.vlan); 1152 eop = ((staterr & IGC_RXD_STAT_EOP) == IGC_RXD_STAT_EOP); 1153 ptype = letoh32(rxdesc->wb.lower.lo_dword.data) & 1154 IGC_PKTTYPE_MASK; 1155 hash = letoh32(rxdesc->wb.lower.hi_dword.rss); 1156 hashtype = le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) & 1157 IGC_RXDADV_RSSTYPE_MASK; 1158 1159 if (staterr & IGC_RXDEXT_STATERR_RXE) { 1160 if (rxbuf->fmp) { 1161 m_freem(rxbuf->fmp); 1162 rxbuf->fmp = NULL; 1163 } 1164 1165 m_freem(mp); 1166 rxbuf->buf = NULL; 1167 goto next_desc; 1168 } 1169 1170 if (mp == NULL) { 1171 panic("%s: igc_rxeof: NULL mbuf in slot %d " 1172 "(nrx %d, filled %d)", DEVNAME(sc), i, 1173 if_rxr_inuse(&rxr->rx_ring), rxr->last_desc_filled); 1174 } 1175 1176 mp->m_len = len; 1177 1178 m = rxbuf->fmp; 1179 rxbuf->buf = rxbuf->fmp = NULL; 1180 1181 if (m != NULL) 1182 m->m_pkthdr.len += mp->m_len; 1183 else { 1184 m = mp; 1185 m->m_pkthdr.len = mp->m_len; 1186#if NVLAN > 0 1187 if (staterr & IGC_RXD_STAT_VP) { 1188 m->m_pkthdr.ether_vtag = vtag; 1189 m->m_flags |= M_VLANTAG; 1190 } 1191#endif 1192 } 1193 1194 /* Pass the head pointer on */ 1195 if (eop == 0) { 1196 nxbuf->fmp = m; 1197 m = NULL; 1198 mp->m_next = nxbuf->buf; 1199 } else { 1200 igc_rx_checksum(staterr, m, ptype); 1201 1202 if (hashtype != IGC_RXDADV_RSSTYPE_NONE) { 1203 m->m_pkthdr.ph_flowid = hash; 1204 SET(m->m_pkthdr.csum_flags, M_FLOWID); 1205 } 1206 1207 ml_enqueue(&ml, m); 1208 } 1209next_desc: 1210 if_rxr_put(&rxr->rx_ring, 1); 1211 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 1212 i * sizeof(union igc_adv_rx_desc), 1213 sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD); 1214 1215 /* Advance our pointers to the next descriptor. 
*/ 1216 if (++i == sc->num_rx_desc) 1217 i = 0; 1218 } 1219 rxr->next_to_check = i; 1220 1221 if (ifiq_input(rxr->ifiq, &ml)) 1222 if_rxr_livelocked(&rxr->rx_ring); 1223 1224 if (!(staterr & IGC_RXD_STAT_DD)) 1225 return 0; 1226 1227 return 1; 1228} 1229 1230/********************************************************************* 1231 * 1232 * Verify that the hardware indicated that the checksum is valid. 1233 * Inform the stack about the status of checksum so that stack 1234 * doesn't spend time verifying the checksum. 1235 * 1236 *********************************************************************/ 1237void 1238igc_rx_checksum(uint32_t staterr, struct mbuf *m, uint32_t ptype) 1239{ 1240 uint16_t status = (uint16_t)staterr; 1241 uint8_t errors = (uint8_t)(staterr >> 24); 1242 1243 if (status & IGC_RXD_STAT_IPCS) { 1244 if (!(errors & IGC_RXD_ERR_IPE)) { 1245 /* IP Checksum Good */ 1246 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK; 1247 } else 1248 m->m_pkthdr.csum_flags = 0; 1249 } 1250 1251 if (status & (IGC_RXD_STAT_TCPCS | IGC_RXD_STAT_UDPCS)) { 1252 if (!(errors & IGC_RXD_ERR_TCPE)) 1253 m->m_pkthdr.csum_flags |= 1254 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK; 1255 } 1256} 1257 1258void 1259igc_watchdog(struct ifnet * ifp) 1260{ 1261} 1262 1263/********************************************************************* 1264 * 1265 * Media Ioctl callback 1266 * 1267 * This routine is called whenever the user queries the status of 1268 * the interface using ifconfig. 
1269 * 1270 **********************************************************************/ 1271void 1272igc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1273{ 1274 struct igc_softc *sc = ifp->if_softc; 1275 1276 igc_update_link_status(sc); 1277 1278 ifmr->ifm_status = IFM_AVALID; 1279 ifmr->ifm_active = IFM_ETHER; 1280 1281 if (!sc->link_active) { 1282 ifmr->ifm_active |= IFM_NONE; 1283 return; 1284 } 1285 1286 ifmr->ifm_status |= IFM_ACTIVE; 1287 1288 switch (sc->link_speed) { 1289 case 10: 1290 ifmr->ifm_active |= IFM_10_T; 1291 break; 1292 case 100: 1293 ifmr->ifm_active |= IFM_100_TX; 1294 break; 1295 case 1000: 1296 ifmr->ifm_active |= IFM_1000_T; 1297 break; 1298 case 2500: 1299 ifmr->ifm_active |= IFM_2500_T; 1300 break; 1301 } 1302 1303 if (sc->link_duplex == FULL_DUPLEX) 1304 ifmr->ifm_active |= IFM_FDX; 1305 else 1306 ifmr->ifm_active |= IFM_HDX; 1307} 1308 1309/********************************************************************* 1310 * 1311 * Media Ioctl callback 1312 * 1313 * This routine is called when the user changes speed/duplex using 1314 * media/mediopt option with ifconfig. 
1315 * 1316 **********************************************************************/ 1317int 1318igc_media_change(struct ifnet *ifp) 1319{ 1320 struct igc_softc *sc = ifp->if_softc; 1321 struct ifmedia *ifm = &sc->media; 1322 1323 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1324 return (EINVAL); 1325 1326 sc->hw.mac.autoneg = DO_AUTO_NEG; 1327 1328 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1329 case IFM_AUTO: 1330 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1331 break; 1332 case IFM_2500_T: 1333 sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; 1334 break; 1335 case IFM_1000_T: 1336 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1337 break; 1338 case IFM_100_TX: 1339 if ((ifm->ifm_media & IFM_GMASK) == IFM_HDX) 1340 sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF; 1341 else 1342 sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL; 1343 break; 1344 case IFM_10_T: 1345 if ((ifm->ifm_media & IFM_GMASK) == IFM_HDX) 1346 sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF; 1347 else 1348 sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL; 1349 break; 1350 default: 1351 return EINVAL; 1352 } 1353 1354 igc_init(sc); 1355 1356 return 0; 1357} 1358 1359void 1360igc_iff(struct igc_softc *sc) 1361{ 1362 struct ifnet *ifp = &sc->sc_ac.ac_if; 1363 struct arpcom *ac = &sc->sc_ac; 1364 struct ether_multi *enm; 1365 struct ether_multistep step; 1366 uint32_t reg_rctl = 0; 1367 uint8_t *mta; 1368 int mcnt = 0; 1369 1370 mta = sc->mta; 1371 bzero(mta, sizeof(uint8_t) * ETHER_ADDR_LEN * 1372 MAX_NUM_MULTICAST_ADDRESSES); 1373 1374 reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL); 1375 reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE); 1376 ifp->if_flags &= ~IFF_ALLMULTI; 1377 1378 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 || 1379 ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) { 1380 ifp->if_flags |= IFF_ALLMULTI; 1381 reg_rctl |= IGC_RCTL_MPE; 1382 if (ifp->if_flags & IFF_PROMISC) 1383 reg_rctl |= IGC_RCTL_UPE; 1384 } else { 1385 ETHER_FIRST_MULTI(step, ac, enm); 
1386 while (enm != NULL) { 1387 bcopy(enm->enm_addrlo, 1388 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1389 mcnt++; 1390 1391 ETHER_NEXT_MULTI(step, enm); 1392 } 1393 1394 igc_update_mc_addr_list(&sc->hw, mta, mcnt); 1395 } 1396 1397 IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl); 1398} 1399 1400void 1401igc_update_link_status(struct igc_softc *sc) 1402{ 1403 struct ifnet *ifp = &sc->sc_ac.ac_if; 1404 struct igc_hw *hw = &sc->hw; 1405 int link_state; 1406 1407 if (IGC_READ_REG(&sc->hw, IGC_STATUS) & IGC_STATUS_LU) { 1408 if (sc->link_active == 0) { 1409 igc_get_speed_and_duplex(hw, &sc->link_speed, 1410 &sc->link_duplex); 1411 sc->link_active = 1; 1412 ifp->if_baudrate = IF_Mbps(sc->link_speed); 1413 } 1414 link_state = (sc->link_duplex == FULL_DUPLEX) ? 1415 LINK_STATE_FULL_DUPLEX : LINK_STATE_HALF_DUPLEX; 1416 } else { 1417 if (sc->link_active == 1) { 1418 ifp->if_baudrate = sc->link_speed = 0; 1419 sc->link_duplex = 0; 1420 sc->link_active = 0; 1421 } 1422 link_state = LINK_STATE_DOWN; 1423 } 1424 if (ifp->if_link_state != link_state) { 1425 ifp->if_link_state = link_state; 1426 if_link_state_change(ifp); 1427 } 1428} 1429 1430/********************************************************************* 1431 * 1432 * Get a buffer from system mbuf buffer pool. 
1433 * 1434 **********************************************************************/ 1435int 1436igc_get_buf(struct rx_ring *rxr, int i) 1437{ 1438 struct igc_softc *sc = rxr->sc; 1439 struct igc_rx_buf *rxbuf; 1440 struct mbuf *m; 1441 union igc_adv_rx_desc *rxdesc; 1442 int error; 1443 1444 rxbuf = &rxr->rx_buffers[i]; 1445 rxdesc = &rxr->rx_base[i]; 1446 if (rxbuf->buf) { 1447 printf("%s: slot %d already has an mbuf\n", DEVNAME(sc), i); 1448 return ENOBUFS; 1449 } 1450 1451 m = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz); 1452 if (!m) 1453 return ENOBUFS; 1454 1455 m->m_data += (m->m_ext.ext_size - sc->rx_mbuf_sz); 1456 m->m_len = m->m_pkthdr.len = sc->rx_mbuf_sz; 1457 1458 error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map, m, 1459 BUS_DMA_NOWAIT); 1460 if (error) { 1461 m_freem(m); 1462 return error; 1463 } 1464 1465 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0, 1466 rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD); 1467 rxbuf->buf = m; 1468 1469 rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr); 1470 1471 return 0; 1472} 1473 1474void 1475igc_configure_queues(struct igc_softc *sc) 1476{ 1477 struct igc_hw *hw = &sc->hw; 1478 struct igc_queue *iq = sc->queues; 1479 uint32_t ivar, newitr = 0; 1480 int i; 1481 1482 /* First turn on RSS capability */ 1483 IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME | 1484 IGC_GPIE_PBA | IGC_GPIE_NSICR); 1485 1486 /* Set the starting interrupt rate */ 1487 newitr = (4000000 / MAX_INTS_PER_SEC) & 0x7FFC; 1488 1489 newitr |= IGC_EITR_CNT_IGNR; 1490 1491 /* Turn on MSI-X */ 1492 for (i = 0; i < sc->sc_nqueues; i++, iq++) { 1493 /* RX entries */ 1494 igc_set_queues(sc, i, iq->msix, 0); 1495 /* TX entries */ 1496 igc_set_queues(sc, i, iq->msix, 1); 1497 sc->msix_queuesmask |= iq->eims; 1498 IGC_WRITE_REG(hw, IGC_EITR(iq->msix), newitr); 1499 } 1500 1501 /* And for the link interrupt */ 1502 ivar = (sc->linkvec | IGC_IVAR_VALID) << 8; 1503 sc->msix_linkmask = 1 << sc->linkvec; 1504 
IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar); 1505} 1506 1507void 1508igc_set_queues(struct igc_softc *sc, uint32_t entry, uint32_t vector, int type) 1509{ 1510 struct igc_hw *hw = &sc->hw; 1511 uint32_t ivar, index; 1512 1513 index = entry >> 1; 1514 ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index); 1515 if (type) { 1516 if (entry & 1) { 1517 ivar &= 0x00FFFFFF; 1518 ivar |= (vector | IGC_IVAR_VALID) << 24; 1519 } else { 1520 ivar &= 0xFFFF00FF; 1521 ivar |= (vector | IGC_IVAR_VALID) << 8; 1522 } 1523 } else { 1524 if (entry & 1) { 1525 ivar &= 0xFF00FFFF; 1526 ivar |= (vector | IGC_IVAR_VALID) << 16; 1527 } else { 1528 ivar &= 0xFFFFFF00; 1529 ivar |= vector | IGC_IVAR_VALID; 1530 } 1531 } 1532 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar); 1533} 1534 1535void 1536igc_enable_queue(struct igc_softc *sc, uint32_t eims) 1537{ 1538 IGC_WRITE_REG(&sc->hw, IGC_EIMS, eims); 1539} 1540 1541void 1542igc_enable_intr(struct igc_softc *sc) 1543{ 1544 struct igc_hw *hw = &sc->hw; 1545 uint32_t mask; 1546 1547 mask = (sc->msix_queuesmask | sc->msix_linkmask); 1548 IGC_WRITE_REG(hw, IGC_EIAC, mask); 1549 IGC_WRITE_REG(hw, IGC_EIAM, mask); 1550 IGC_WRITE_REG(hw, IGC_EIMS, mask); 1551 IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC); 1552 IGC_WRITE_FLUSH(hw); 1553} 1554 1555void 1556igc_disable_intr(struct igc_softc *sc) 1557{ 1558 struct igc_hw *hw = &sc->hw; 1559 1560 IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff); 1561 IGC_WRITE_REG(hw, IGC_EIAC, 0); 1562 IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff); 1563 IGC_WRITE_FLUSH(hw); 1564} 1565 1566int 1567igc_intr_link(void *arg) 1568{ 1569 struct igc_softc *sc = (struct igc_softc *)arg; 1570 uint32_t reg_icr; 1571 1572 if (reg_icr & IGC_ICR_LSC) { 1573 KERNEL_LOCK(); 1574 sc->hw.mac.get_link_status = true; 1575 igc_update_link_status(sc); 1576 KERNEL_UNLOCK(); 1577 } 1578 1579 IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC); 1580 IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->msix_linkmask); 1581 1582 return 1; 1583} 1584 1585int 1586igc_intr_queue(void *arg) 1587{ 1588 
struct igc_queue *iq = arg; 1589 struct igc_softc *sc = iq->sc; 1590 struct ifnet *ifp = &sc->sc_ac.ac_if; 1591 struct rx_ring *rxr = iq->rxr; 1592 1593 if (ifp->if_flags & IFF_RUNNING) { 1594 igc_rxeof(rxr); 1595 igc_rxrefill(rxr); 1596 } 1597 1598 igc_enable_queue(sc, iq->eims); 1599 1600 return 1; 1601} 1602 1603/********************************************************************* 1604 * 1605 * Allocate memory for tx_buffer structures. The tx_buffer stores all 1606 * the information needed to transmit a packet on the wire. 1607 * 1608 **********************************************************************/ 1609int 1610igc_allocate_transmit_buffers(struct tx_ring *txr) 1611{ 1612 struct igc_softc *sc = txr->sc; 1613 struct igc_tx_buf *txbuf; 1614 int error, i; 1615 1616 txr->tx_buffers = mallocarray(sc->num_tx_desc, 1617 sizeof(struct igc_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO); 1618 if (txr->tx_buffers == NULL) { 1619 printf("%s: Unable to allocate tx_buffer memory\n", 1620 DEVNAME(sc)); 1621 error = ENOMEM; 1622 goto fail; 1623 } 1624 txr->txtag = txr->txdma.dma_tag; 1625 1626 /* Create the descriptor buffer dma maps. */ 1627 for (i = 0; i < sc->num_tx_desc; i++) { 1628 txbuf = &txr->tx_buffers[i]; 1629 error = bus_dmamap_create(txr->txdma.dma_tag, IGC_TSO_SIZE, 1630 IGC_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT, &txbuf->map); 1631 if (error != 0) { 1632 printf("%s: Unable to create TX DMA map\n", 1633 DEVNAME(sc)); 1634 goto fail; 1635 } 1636 } 1637 1638 return 0; 1639fail: 1640 return error; 1641} 1642 1643 1644/********************************************************************* 1645 * 1646 * Allocate and initialize transmit structures. 
1647 * 1648 **********************************************************************/ 1649int 1650igc_setup_transmit_structures(struct igc_softc *sc) 1651{ 1652 struct tx_ring *txr = sc->tx_rings; 1653 int i; 1654 1655 for (i = 0; i < sc->sc_nqueues; i++, txr++) { 1656 if (igc_setup_transmit_ring(txr)) 1657 goto fail; 1658 } 1659 1660 return 0; 1661fail: 1662 igc_free_transmit_structures(sc); 1663 return ENOBUFS; 1664} 1665 1666/********************************************************************* 1667 * 1668 * Initialize a transmit ring. 1669 * 1670 **********************************************************************/ 1671int 1672igc_setup_transmit_ring(struct tx_ring *txr) 1673{ 1674 struct igc_softc *sc = txr->sc; 1675 1676 /* Now allocate transmit buffers for the ring. */ 1677 if (igc_allocate_transmit_buffers(txr)) 1678 return ENOMEM; 1679 1680 /* Clear the old ring contents */ 1681 bzero((void *)txr->tx_base, 1682 (sizeof(union igc_adv_tx_desc)) * sc->num_tx_desc); 1683 1684 /* Reset indices. */ 1685 txr->next_avail_desc = 0; 1686 txr->next_to_clean = 0; 1687 1688 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0, 1689 txr->txdma.dma_map->dm_mapsize, 1690 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1691 1692 return 0; 1693} 1694 1695/********************************************************************* 1696 * 1697 * Enable transmit unit. 1698 * 1699 **********************************************************************/ 1700void 1701igc_initialize_transmit_unit(struct igc_softc *sc) 1702{ 1703 struct ifnet *ifp = &sc->sc_ac.ac_if; 1704 struct tx_ring *txr; 1705 struct igc_hw *hw = &sc->hw; 1706 uint64_t bus_addr; 1707 uint32_t tctl, txdctl = 0; 1708 int i; 1709 1710 /* Setup the Base and Length of the TX descriptor ring. 
*/ 1711 for (i = 0; i < sc->sc_nqueues; i++) { 1712 txr = &sc->tx_rings[i]; 1713 1714 bus_addr = txr->txdma.dma_map->dm_segs[0].ds_addr; 1715 1716 /* Base and len of TX ring */ 1717 IGC_WRITE_REG(hw, IGC_TDLEN(i), 1718 sc->num_tx_desc * sizeof(union igc_adv_tx_desc)); 1719 IGC_WRITE_REG(hw, IGC_TDBAH(i), (uint32_t)(bus_addr >> 32)); 1720 IGC_WRITE_REG(hw, IGC_TDBAL(i), (uint32_t)bus_addr); 1721 1722 /* Init the HEAD/TAIL indices */ 1723 IGC_WRITE_REG(hw, IGC_TDT(i), 0); 1724 IGC_WRITE_REG(hw, IGC_TDH(i), 0); 1725 1726 txr->watchdog_timer = 0; 1727 1728 txdctl = 0; /* Clear txdctl */ 1729 txdctl |= 0x1f; /* PTHRESH */ 1730 txdctl |= 1 << 8; /* HTHRESH */ 1731 txdctl |= 1 << 16; /* WTHRESH */ 1732 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ 1733 txdctl |= IGC_TXDCTL_GRAN; 1734 txdctl |= 1 << 25; /* LWTHRESH */ 1735 1736 IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl); 1737 } 1738 ifp->if_timer = 0; 1739 1740 /* Program the Transmit Control Register */ 1741 tctl = IGC_READ_REG(&sc->hw, IGC_TCTL); 1742 tctl &= ~IGC_TCTL_CT; 1743 tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN | 1744 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT)); 1745 1746 /* This write will effectively turn on the transmit unit. */ 1747 IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl); 1748} 1749 1750/********************************************************************* 1751 * 1752 * Free all transmit rings. 1753 * 1754 **********************************************************************/ 1755void 1756igc_free_transmit_structures(struct igc_softc *sc) 1757{ 1758 struct tx_ring *txr = sc->tx_rings; 1759 int i; 1760 1761 for (i = 0; i < sc->sc_nqueues; i++, txr++) 1762 igc_free_transmit_buffers(txr); 1763} 1764 1765/********************************************************************* 1766 * 1767 * Free transmit ring related data structures. 
1768 * 1769 **********************************************************************/ 1770void 1771igc_free_transmit_buffers(struct tx_ring *txr) 1772{ 1773 struct igc_softc *sc = txr->sc; 1774 struct igc_tx_buf *txbuf; 1775 int i; 1776 1777 if (txr->tx_buffers == NULL) 1778 return; 1779 1780 txbuf = txr->tx_buffers; 1781 for (i = 0; i < sc->num_tx_desc; i++, txbuf++) { 1782 if (txbuf->map != NULL && txbuf->map->dm_nsegs > 0) { 1783 bus_dmamap_sync(txr->txdma.dma_tag, txbuf->map, 1784 0, txbuf->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1785 bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map); 1786 } 1787 if (txbuf->m_head != NULL) { 1788 m_freem(txbuf->m_head); 1789 txbuf->m_head = NULL; 1790 } 1791 if (txbuf->map != NULL) { 1792 bus_dmamap_destroy(txr->txdma.dma_tag, txbuf->map); 1793 txbuf->map = NULL; 1794 } 1795 } 1796 1797 if (txr->tx_buffers != NULL) 1798 free(txr->tx_buffers, M_DEVBUF, 1799 sc->num_tx_desc * sizeof(struct igc_tx_buf)); 1800 txr->tx_buffers = NULL; 1801 txr->txtag = NULL; 1802} 1803 1804/********************************************************************* 1805 * 1806 * Allocate memory for rx_buffer structures. Since we use one 1807 * rx_buffer per received packet, the maximum number of rx_buffer's 1808 * that we'll need is equal to the number of receive descriptors 1809 * that we've allocated. 
1810 * 1811 **********************************************************************/ 1812int 1813igc_allocate_receive_buffers(struct rx_ring *rxr) 1814{ 1815 struct igc_softc *sc = rxr->sc; 1816 struct igc_rx_buf *rxbuf; 1817 int i, error; 1818 1819 rxr->rx_buffers = mallocarray(sc->num_rx_desc, 1820 sizeof(struct igc_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO); 1821 if (rxr->rx_buffers == NULL) { 1822 printf("%s: Unable to allocate rx_buffer memory\n", 1823 DEVNAME(sc)); 1824 error = ENOMEM; 1825 goto fail; 1826 } 1827 1828 rxbuf = rxr->rx_buffers; 1829 for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) { 1830 error = bus_dmamap_create(rxr->rxdma.dma_tag, 1831 MAX_JUMBO_FRAME_SIZE, 1, MAX_JUMBO_FRAME_SIZE, 0, 1832 BUS_DMA_NOWAIT, &rxbuf->map); 1833 if (error) { 1834 printf("%s: Unable to create RX DMA map\n", 1835 DEVNAME(sc)); 1836 goto fail; 1837 } 1838 } 1839 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0, 1840 rxr->rxdma.dma_map->dm_mapsize, 1841 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1842 1843 return 0; 1844fail: 1845 return error; 1846} 1847 1848/********************************************************************* 1849 * 1850 * Allocate and initialize receive structures. 1851 * 1852 **********************************************************************/ 1853int 1854igc_setup_receive_structures(struct igc_softc *sc) 1855{ 1856 struct rx_ring *rxr = sc->rx_rings; 1857 int i; 1858 1859 for (i = 0; i < sc->sc_nqueues; i++, rxr++) { 1860 if (igc_setup_receive_ring(rxr)) 1861 goto fail; 1862 } 1863 1864 return 0; 1865fail: 1866 igc_free_receive_structures(sc); 1867 return ENOBUFS; 1868} 1869 1870/********************************************************************* 1871 * 1872 * Initialize a receive ring and its buffers. 
1873 * 1874 **********************************************************************/ 1875int 1876igc_setup_receive_ring(struct rx_ring *rxr) 1877{ 1878 struct igc_softc *sc = rxr->sc; 1879 struct ifnet *ifp = &sc->sc_ac.ac_if; 1880 int rsize; 1881 1882 rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc), 1883 IGC_DBA_ALIGN); 1884 1885 /* Clear the ring contents. */ 1886 bzero((void *)rxr->rx_base, rsize); 1887 1888 if (igc_allocate_receive_buffers(rxr)) 1889 return ENOMEM; 1890 1891 /* Setup our descriptor indices. */ 1892 rxr->next_to_check = 0; 1893 rxr->last_desc_filled = sc->num_rx_desc - 1; 1894 1895 if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1), 1896 sc->num_rx_desc - 1); 1897 1898 return 0; 1899} 1900 1901/********************************************************************* 1902 * 1903 * Enable receive unit. 1904 * 1905 **********************************************************************/ 1906void 1907igc_initialize_receive_unit(struct igc_softc *sc) 1908{ 1909 struct rx_ring *rxr = sc->rx_rings; 1910 struct igc_hw *hw = &sc->hw; 1911 uint32_t rctl, rxcsum, srrctl = 0; 1912 int i; 1913 1914 /* 1915 * Make sure receives are disabled while setting 1916 * up the descriptor ring. 1917 */ 1918 rctl = IGC_READ_REG(hw, IGC_RCTL); 1919 IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN); 1920 1921 /* Setup the Receive Control Register */ 1922 rctl &= ~(3 << IGC_RCTL_MO_SHIFT); 1923 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO | 1924 IGC_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); 1925 1926 /* Do not store bad packets */ 1927 rctl &= ~IGC_RCTL_SBP; 1928 1929 /* Enable Long Packet receive */ 1930 if (sc->hw.mac.max_frame_size != ETHER_MAX_LEN) 1931 rctl |= IGC_RCTL_LPE; 1932 1933 /* Strip the CRC */ 1934 rctl |= IGC_RCTL_SECRC; 1935 1936 /* 1937 * Set the interrupt throttling rate. 
Value is calculated 1938 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) 1939 */ 1940 IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR); 1941 1942 rxcsum = IGC_READ_REG(hw, IGC_RXCSUM); 1943 rxcsum &= ~IGC_RXCSUM_PCSD; 1944 1945 if (sc->sc_nqueues > 1) 1946 rxcsum |= IGC_RXCSUM_PCSD; 1947 1948 IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum); 1949 1950 if (sc->sc_nqueues > 1) 1951 igc_initialize_rss_mapping(sc); 1952 1953#if 0 1954 srrctl |= 4096 >> IGC_SRRCTL_BSIZEPKT_SHIFT; 1955 rctl |= IGC_RCTL_SZ_4096 | IGC_RCTL_BSEX; 1956#endif 1957 1958 srrctl |= 2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT; 1959 rctl |= IGC_RCTL_SZ_2048; 1960 1961 /* 1962 * If TX flow control is disabled and there's > 1 queue defined, 1963 * enable DROP. 1964 * 1965 * This drops frames rather than hanging the RX MAC for all queues. 1966 */ 1967 if ((sc->sc_nqueues > 1) && (sc->fc == igc_fc_none || 1968 sc->fc == igc_fc_rx_pause)) { 1969 srrctl |= IGC_SRRCTL_DROP_EN; 1970 } 1971 1972 /* Setup the Base and Length of the RX descriptor rings. */ 1973 for (i = 0; i < sc->sc_nqueues; i++, rxr++) { 1974 IGC_WRITE_REG(hw, IGC_RXDCTL(i), 0); 1975 uint64_t bus_addr = rxr->rxdma.dma_map->dm_segs[0].ds_addr; 1976 uint32_t rxdctl; 1977 1978 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; 1979 1980 IGC_WRITE_REG(hw, IGC_RDLEN(i), 1981 sc->num_rx_desc * sizeof(union igc_adv_rx_desc)); 1982 IGC_WRITE_REG(hw, IGC_RDBAH(i), (uint32_t)(bus_addr >> 32)); 1983 IGC_WRITE_REG(hw, IGC_RDBAL(i), (uint32_t)bus_addr); 1984 IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl); 1985 1986 /* Setup the Head and Tail Descriptor Pointers */ 1987 IGC_WRITE_REG(hw, IGC_RDH(i), 0); 1988 IGC_WRITE_REG(hw, IGC_RDT(i), 0); 1989 1990 /* Enable this Queue */ 1991 rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i)); 1992 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; 1993 rxdctl &= 0xFFF00000; 1994 rxdctl |= IGC_RX_PTHRESH; 1995 rxdctl |= IGC_RX_HTHRESH << 8; 1996 rxdctl |= IGC_RX_WTHRESH << 16; 1997 IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl); 1998 } 1999 2000 /* Make sure VLAN Filters are off */ 2001 rctl 
&= ~IGC_RCTL_VFE; 2002 2003 /* Write out the settings */ 2004 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 2005} 2006 2007/********************************************************************* 2008 * 2009 * Free all receive rings. 2010 * 2011 **********************************************************************/ 2012void 2013igc_free_receive_structures(struct igc_softc *sc) 2014{ 2015 struct rx_ring *rxr; 2016 int i; 2017 2018 for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++) 2019 if_rxr_init(&rxr->rx_ring, 0, 0); 2020 2021 for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++) 2022 igc_free_receive_buffers(rxr); 2023} 2024 2025/********************************************************************* 2026 * 2027 * Free receive ring data structures 2028 * 2029 **********************************************************************/ 2030void 2031igc_free_receive_buffers(struct rx_ring *rxr) 2032{ 2033 struct igc_softc *sc = rxr->sc; 2034 struct igc_rx_buf *rxbuf; 2035 int i; 2036 2037 if (rxr->rx_buffers != NULL) { 2038 for (i = 0; i < sc->num_rx_desc; i++) { 2039 rxbuf = &rxr->rx_buffers[i]; 2040 if (rxbuf->buf != NULL) { 2041 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 2042 0, rxbuf->map->dm_mapsize, 2043 BUS_DMASYNC_POSTREAD); 2044 bus_dmamap_unload(rxr->rxdma.dma_tag, 2045 rxbuf->map); 2046 m_freem(rxbuf->buf); 2047 rxbuf->buf = NULL; 2048 } 2049 bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map); 2050 rxbuf->map = NULL; 2051 } 2052 free(rxr->rx_buffers, M_DEVBUF, 2053 sc->num_rx_desc * sizeof(struct igc_rx_buf)); 2054 rxr->rx_buffers = NULL; 2055 } 2056} 2057 2058/* 2059 * Initialise the RSS mapping for NICs that support multiple transmit/ 2060 * receive rings. 
2061 */ 2062void 2063igc_initialize_rss_mapping(struct igc_softc *sc) 2064{ 2065 struct igc_hw *hw = &sc->hw; 2066 uint32_t rss_key[10], mrqc, reta, shift = 0; 2067 int i, queue_id; 2068 2069 /* 2070 * The redirection table controls which destination 2071 * queue each bucket redirects traffic to. 2072 * Each DWORD represents four queues, with the LSB 2073 * being the first queue in the DWORD. 2074 * 2075 * This just allocates buckets to queues using round-robin 2076 * allocation. 2077 * 2078 * NOTE: It Just Happens to line up with the default 2079 * RSS allocation method. 2080 */ 2081 2082 /* Warning FM follows */ 2083 reta = 0; 2084 for (i = 0; i < 128; i++) { 2085 queue_id = (i % sc->sc_nqueues); 2086 /* Adjust if required */ 2087 queue_id = queue_id << shift; 2088 2089 /* 2090 * The low 8 bits are for hash value (n+0); 2091 * The next 8 bits are for hash value (n+1), etc. 2092 */ 2093 reta = reta >> 8; 2094 reta = reta | ( ((uint32_t) queue_id) << 24); 2095 if ((i & 3) == 3) { 2096 IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta); 2097 reta = 0; 2098 } 2099 } 2100 2101 /* 2102 * MRQC: Multiple Receive Queues Command 2103 * Set queuing to RSS control, number depends on the device. 2104 */ 2105 mrqc = IGC_MRQC_ENABLE_RSS_4Q; 2106 2107 /* Set up random bits */ 2108 stoeplitz_to_key(&rss_key, sizeof(rss_key)); 2109 2110 /* Now fill our hash function seeds */ 2111 for (i = 0; i < 10; i++) 2112 IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]); 2113 2114 /* 2115 * Configure the RSS fields to hash upon. 2116 */ 2117 mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 | IGC_MRQC_RSS_FIELD_IPV4_TCP); 2118 mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 | IGC_MRQC_RSS_FIELD_IPV6_TCP); 2119 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; 2120 2121 IGC_WRITE_REG(hw, IGC_MRQC, mrqc); 2122} 2123 2124/* 2125 * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. 2126 * For ASF and Pass Through versions of f/w this means 2127 * that the driver is loaded. 
For AMT version type f/w 2128 * this means that the network i/f is open. 2129 */ 2130void 2131igc_get_hw_control(struct igc_softc *sc) 2132{ 2133 uint32_t ctrl_ext; 2134 2135 ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT); 2136 IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 2137} 2138 2139/* 2140 * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 2141 * For ASF and Pass Through versions of f/w this means that 2142 * the driver is no longer loaded. For AMT versions of the 2143 * f/w this means that the network i/f is closed. 2144 */ 2145void 2146igc_release_hw_control(struct igc_softc *sc) 2147{ 2148 uint32_t ctrl_ext; 2149 2150 ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT); 2151 IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 2152} 2153 2154int 2155igc_is_valid_ether_addr(uint8_t *addr) 2156{ 2157 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; 2158 2159 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { 2160 return 0; 2161 } 2162 2163 return 1; 2164} 2165