1266423Sjfv/****************************************************************************** 2266423Sjfv 3349163Serj Copyright (c) 2013-2019, Intel Corporation 4266423Sjfv All rights reserved. 5349163Serj 6266423Sjfv Redistribution and use in source and binary forms, with or without 7266423Sjfv modification, are permitted provided that the following conditions are met: 8266423Sjfv 9266423Sjfv 1. Redistributions of source code must retain the above copyright notice, 10266423Sjfv this list of conditions and the following disclaimer. 11266423Sjfv 12266423Sjfv 2. Redistributions in binary form must reproduce the above copyright 13266423Sjfv notice, this list of conditions and the following disclaimer in the 14266423Sjfv documentation and/or other materials provided with the distribution. 15266423Sjfv 16266423Sjfv 3. Neither the name of the Intel Corporation nor the names of its 17266423Sjfv contributors may be used to endorse or promote products derived from 18266423Sjfv this software without specific prior written permission. 19266423Sjfv 20266423Sjfv THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21266423Sjfv AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22266423Sjfv IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23266423Sjfv ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24266423Sjfv LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25266423Sjfv CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26266423Sjfv SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27266423Sjfv INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28266423Sjfv CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29266423Sjfv ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30266423Sjfv POSSIBILITY OF SUCH DAMAGE. 
31266423Sjfv 32266423Sjfv******************************************************************************/ 33266423Sjfv/*$FreeBSD: stable/11/sys/dev/ixl/ixl_pf_main.c 369202 2021-02-02 14:27:33Z donner $*/ 34266423Sjfv 35279033Sjfv 36270346Sjfv#include "ixl_pf.h" 37269198Sjfv 38303816Ssbruno#ifdef PCI_IOV 39303816Ssbruno#include "ixl_pf_iov.h" 40277262Sjfv#endif 41277262Sjfv 42318357Serj#ifdef IXL_IW 43318357Serj#include "ixl_iw.h" 44318357Serj#include "ixl_iw_int.h" 45318357Serj#endif 46318357Serj 47303816Ssbruno#ifdef DEV_NETMAP 48349163Serj#include <dev/netmap/if_ixl_netmap.h> 49303816Ssbruno#endif /* DEV_NETMAP */ 50266423Sjfv 51333343Serjstatic int ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int); 52318357Serjstatic u64 ixl_max_aq_speed_to_value(u8); 53318357Serjstatic u8 ixl_convert_sysctl_aq_link_speed(u8, bool); 54333343Serjstatic void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool); 55349163Serjstatic enum i40e_status_code ixl_set_lla(struct ixl_vsi *); 56349163Serjstatic const char * ixl_link_speed_string(u8 link_speed); 57266423Sjfv 58349163Serj 59303816Ssbruno/* Sysctls */ 60333343Serjstatic int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS); 61333343Serjstatic int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS); 62333343Serjstatic int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS); 63333343Serjstatic int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS); 64299549Serjstatic int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); 65303816Ssbrunostatic int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS); 66303816Ssbrunostatic int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS); 67303816Ssbrunostatic int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS); 68349163Serjstatic int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS); 69299549Serj 70303816Ssbruno/* Debug Sysctls */ 71299549Serjstatic int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); 72299549Serjstatic int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); 73299549Serjstatic int 
ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); 74299549Serjstatic int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); 75299549Serjstatic int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); 76369202Sdonnerstatic int ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS); 77303816Ssbrunostatic int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS); 78318357Serjstatic int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS); 79303816Ssbrunostatic int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS); 80318357Serjstatic int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS); 81318357Serjstatic int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS); 82318357Serjstatic int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS); 83318357Serjstatic int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS); 84318357Serjstatic int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS); 85318357Serjstatic int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS); 86318357Serjstatic int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS); 87318357Serjstatic int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS); 88333343Serjstatic int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS); 89333343Serjstatic int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS); 90349163Serjstatic int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); 91299549Serj 92349163Serj 93318357Serj#ifdef IXL_IW 94318357Serjextern int ixl_enable_iwarp; 95333343Serjextern int ixl_limit_iwarp_msix; 96318357Serj#endif 97318357Serj 98323211Srlibbyconst uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = 99323211Srlibby {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 100323211Srlibby 101323211Srlibbyconst char * const ixl_fc_string[6] = { 102323211Srlibby "None", 103323211Srlibby "Rx", 104323211Srlibby "Tx", 105323211Srlibby "Full", 106323211Srlibby "Priority", 107323211Srlibby "Default" 108323211Srlibby}; 109323211Srlibby 110333343Serjstatic char *ixl_fec_string[3] = { 111333343Serj "CL108 RS-FEC", 112333343Serj "CL74 FC-FEC/BASE-R", 113333343Serj "None" 114333343Serj}; 115333343Serj 116323211SrlibbyMALLOC_DEFINE(M_IXL, "ixl", "ixl driver 
allocations"); 117323211Srlibby 118303816Ssbrunovoid 119318357Serjixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...) 120303816Ssbruno{ 121303816Ssbruno va_list args; 122266423Sjfv 123303816Ssbruno if (!(mask & pf->dbg_mask)) 124303816Ssbruno return; 125266423Sjfv 126318357Serj /* Re-implement device_printf() */ 127318357Serj device_print_prettyname(pf->dev); 128303816Ssbruno va_start(args, fmt); 129318357Serj vprintf(fmt, args); 130303816Ssbruno va_end(args); 131303816Ssbruno} 132266423Sjfv 133303816Ssbruno/* 134303816Ssbruno** Put the FW, API, NVM, EEtrackID, and OEM version information into a string 135303816Ssbruno*/ 136303816Ssbrunovoid 137303816Ssbrunoixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf) 138303816Ssbruno{ 139303816Ssbruno u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24); 140303816Ssbruno u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF); 141303816Ssbruno u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF); 142266423Sjfv 143303816Ssbruno sbuf_printf(buf, 144303816Ssbruno "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d", 145303816Ssbruno hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, 146303816Ssbruno hw->aq.api_maj_ver, hw->aq.api_min_ver, 147303816Ssbruno (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >> 148303816Ssbruno IXL_NVM_VERSION_HI_SHIFT, 149303816Ssbruno (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >> 150303816Ssbruno IXL_NVM_VERSION_LO_SHIFT, 151303816Ssbruno hw->nvm.eetrack, 152303816Ssbruno oem_ver, oem_build, oem_patch); 153303816Ssbruno} 154279858Sjfv 155303816Ssbrunovoid 156303816Ssbrunoixl_print_nvm_version(struct ixl_pf *pf) 157303816Ssbruno{ 158303816Ssbruno struct i40e_hw *hw = &pf->hw; 159303816Ssbruno device_t dev = pf->dev; 160303816Ssbruno struct sbuf *sbuf; 161279858Sjfv 162303816Ssbruno sbuf = sbuf_new_auto(); 163303816Ssbruno ixl_nvm_version_str(hw, sbuf); 164303816Ssbruno sbuf_finish(sbuf); 165303816Ssbruno device_printf(dev, "%s\n", sbuf_data(sbuf)); 166303816Ssbruno 
sbuf_delete(sbuf); 167303816Ssbruno} 168279858Sjfv 169349163Serjbool 170349163Serjixl_fw_recovery_mode(struct ixl_pf *pf) 171349163Serj{ 172349163Serj return (rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK); 173349163Serj} 174349163Serj 175303816Ssbrunostatic void 176303816Ssbrunoixl_configure_tx_itr(struct ixl_pf *pf) 177303816Ssbruno{ 178303816Ssbruno struct i40e_hw *hw = &pf->hw; 179303816Ssbruno struct ixl_vsi *vsi = &pf->vsi; 180303816Ssbruno struct ixl_queue *que = vsi->queues; 181279858Sjfv 182303816Ssbruno vsi->tx_itr_setting = pf->tx_itr; 183266423Sjfv 184303816Ssbruno for (int i = 0; i < vsi->num_queues; i++, que++) { 185303816Ssbruno struct tx_ring *txr = &que->txr; 186266423Sjfv 187303816Ssbruno wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i), 188303816Ssbruno vsi->tx_itr_setting); 189303816Ssbruno txr->itr = vsi->tx_itr_setting; 190303816Ssbruno txr->latency = IXL_AVE_LATENCY; 191303816Ssbruno } 192303816Ssbruno} 193266423Sjfv 194303816Ssbrunostatic void 195303816Ssbrunoixl_configure_rx_itr(struct ixl_pf *pf) 196303816Ssbruno{ 197303816Ssbruno struct i40e_hw *hw = &pf->hw; 198303816Ssbruno struct ixl_vsi *vsi = &pf->vsi; 199303816Ssbruno struct ixl_queue *que = vsi->queues; 200266423Sjfv 201303816Ssbruno vsi->rx_itr_setting = pf->rx_itr; 202279860Sjfv 203303816Ssbruno for (int i = 0; i < vsi->num_queues; i++, que++) { 204303816Ssbruno struct rx_ring *rxr = &que->rxr; 205269198Sjfv 206303816Ssbruno wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i), 207303816Ssbruno vsi->rx_itr_setting); 208303816Ssbruno rxr->itr = vsi->rx_itr_setting; 209303816Ssbruno rxr->latency = IXL_AVE_LATENCY; 210303816Ssbruno } 211303816Ssbruno} 212270346Sjfv 213270346Sjfv/* 214303816Ssbruno * Write PF ITR values to queue ITR registers. 
215266423Sjfv */ 216303816Ssbrunovoid 217303816Ssbrunoixl_configure_itr(struct ixl_pf *pf) 218266423Sjfv{ 219303816Ssbruno ixl_configure_tx_itr(pf); 220303816Ssbruno ixl_configure_rx_itr(pf); 221303816Ssbruno} 222266423Sjfv 223266423Sjfv/********************************************************************* 224303816Ssbruno * Init entry point 225266423Sjfv * 226303816Ssbruno * This routine is used in two ways. It is used by the stack as 227303816Ssbruno * init entry point in network interface structure. It is also used 228303816Ssbruno * by the driver as a hw/sw initialization routine to get to a 229303816Ssbruno * consistent state. 230266423Sjfv * 231266423Sjfv * return 0 on success, positive on failure 232303816Ssbruno **********************************************************************/ 233303816Ssbrunovoid 234303816Ssbrunoixl_init_locked(struct ixl_pf *pf) 235266423Sjfv{ 236303816Ssbruno struct i40e_hw *hw = &pf->hw; 237303816Ssbruno struct ixl_vsi *vsi = &pf->vsi; 238303816Ssbruno struct ifnet *ifp = vsi->ifp; 239303816Ssbruno device_t dev = pf->dev; 240303816Ssbruno struct i40e_filter_control_settings filter; 241266423Sjfv 242303816Ssbruno INIT_DEBUGOUT("ixl_init_locked: begin"); 243318357Serj IXL_PF_LOCK_ASSERT(pf); 244266423Sjfv 245349163Serj if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) { 246349163Serj device_printf(dev, "Running in recovery mode, only firmware update available\n"); 247349163Serj return; 248349163Serj } 249349163Serj 250303816Ssbruno ixl_stop_locked(pf); 251266423Sjfv 252318357Serj /* 253318357Serj * If the aq is dead here, it probably means something outside of the driver 254318357Serj * did something to the adapter, like a PF reset. 255318357Serj * So rebuild the driver's state here if that occurs. 
256318357Serj */ 257318357Serj if (!i40e_check_asq_alive(&pf->hw)) { 258318357Serj device_printf(dev, "Admin Queue is down; resetting...\n"); 259318357Serj ixl_teardown_hw_structs(pf); 260318357Serj ixl_reset(pf); 261318357Serj } 262318357Serj 263303816Ssbruno /* Get the latest mac address... User might use a LAA */ 264349163Serj if (ixl_set_lla(vsi)) { 265349163Serj device_printf(dev, "LLA address change failed!\n"); 266349163Serj return; 267266423Sjfv } 268266423Sjfv 269303816Ssbruno /* Set the various hardware offload abilities */ 270303816Ssbruno ifp->if_hwassist = 0; 271303816Ssbruno if (ifp->if_capenable & IFCAP_TSO) 272303816Ssbruno ifp->if_hwassist |= CSUM_TSO; 273303816Ssbruno if (ifp->if_capenable & IFCAP_TXCSUM) 274303816Ssbruno ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); 275303816Ssbruno if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 276303816Ssbruno ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); 277266423Sjfv 278303816Ssbruno /* Set up the device filtering */ 279303816Ssbruno bzero(&filter, sizeof(filter)); 280303816Ssbruno filter.enable_ethtype = TRUE; 281303816Ssbruno filter.enable_macvlan = TRUE; 282303816Ssbruno filter.enable_fdir = FALSE; 283303816Ssbruno filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; 284303816Ssbruno if (i40e_set_filter_control(hw, &filter)) 285303816Ssbruno device_printf(dev, "i40e_set_filter_control() failed\n"); 286299549Serj 287303816Ssbruno /* Prepare the VSI: rings, hmc contexts, etc... 
*/ 288303816Ssbruno if (ixl_initialize_vsi(vsi)) { 289303816Ssbruno device_printf(dev, "initialize vsi failed!!\n"); 290303816Ssbruno return; 291266423Sjfv } 292266423Sjfv 293303816Ssbruno /* Set up RSS */ 294303816Ssbruno ixl_config_rss(pf); 295299552Serj 296303816Ssbruno /* Set up MSI/X routing and the ITR settings */ 297318357Serj if (pf->msix > 1) { 298303816Ssbruno ixl_configure_queue_intr_msix(pf); 299303816Ssbruno ixl_configure_itr(pf); 300303816Ssbruno } else 301303816Ssbruno ixl_configure_legacy(pf); 302266423Sjfv 303303816Ssbruno ixl_enable_rings(vsi); 304266423Sjfv 305303816Ssbruno i40e_aq_set_default_vsi(hw, vsi->seid, NULL); 306266423Sjfv 307303816Ssbruno ixl_reconfigure_filters(vsi); 308266423Sjfv 309349163Serj /* Check if PROMISC or ALLMULTI flags have been set 310349163Serj * by user before bringing interface up */ 311349163Serj ixl_set_promisc(vsi); 312349163Serj 313303816Ssbruno /* And now turn on interrupts */ 314303816Ssbruno ixl_enable_intr(vsi); 315269198Sjfv 316303816Ssbruno /* Get link info */ 317299547Serj hw->phy.get_link_info = TRUE; 318284049Sjfv i40e_get_link_status(hw, &pf->link_up); 319303816Ssbruno ixl_update_link_status(pf); 320266423Sjfv 321303816Ssbruno /* Now inform the stack we're ready */ 322303816Ssbruno ifp->if_drv_flags |= IFF_DRV_RUNNING; 323318357Serj 324318357Serj#ifdef IXL_IW 325318357Serj if (ixl_enable_iwarp && pf->iw_enabled) { 326349163Serj int ret = ixl_iw_pf_init(pf); 327318357Serj if (ret) 328318357Serj device_printf(dev, 329318357Serj "initialize iwarp failed, code %d\n", ret); 330318357Serj } 331318357Serj#endif 332266423Sjfv} 333266423Sjfv 334266423Sjfv 335266423Sjfv/********************************************************************* 336266423Sjfv * 337266423Sjfv * Get the hardware capabilities 338266423Sjfv * 339266423Sjfv **********************************************************************/ 340266423Sjfv 341303816Ssbrunoint 342270346Sjfvixl_get_hw_capabilities(struct ixl_pf *pf) 343266423Sjfv{ 
344266423Sjfv struct i40e_aqc_list_capabilities_element_resp *buf; 345266423Sjfv struct i40e_hw *hw = &pf->hw; 346266423Sjfv device_t dev = pf->dev; 347349163Serj int error, len, i2c_intfc_num; 348266423Sjfv u16 needed; 349266423Sjfv bool again = TRUE; 350266423Sjfv 351349163Serj if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) { 352349163Serj hw->func_caps.iwarp = 0; 353349163Serj return 0; 354349163Serj } 355349163Serj 356266423Sjfv len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 357266423Sjfvretry: 358266423Sjfv if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) 359266423Sjfv malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) { 360266423Sjfv device_printf(dev, "Unable to allocate cap memory\n"); 361266423Sjfv return (ENOMEM); 362266423Sjfv } 363266423Sjfv 364266423Sjfv /* This populates the hw struct */ 365266423Sjfv error = i40e_aq_discover_capabilities(hw, buf, len, 366266423Sjfv &needed, i40e_aqc_opc_list_func_capabilities, NULL); 367266423Sjfv free(buf, M_DEVBUF); 368266423Sjfv if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && 369266423Sjfv (again == TRUE)) { 370266423Sjfv /* retry once with a larger buffer */ 371266423Sjfv again = FALSE; 372266423Sjfv len = needed; 373266423Sjfv goto retry; 374266423Sjfv } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 375266423Sjfv device_printf(dev, "capability discovery failed: %d\n", 376266423Sjfv pf->hw.aq.asq_last_status); 377266423Sjfv return (ENODEV); 378266423Sjfv } 379266423Sjfv 380270346Sjfv#ifdef IXL_DEBUG 381299553Serj device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, " 382266423Sjfv "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n", 383266423Sjfv hw->pf_id, hw->func_caps.num_vfs, 384266423Sjfv hw->func_caps.num_msix_vectors, 385266423Sjfv hw->func_caps.num_msix_vectors_vf, 386266423Sjfv hw->func_caps.fd_filters_guaranteed, 387266423Sjfv hw->func_caps.fd_filters_best_effort, 388266423Sjfv hw->func_caps.num_tx_qp, 389266423Sjfv hw->func_caps.num_rx_qp, 
390266423Sjfv hw->func_caps.base_queue); 391266423Sjfv#endif 392349163Serj /* 393349163Serj * Some devices have both MDIO and I2C; since this isn't reported 394349163Serj * by the FW, check registers to see if an I2C interface exists. 395349163Serj */ 396349163Serj i2c_intfc_num = ixl_find_i2c_interface(pf); 397349163Serj if (i2c_intfc_num != -1) 398333343Serj pf->has_i2c = true; 399333343Serj 400349163Serj /* Determine functions to use for driver I2C accesses */ 401349163Serj switch (pf->i2c_access_method) { 402349163Serj case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: { 403349163Serj if (hw->mac.type == I40E_MAC_XL710 && 404349163Serj hw->aq.api_maj_ver == 1 && 405349163Serj hw->aq.api_min_ver >= 7) { 406349163Serj pf->read_i2c_byte = ixl_read_i2c_byte_aq; 407349163Serj pf->write_i2c_byte = ixl_write_i2c_byte_aq; 408349163Serj } else { 409349163Serj pf->read_i2c_byte = ixl_read_i2c_byte_reg; 410349163Serj pf->write_i2c_byte = ixl_write_i2c_byte_reg; 411349163Serj } 412349163Serj break; 413349163Serj } 414349163Serj case IXL_I2C_ACCESS_METHOD_AQ: 415349163Serj pf->read_i2c_byte = ixl_read_i2c_byte_aq; 416349163Serj pf->write_i2c_byte = ixl_write_i2c_byte_aq; 417349163Serj break; 418349163Serj case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD: 419349163Serj pf->read_i2c_byte = ixl_read_i2c_byte_reg; 420349163Serj pf->write_i2c_byte = ixl_write_i2c_byte_reg; 421349163Serj break; 422349163Serj case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS: 423349163Serj pf->read_i2c_byte = ixl_read_i2c_byte_bb; 424349163Serj pf->write_i2c_byte = ixl_write_i2c_byte_bb; 425349163Serj break; 426349163Serj default: 427349163Serj /* Should not happen */ 428349163Serj device_printf(dev, "Error setting I2C access functions\n"); 429349163Serj break; 430349163Serj } 431349163Serj 432303816Ssbruno /* Print a subset of the capability information. 
*/ 433303816Ssbruno device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n", 434303816Ssbruno hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, 435303816Ssbruno hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp, 436303816Ssbruno (hw->func_caps.mdio_port_mode == 2) ? "I2C" : 437333343Serj (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" : 438303816Ssbruno (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" : 439303816Ssbruno "MDIO shared"); 440303816Ssbruno 441266423Sjfv return (error); 442266423Sjfv} 443266423Sjfv 444303816Ssbrunovoid 445270346Sjfvixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) 446266423Sjfv{ 447266423Sjfv device_t dev = vsi->dev; 448266423Sjfv 449266423Sjfv /* Enable/disable TXCSUM/TSO4 */ 450266423Sjfv if (!(ifp->if_capenable & IFCAP_TXCSUM) 451266423Sjfv && !(ifp->if_capenable & IFCAP_TSO4)) { 452266423Sjfv if (mask & IFCAP_TXCSUM) { 453266423Sjfv ifp->if_capenable |= IFCAP_TXCSUM; 454266423Sjfv /* enable TXCSUM, restore TSO if previously enabled */ 455270346Sjfv if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { 456270346Sjfv vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; 457266423Sjfv ifp->if_capenable |= IFCAP_TSO4; 458266423Sjfv } 459266423Sjfv } 460266423Sjfv else if (mask & IFCAP_TSO4) { 461266423Sjfv ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); 462270346Sjfv vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; 463266423Sjfv device_printf(dev, 464266423Sjfv "TSO4 requires txcsum, enabling both...\n"); 465266423Sjfv } 466266423Sjfv } else if((ifp->if_capenable & IFCAP_TXCSUM) 467266423Sjfv && !(ifp->if_capenable & IFCAP_TSO4)) { 468266423Sjfv if (mask & IFCAP_TXCSUM) 469266423Sjfv ifp->if_capenable &= ~IFCAP_TXCSUM; 470266423Sjfv else if (mask & IFCAP_TSO4) 471266423Sjfv ifp->if_capenable |= IFCAP_TSO4; 472266423Sjfv } else if((ifp->if_capenable & IFCAP_TXCSUM) 473266423Sjfv && (ifp->if_capenable & IFCAP_TSO4)) { 474266423Sjfv if (mask & IFCAP_TXCSUM) { 475270346Sjfv vsi->flags |= 
IXL_FLAGS_KEEP_TSO4; 476266423Sjfv ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); 477266423Sjfv device_printf(dev, 478266423Sjfv "TSO4 requires txcsum, disabling both...\n"); 479266423Sjfv } else if (mask & IFCAP_TSO4) 480266423Sjfv ifp->if_capenable &= ~IFCAP_TSO4; 481266423Sjfv } 482266423Sjfv 483266423Sjfv /* Enable/disable TXCSUM_IPV6/TSO6 */ 484266423Sjfv if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6) 485266423Sjfv && !(ifp->if_capenable & IFCAP_TSO6)) { 486266423Sjfv if (mask & IFCAP_TXCSUM_IPV6) { 487266423Sjfv ifp->if_capenable |= IFCAP_TXCSUM_IPV6; 488270346Sjfv if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { 489270346Sjfv vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; 490266423Sjfv ifp->if_capenable |= IFCAP_TSO6; 491266423Sjfv } 492266423Sjfv } else if (mask & IFCAP_TSO6) { 493266423Sjfv ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); 494270346Sjfv vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; 495266423Sjfv device_printf(dev, 496266423Sjfv "TSO6 requires txcsum6, enabling both...\n"); 497266423Sjfv } 498266423Sjfv } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6) 499266423Sjfv && !(ifp->if_capenable & IFCAP_TSO6)) { 500266423Sjfv if (mask & IFCAP_TXCSUM_IPV6) 501266423Sjfv ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6; 502266423Sjfv else if (mask & IFCAP_TSO6) 503266423Sjfv ifp->if_capenable |= IFCAP_TSO6; 504266423Sjfv } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) 505266423Sjfv && (ifp->if_capenable & IFCAP_TSO6)) { 506266423Sjfv if (mask & IFCAP_TXCSUM_IPV6) { 507270346Sjfv vsi->flags |= IXL_FLAGS_KEEP_TSO6; 508266423Sjfv ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); 509266423Sjfv device_printf(dev, 510266423Sjfv "TSO6 requires txcsum6, disabling both...\n"); 511266423Sjfv } else if (mask & IFCAP_TSO6) 512266423Sjfv ifp->if_capenable &= ~IFCAP_TSO6; 513266423Sjfv } 514266423Sjfv} 515266423Sjfv 516299553Serj/* For the set_advertise sysctl */ 517303816Ssbrunovoid 518333343Serjixl_set_initial_advertised_speeds(struct ixl_pf *pf) 519299553Serj{ 520299553Serj device_t 
dev = pf->dev; 521333343Serj int err; 522299553Serj 523333343Serj /* Make sure to initialize the device to the complete list of 524333343Serj * supported speeds on driver load, to ensure unloading and 525333343Serj * reloading the driver will restore this value. 526333343Serj */ 527333343Serj err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true); 528333343Serj if (err) { 529299553Serj /* Non-fatal error */ 530333343Serj device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n", 531333343Serj __func__, err); 532299553Serj return; 533299553Serj } 534299553Serj 535318357Serj pf->advertised_speed = 536333343Serj ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 537299553Serj} 538299553Serj 539303816Ssbrunoint 540299548Serjixl_teardown_hw_structs(struct ixl_pf *pf) 541299548Serj{ 542299548Serj enum i40e_status_code status = 0; 543299548Serj struct i40e_hw *hw = &pf->hw; 544299548Serj device_t dev = pf->dev; 545299548Serj 546299548Serj /* Shutdown LAN HMC */ 547299548Serj if (hw->hmc.hmc_obj) { 548299548Serj status = i40e_shutdown_lan_hmc(hw); 549299548Serj if (status) { 550299548Serj device_printf(dev, 551299548Serj "init: LAN HMC shutdown failure; status %d\n", status); 552299548Serj goto err_out; 553299548Serj } 554299548Serj } 555299548Serj 556299548Serj /* Shutdown admin queue */ 557333343Serj ixl_disable_intr0(hw); 558299548Serj status = i40e_shutdown_adminq(hw); 559299548Serj if (status) 560299548Serj device_printf(dev, 561299548Serj "init: Admin Queue shutdown failure; status %d\n", status); 562299548Serj 563299548Serjerr_out: 564299548Serj return (status); 565299548Serj} 566299548Serj 567303816Ssbrunoint 568299548Serjixl_reset(struct ixl_pf *pf) 569299548Serj{ 570299548Serj struct i40e_hw *hw = &pf->hw; 571299548Serj device_t dev = pf->dev; 572299554Serj u8 set_fc_err_mask; 573299548Serj int error = 0; 574299548Serj 575299548Serj // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary 576299548Serj 
i40e_clear_hw(hw); 577299548Serj error = i40e_pf_reset(hw); 578299548Serj if (error) { 579333343Serj device_printf(dev, "init: PF reset failure\n"); 580299548Serj error = EIO; 581299548Serj goto err_out; 582299548Serj } 583299548Serj 584299548Serj error = i40e_init_adminq(hw); 585299548Serj if (error) { 586299554Serj device_printf(dev, "init: Admin queue init failure;" 587333343Serj " status code %d\n", error); 588299548Serj error = EIO; 589299548Serj goto err_out; 590299548Serj } 591299548Serj 592299548Serj i40e_clear_pxe_mode(hw); 593299548Serj 594299548Serj error = ixl_get_hw_capabilities(pf); 595299548Serj if (error) { 596299554Serj device_printf(dev, "init: Error retrieving HW capabilities;" 597299554Serj " status code %d\n", error); 598299548Serj goto err_out; 599299548Serj } 600299548Serj 601299548Serj error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 602299548Serj hw->func_caps.num_rx_qp, 0, 0); 603299548Serj if (error) { 604299554Serj device_printf(dev, "init: LAN HMC init failed; status code %d\n", 605299554Serj error); 606299548Serj error = EIO; 607299548Serj goto err_out; 608299548Serj } 609299548Serj 610299548Serj error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 611299548Serj if (error) { 612299554Serj device_printf(dev, "init: LAN HMC config failed; status code %d\n", 613299554Serj error); 614299548Serj error = EIO; 615299548Serj goto err_out; 616299548Serj } 617299548Serj 618299554Serj // XXX: possible fix for panic, but our failure recovery is still broken 619299554Serj error = ixl_switch_config(pf); 620299554Serj if (error) { 621299554Serj device_printf(dev, "init: ixl_switch_config() failed: %d\n", 622299554Serj error); 623299554Serj goto err_out; 624299554Serj } 625299548Serj 626299548Serj error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, 627299548Serj NULL); 628299548Serj if (error) { 629299548Serj device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d," 630299548Serj " aq_err %d\n", error, 
hw->aq.asq_last_status); 631299548Serj error = EIO; 632299548Serj goto err_out; 633299548Serj } 634299548Serj 635299548Serj error = i40e_set_fc(hw, &set_fc_err_mask, true); 636299548Serj if (error) { 637299548Serj device_printf(dev, "init: setting link flow control failed; retcode %d," 638299548Serj " fc_err_mask 0x%02x\n", error, set_fc_err_mask); 639299548Serj goto err_out; 640299548Serj } 641299548Serj 642299548Serj // XXX: (Rebuild VSIs?) 643299548Serj 644299552Serj /* Firmware delay workaround */ 645299548Serj if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || 646299548Serj (hw->aq.fw_maj_ver < 4)) { 647299548Serj i40e_msec_delay(75); 648299548Serj error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); 649299548Serj if (error) { 650299548Serj device_printf(dev, "init: link restart failed, aq_err %d\n", 651299548Serj hw->aq.asq_last_status); 652299548Serj goto err_out; 653299548Serj } 654299548Serj } 655299548Serj 656299548Serj 657333343Serj /* Re-enable admin queue interrupt */ 658333343Serj if (pf->msix > 1) { 659333343Serj ixl_configure_intr0_msix(pf); 660333343Serj ixl_enable_intr0(hw); 661333343Serj } 662333343Serj 663299548Serjerr_out: 664299548Serj return (error); 665299548Serj} 666299548Serj 667266423Sjfv/* 668266423Sjfv** MSIX Interrupt Handlers and Tasklets 669266423Sjfv*/ 670303816Ssbrunovoid 671270346Sjfvixl_handle_que(void *context, int pending) 672266423Sjfv{ 673270346Sjfv struct ixl_queue *que = context; 674270346Sjfv struct ixl_vsi *vsi = que->vsi; 675333343Serj struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 676266423Sjfv struct i40e_hw *hw = vsi->hw; 677266423Sjfv struct tx_ring *txr = &que->txr; 678266423Sjfv struct ifnet *ifp = vsi->ifp; 679266423Sjfv bool more; 680266423Sjfv 681266423Sjfv if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 682270346Sjfv more = ixl_rxeof(que, IXL_RX_LIMIT); 683270346Sjfv IXL_TX_LOCK(txr); 684270346Sjfv ixl_txeof(que); 685266423Sjfv if (!drbr_empty(ifp, txr->br)) 686270346Sjfv ixl_mq_start_locked(ifp, txr); 
687270346Sjfv IXL_TX_UNLOCK(txr); 688266423Sjfv if (more) { 689266423Sjfv taskqueue_enqueue(que->tq, &que->task); 690266423Sjfv return; 691266423Sjfv } 692266423Sjfv } 693266423Sjfv 694333343Serj /* Re-enable queue interrupt */ 695333343Serj if (pf->msix > 1) 696333343Serj ixl_enable_queue(hw, que->me); 697333343Serj else 698333343Serj ixl_enable_intr0(hw); 699266423Sjfv} 700266423Sjfv 701266423Sjfv 702266423Sjfv/********************************************************************* 703266423Sjfv * 704266423Sjfv * Legacy Interrupt Service routine 705266423Sjfv * 706266423Sjfv **********************************************************************/ 707266423Sjfvvoid 708270346Sjfvixl_intr(void *arg) 709266423Sjfv{ 710270346Sjfv struct ixl_pf *pf = arg; 711266423Sjfv struct i40e_hw *hw = &pf->hw; 712270346Sjfv struct ixl_vsi *vsi = &pf->vsi; 713270346Sjfv struct ixl_queue *que = vsi->queues; 714266423Sjfv struct ifnet *ifp = vsi->ifp; 715266423Sjfv struct tx_ring *txr = &que->txr; 716318357Serj u32 icr0; 717333343Serj bool more; 718266423Sjfv 719349163Serj ixl_disable_intr0(hw); 720349163Serj 721318357Serj pf->admin_irq++; 722266423Sjfv 723333343Serj /* Clear PBA at start of ISR if using legacy interrupts */ 724333343Serj if (pf->msix == 0) 725333343Serj wr32(hw, I40E_PFINT_DYN_CTL0, 726333343Serj I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 727333343Serj (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 728266423Sjfv 729266423Sjfv icr0 = rd32(hw, I40E_PFINT_ICR0); 730266423Sjfv 731266423Sjfv 732279858Sjfv#ifdef PCI_IOV 733279858Sjfv if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) 734279858Sjfv taskqueue_enqueue(pf->tq, &pf->vflr_task); 735279858Sjfv#endif 736279858Sjfv 737333343Serj if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) 738266423Sjfv taskqueue_enqueue(pf->tq, &pf->adminq); 739266423Sjfv 740333343Serj if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 741318357Serj ++que->irqs; 742266423Sjfv 743333343Serj more = ixl_rxeof(que, IXL_RX_LIMIT); 744266423Sjfv 745318357Serj IXL_TX_LOCK(txr); 
746333343Serj ixl_txeof(que); 747318357Serj if (!drbr_empty(vsi->ifp, txr->br)) 748333343Serj ixl_mq_start_locked(ifp, txr); 749318357Serj IXL_TX_UNLOCK(txr); 750333343Serj 751333343Serj if (more) 752333343Serj taskqueue_enqueue(que->tq, &que->task); 753318357Serj } 754266423Sjfv 755318357Serj ixl_enable_intr0(hw); 756266423Sjfv} 757266423Sjfv 758266423Sjfv 759266423Sjfv/********************************************************************* 760266423Sjfv * 761266423Sjfv * MSIX VSI Interrupt Service routine 762266423Sjfv * 763266423Sjfv **********************************************************************/ 764266423Sjfvvoid 765270346Sjfvixl_msix_que(void *arg) 766266423Sjfv{ 767333343Serj struct ixl_queue *que = arg; 768270346Sjfv struct ixl_vsi *vsi = que->vsi; 769266423Sjfv struct i40e_hw *hw = vsi->hw; 770266423Sjfv struct tx_ring *txr = &que->txr; 771266423Sjfv bool more_tx, more_rx; 772266423Sjfv 773269198Sjfv /* Protect against spurious interrupts */ 774269198Sjfv if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) 775269198Sjfv return; 776269198Sjfv 777349163Serj /* There are drivers which disable auto-masking of interrupts, 778349163Serj * which is a global setting for all ports. We have to make sure 779349163Serj * to mask it to not lose IRQs */ 780349163Serj ixl_disable_queue(hw, que->me); 781349163Serj 782266423Sjfv ++que->irqs; 783266423Sjfv 784270346Sjfv more_rx = ixl_rxeof(que, IXL_RX_LIMIT); 785266423Sjfv 786270346Sjfv IXL_TX_LOCK(txr); 787270346Sjfv more_tx = ixl_txeof(que); 788266423Sjfv /* 789266423Sjfv ** Make certain that if the stack 790266423Sjfv ** has anything queued the task gets 791266423Sjfv ** scheduled to handle it. 
792266423Sjfv */ 793266423Sjfv if (!drbr_empty(vsi->ifp, txr->br)) 794266423Sjfv more_tx = 1; 795270346Sjfv IXL_TX_UNLOCK(txr); 796266423Sjfv 797270346Sjfv ixl_set_queue_rx_itr(que); 798270346Sjfv ixl_set_queue_tx_itr(que); 799266423Sjfv 800266423Sjfv if (more_tx || more_rx) 801266423Sjfv taskqueue_enqueue(que->tq, &que->task); 802266423Sjfv else 803270346Sjfv ixl_enable_queue(hw, que->me); 804266423Sjfv 805266423Sjfv return; 806266423Sjfv} 807266423Sjfv 808266423Sjfv 809266423Sjfv/********************************************************************* 810266423Sjfv * 811266423Sjfv * MSIX Admin Queue Interrupt Service routine 812266423Sjfv * 813266423Sjfv **********************************************************************/ 814303816Ssbrunovoid 815270346Sjfvixl_msix_adminq(void *arg) 816266423Sjfv{ 817270346Sjfv struct ixl_pf *pf = arg; 818266423Sjfv struct i40e_hw *hw = &pf->hw; 819303816Ssbruno device_t dev = pf->dev; 820299549Serj u32 reg, mask, rstat_reg; 821299549Serj bool do_task = FALSE; 822266423Sjfv 823266423Sjfv ++pf->admin_irq; 824266423Sjfv 825266423Sjfv reg = rd32(hw, I40E_PFINT_ICR0); 826266423Sjfv mask = rd32(hw, I40E_PFINT_ICR0_ENA); 827266423Sjfv 828266423Sjfv /* Check on the cause */ 829299549Serj if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) { 830299549Serj mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK; 831299549Serj do_task = TRUE; 832299549Serj } 833266423Sjfv 834269198Sjfv if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) { 835270346Sjfv ixl_handle_mdd_event(pf); 836299549Serj mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK; 837269198Sjfv } 838266423Sjfv 839299549Serj if (reg & I40E_PFINT_ICR0_GRST_MASK) { 840303816Ssbruno device_printf(dev, "Reset Requested!\n"); 841299549Serj rstat_reg = rd32(hw, I40E_GLGEN_RSTAT); 842299549Serj rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) 843299549Serj >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; 844303816Ssbruno device_printf(dev, "Reset type: "); 845299549Serj switch (rstat_reg) { 846299549Serj /* These others might be handled 
similarly to an EMPR reset */ 847299549Serj case I40E_RESET_CORER: 848299549Serj printf("CORER\n"); 849299549Serj break; 850299549Serj case I40E_RESET_GLOBR: 851299549Serj printf("GLOBR\n"); 852299549Serj break; 853299549Serj case I40E_RESET_EMPR: 854299549Serj printf("EMPR\n"); 855299549Serj atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); 856299549Serj break; 857299549Serj default: 858303816Ssbruno printf("POR\n"); 859299549Serj break; 860299549Serj } 861303816Ssbruno /* overload admin queue task to check reset progress */ 862299549Serj do_task = TRUE; 863299549Serj } 864299549Serj 865299549Serj if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) { 866303816Ssbruno device_printf(dev, "ECC Error detected!\n"); 867299549Serj } 868299549Serj 869299549Serj if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) { 870303816Ssbruno reg = rd32(hw, I40E_PFHMC_ERRORINFO); 871303816Ssbruno if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) { 872303816Ssbruno device_printf(dev, "HMC Error detected!\n"); 873303816Ssbruno device_printf(dev, "INFO 0x%08x\n", reg); 874303816Ssbruno reg = rd32(hw, I40E_PFHMC_ERRORDATA); 875303816Ssbruno device_printf(dev, "DATA 0x%08x\n", reg); 876303816Ssbruno wr32(hw, I40E_PFHMC_ERRORINFO, 0); 877303816Ssbruno } 878299549Serj } 879299549Serj 880299549Serj if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) { 881303816Ssbruno device_printf(dev, "PCI Exception detected!\n"); 882299549Serj } 883299549Serj 884279858Sjfv#ifdef PCI_IOV 885279858Sjfv if (reg & I40E_PFINT_ICR0_VFLR_MASK) { 886266423Sjfv mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; 887279858Sjfv taskqueue_enqueue(pf->tq, &pf->vflr_task); 888279858Sjfv } 889279858Sjfv#endif 890266423Sjfv 891299549Serj if (do_task) 892299549Serj taskqueue_enqueue(pf->tq, &pf->adminq); 893303816Ssbruno else 894318357Serj ixl_enable_intr0(hw); 895266423Sjfv} 896266423Sjfv 897266423Sjfvvoid 898270346Sjfvixl_set_promisc(struct ixl_vsi *vsi) 899266423Sjfv{ 900266423Sjfv struct ifnet *ifp = vsi->ifp; 901266423Sjfv struct i40e_hw *hw = 
vsi->hw; 902266423Sjfv int err, mcnt = 0; 903266423Sjfv bool uni = FALSE, multi = FALSE; 904266423Sjfv 905333343Serj if (ifp->if_flags & IFF_PROMISC) 906333343Serj uni = multi = TRUE; 907333343Serj else if (ifp->if_flags & IFF_ALLMULTI) 908333343Serj multi = TRUE; 909266423Sjfv else { /* Need to count the multicast addresses */ 910266423Sjfv struct ifmultiaddr *ifma; 911266423Sjfv if_maddr_rlock(ifp); 912266423Sjfv TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 913333343Serj if (ifma->ifma_addr->sa_family != AF_LINK) 914333343Serj continue; 915333343Serj if (mcnt == MAX_MULTICAST_ADDR) { 916333343Serj multi = TRUE; 917333343Serj break; 918333343Serj } 919333343Serj mcnt++; 920266423Sjfv } 921266423Sjfv if_maddr_runlock(ifp); 922266423Sjfv } 923266423Sjfv 924266423Sjfv err = i40e_aq_set_vsi_unicast_promiscuous(hw, 925303816Ssbruno vsi->seid, uni, NULL, TRUE); 926266423Sjfv err = i40e_aq_set_vsi_multicast_promiscuous(hw, 927266423Sjfv vsi->seid, multi, NULL); 928266423Sjfv return; 929266423Sjfv} 930266423Sjfv 931266423Sjfv/********************************************************************* 932266423Sjfv * Filter Routines 933266423Sjfv * 934266423Sjfv * Routines for multicast and vlan filter management. 935266423Sjfv * 936266423Sjfv *********************************************************************/ 937303816Ssbrunovoid 938270346Sjfvixl_add_multi(struct ixl_vsi *vsi) 939266423Sjfv{ 940349163Serj struct ifmultiaddr *ifma; 941266423Sjfv struct ifnet *ifp = vsi->ifp; 942266423Sjfv struct i40e_hw *hw = vsi->hw; 943266423Sjfv int mcnt = 0, flags; 944266423Sjfv 945270346Sjfv IOCTL_DEBUGOUT("ixl_add_multi: begin"); 946266423Sjfv 947266423Sjfv if_maddr_rlock(ifp); 948266423Sjfv /* 949266423Sjfv ** First just get a count, to decide if we 950266423Sjfv ** we simply use multicast promiscuous. 
951266423Sjfv */ 952266423Sjfv TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 953266423Sjfv if (ifma->ifma_addr->sa_family != AF_LINK) 954266423Sjfv continue; 955266423Sjfv mcnt++; 956266423Sjfv } 957266423Sjfv if_maddr_runlock(ifp); 958266423Sjfv 959266423Sjfv if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { 960266423Sjfv /* delete existing MC filters */ 961270346Sjfv ixl_del_hw_filters(vsi, mcnt); 962266423Sjfv i40e_aq_set_vsi_multicast_promiscuous(hw, 963266423Sjfv vsi->seid, TRUE, NULL); 964266423Sjfv return; 965266423Sjfv } 966266423Sjfv 967266423Sjfv mcnt = 0; 968266423Sjfv if_maddr_rlock(ifp); 969266423Sjfv TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 970266423Sjfv if (ifma->ifma_addr->sa_family != AF_LINK) 971266423Sjfv continue; 972270346Sjfv ixl_add_mc_filter(vsi, 973266423Sjfv (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); 974266423Sjfv mcnt++; 975266423Sjfv } 976266423Sjfv if_maddr_runlock(ifp); 977266423Sjfv if (mcnt > 0) { 978270346Sjfv flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); 979270346Sjfv ixl_add_hw_filters(vsi, flags, mcnt); 980266423Sjfv } 981266423Sjfv 982270346Sjfv IOCTL_DEBUGOUT("ixl_add_multi: end"); 983266423Sjfv return; 984266423Sjfv} 985266423Sjfv 986303816Ssbrunovoid 987270346Sjfvixl_del_multi(struct ixl_vsi *vsi) 988266423Sjfv{ 989266423Sjfv struct ifnet *ifp = vsi->ifp; 990266423Sjfv struct ifmultiaddr *ifma; 991270346Sjfv struct ixl_mac_filter *f; 992266423Sjfv int mcnt = 0; 993266423Sjfv bool match = FALSE; 994266423Sjfv 995270346Sjfv IOCTL_DEBUGOUT("ixl_del_multi: begin"); 996266423Sjfv 997266423Sjfv /* Search for removed multicast addresses */ 998266423Sjfv if_maddr_rlock(ifp); 999266423Sjfv SLIST_FOREACH(f, &vsi->ftl, next) { 1000270346Sjfv if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { 1001266423Sjfv match = FALSE; 1002266423Sjfv TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1003266423Sjfv if (ifma->ifma_addr->sa_family != AF_LINK) 1004266423Sjfv continue; 
1005266423Sjfv u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 1006266423Sjfv if (cmp_etheraddr(f->macaddr, mc_addr)) { 1007266423Sjfv match = TRUE; 1008266423Sjfv break; 1009266423Sjfv } 1010266423Sjfv } 1011266423Sjfv if (match == FALSE) { 1012270346Sjfv f->flags |= IXL_FILTER_DEL; 1013266423Sjfv mcnt++; 1014266423Sjfv } 1015266423Sjfv } 1016266423Sjfv } 1017266423Sjfv if_maddr_runlock(ifp); 1018266423Sjfv 1019266423Sjfv if (mcnt > 0) 1020270346Sjfv ixl_del_hw_filters(vsi, mcnt); 1021266423Sjfv} 1022266423Sjfv 1023266423Sjfv/********************************************************************* 1024266423Sjfv * Timer routine 1025266423Sjfv * 1026333343Serj * This routine checks for link status, updates statistics, 1027266423Sjfv * and runs the watchdog check. 1028266423Sjfv * 1029266423Sjfv **********************************************************************/ 1030266423Sjfv 1031303816Ssbrunovoid 1032270346Sjfvixl_local_timer(void *arg) 1033266423Sjfv{ 1034270346Sjfv struct ixl_pf *pf = arg; 1035349163Serj struct ifnet *ifp = pf->vsi.ifp; 1036266423Sjfv 1037349163Serj if (ixl_fw_recovery_mode(pf)) { 1038349163Serj if (!(atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE)) { 1039349163Serj if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1040349163Serj ixl_stop_locked(pf); 1041349163Serj atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE | IXL_PF_STATE_EMPR_RESETTING); 1042349163Serj device_printf(pf->dev, "Firmware recovery mode detected. Limiting functionality. 
Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 1043349163Serj } 1044349163Serj } 1045349163Serj 1046318357Serj IXL_PF_LOCK_ASSERT(pf); 1047266423Sjfv 1048266423Sjfv /* Fire off the adminq task */ 1049266423Sjfv taskqueue_enqueue(pf->tq, &pf->adminq); 1050266423Sjfv 1051349163Serj if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1052349163Serj /* Update stats */ 1053349163Serj ixl_update_stats_counters(pf); 1054349163Serj } 1055266423Sjfv 1056349163Serj if (ixl_queue_hang_check(&pf->vsi)) { 1057349163Serj /* Increment stat when a queue shows hung */ 1058333343Serj pf->watchdog_events++; 1059349163Serj } 1060266423Sjfv 1061270346Sjfv callout_reset(&pf->timer, hz, ixl_local_timer, pf); 1062266423Sjfv} 1063266423Sjfv 1064318357Serjvoid 1065318357Serjixl_link_up_msg(struct ixl_pf *pf) 1066318357Serj{ 1067318357Serj struct i40e_hw *hw = &pf->hw; 1068318357Serj struct ifnet *ifp = pf->vsi.ifp; 1069333343Serj char *req_fec_string, *neg_fec_string; 1070333343Serj u8 fec_abilities; 1071318357Serj 1072333343Serj fec_abilities = hw->phy.link_info.req_fec_info; 1073333343Serj /* If both RS and KR are requested, only show RS */ 1074333343Serj if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) 1075333343Serj req_fec_string = ixl_fec_string[0]; 1076333343Serj else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) 1077333343Serj req_fec_string = ixl_fec_string[1]; 1078333343Serj else 1079333343Serj req_fec_string = ixl_fec_string[2]; 1080333343Serj 1081333343Serj if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) 1082333343Serj neg_fec_string = ixl_fec_string[0]; 1083333343Serj else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) 1084333343Serj neg_fec_string = ixl_fec_string[1]; 1085333343Serj else 1086333343Serj neg_fec_string = ixl_fec_string[2]; 1087333343Serj 1088333343Serj log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 1089318357Serj 
ifp->if_xname, 1090349163Serj ixl_link_speed_string(hw->phy.link_info.link_speed), 1091333343Serj req_fec_string, neg_fec_string, 1092318357Serj (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", 1093318357Serj (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && 1094318357Serj hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 1095318357Serj ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? 1096318357Serj ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 1097318357Serj ixl_fc_string[1] : ixl_fc_string[0]); 1098318357Serj} 1099318357Serj 1100266423Sjfv/* 1101266423Sjfv** Note: this routine updates the OS on the link state 1102266423Sjfv** the real check of the hardware only happens with 1103266423Sjfv** a link interrupt. 1104266423Sjfv*/ 1105303816Ssbrunovoid 1106270346Sjfvixl_update_link_status(struct ixl_pf *pf) 1107266423Sjfv{ 1108270346Sjfv struct ixl_vsi *vsi = &pf->vsi; 1109266423Sjfv struct ifnet *ifp = vsi->ifp; 1110266423Sjfv device_t dev = pf->dev; 1111266423Sjfv 1112299547Serj if (pf->link_up) { 1113266423Sjfv if (vsi->link_active == FALSE) { 1114266423Sjfv vsi->link_active = TRUE; 1115333343Serj#if __FreeBSD_version >= 1100000 1116318357Serj ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed); 1117333343Serj#else 1118333343Serj if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->link_speed)); 1119333343Serj#endif 1120266423Sjfv if_link_state_change(ifp, LINK_STATE_UP); 1121318357Serj ixl_link_up_msg(pf); 1122333343Serj#ifdef PCI_IOV 1123333343Serj ixl_broadcast_link_state(pf); 1124333343Serj#endif 1125266423Sjfv } 1126266423Sjfv } else { /* Link down */ 1127266423Sjfv if (vsi->link_active == TRUE) { 1128266423Sjfv if (bootverbose) 1129299547Serj device_printf(dev, "Link is Down\n"); 1130266423Sjfv if_link_state_change(ifp, LINK_STATE_DOWN); 1131266423Sjfv vsi->link_active = FALSE; 1132333343Serj#ifdef PCI_IOV 1133333343Serj ixl_broadcast_link_state(pf); 1134333343Serj#endif 
1135266423Sjfv } 1136266423Sjfv } 1137266423Sjfv} 1138266423Sjfv 1139266423Sjfv/********************************************************************* 1140266423Sjfv * 1141266423Sjfv * This routine disables all traffic on the adapter by issuing a 1142266423Sjfv * global reset on the MAC and deallocates TX/RX buffers. 1143266423Sjfv * 1144266423Sjfv **********************************************************************/ 1145266423Sjfv 1146303816Ssbrunovoid 1147299547Serjixl_stop_locked(struct ixl_pf *pf) 1148266423Sjfv{ 1149270346Sjfv struct ixl_vsi *vsi = &pf->vsi; 1150266423Sjfv struct ifnet *ifp = vsi->ifp; 1151266423Sjfv 1152299547Serj INIT_DEBUGOUT("ixl_stop: begin\n"); 1153266423Sjfv 1154299547Serj IXL_PF_LOCK_ASSERT(pf); 1155299547Serj 1156349163Serj /* Tell the stack that the interface is no longer active */ 1157349163Serj ifp->if_drv_flags &= ~(IFF_DRV_RUNNING); 1158349163Serj 1159318357Serj#ifdef IXL_IW 1160318357Serj /* Stop iWARP device */ 1161318357Serj if (ixl_enable_iwarp && pf->iw_enabled) 1162318357Serj ixl_iw_pf_stop(pf); 1163318357Serj#endif 1164318357Serj 1165299553Serj ixl_disable_rings_intr(vsi); 1166270346Sjfv ixl_disable_rings(vsi); 1167266423Sjfv} 1168266423Sjfv 1169303816Ssbrunovoid 1170303816Ssbrunoixl_stop(struct ixl_pf *pf) 1171303816Ssbruno{ 1172303816Ssbruno IXL_PF_LOCK(pf); 1173303816Ssbruno ixl_stop_locked(pf); 1174303816Ssbruno IXL_PF_UNLOCK(pf); 1175303816Ssbruno} 1176303816Ssbruno 1177266423Sjfv/********************************************************************* 1178266423Sjfv * 1179266423Sjfv * Setup MSIX Interrupt resources and handlers for the VSI 1180266423Sjfv * 1181266423Sjfv **********************************************************************/ 1182303816Ssbrunoint 1183318357Serjixl_setup_legacy(struct ixl_pf *pf) 1184266423Sjfv{ 1185266423Sjfv device_t dev = pf->dev; 1186266423Sjfv int error, rid = 0; 1187266423Sjfv 1188266423Sjfv if (pf->msix == 1) 1189266423Sjfv rid = 1; 1190266423Sjfv pf->res = 
bus_alloc_resource_any(dev, SYS_RES_IRQ, 1191266423Sjfv &rid, RF_SHAREABLE | RF_ACTIVE); 1192266423Sjfv if (pf->res == NULL) { 1193318357Serj device_printf(dev, "bus_alloc_resource_any() for" 1194318357Serj " legacy/msi interrupt\n"); 1195266423Sjfv return (ENXIO); 1196266423Sjfv } 1197266423Sjfv 1198266423Sjfv /* Set the handler function */ 1199266423Sjfv error = bus_setup_intr(dev, pf->res, 1200266423Sjfv INTR_TYPE_NET | INTR_MPSAFE, NULL, 1201270346Sjfv ixl_intr, pf, &pf->tag); 1202266423Sjfv if (error) { 1203266423Sjfv pf->res = NULL; 1204318357Serj device_printf(dev, "bus_setup_intr() for legacy/msi" 1205318357Serj " interrupt handler failed, error %d\n", error); 1206318357Serj return (ENXIO); 1207266423Sjfv } 1208318357Serj error = bus_describe_intr(dev, pf->res, pf->tag, "irq"); 1209318357Serj if (error) { 1210318357Serj /* non-fatal */ 1211318357Serj device_printf(dev, "bus_describe_intr() for Admin Queue" 1212318357Serj " interrupt name failed, error %d\n", error); 1213318357Serj } 1214279858Sjfv 1215266423Sjfv return (0); 1216266423Sjfv} 1217266423Sjfv 1218303816Ssbrunoint 1219299553Serjixl_setup_adminq_tq(struct ixl_pf *pf) 1220299546Serj{ 1221299546Serj device_t dev = pf->dev; 1222299553Serj int error = 0; 1223266423Sjfv 1224299553Serj /* Tasklet for Admin Queue interrupts */ 1225299546Serj TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); 1226299546Serj#ifdef PCI_IOV 1227299546Serj /* VFLR Tasklet */ 1228299546Serj TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf); 1229299546Serj#endif 1230299553Serj /* Create and start Admin Queue taskqueue */ 1231299553Serj pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT, 1232299546Serj taskqueue_thread_enqueue, &pf->tq); 1233299553Serj if (!pf->tq) { 1234299553Serj device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n"); 1235299553Serj return (ENOMEM); 1236299553Serj } 1237299553Serj error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq", 1238299546Serj device_get_nameunit(dev)); 1239299553Serj if 
(error) { 1240299553Serj device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n", 1241299553Serj error); 1242299553Serj taskqueue_free(pf->tq); 1243299553Serj return (error); 1244299553Serj } 1245299553Serj return (0); 1246299553Serj} 1247299546Serj 1248303816Ssbrunoint 1249299553Serjixl_setup_queue_tqs(struct ixl_vsi *vsi) 1250299553Serj{ 1251299553Serj struct ixl_queue *que = vsi->queues; 1252299553Serj device_t dev = vsi->dev; 1253303967Ssbruno#ifdef RSS 1254303967Ssbruno int cpu_id = 0; 1255303967Ssbruno cpuset_t cpu_mask; 1256303967Ssbruno#endif 1257299553Serj 1258299546Serj /* Create queue tasks and start queue taskqueues */ 1259299546Serj for (int i = 0; i < vsi->num_queues; i++, que++) { 1260299546Serj TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); 1261299546Serj TASK_INIT(&que->task, 0, ixl_handle_que, que); 1262299546Serj que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT, 1263299546Serj taskqueue_thread_enqueue, &que->tq); 1264299546Serj#ifdef RSS 1265299546Serj CPU_SETOF(cpu_id, &cpu_mask); 1266299546Serj taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, 1267299546Serj &cpu_mask, "%s (bucket %d)", 1268299546Serj device_get_nameunit(dev), cpu_id); 1269299546Serj#else 1270299546Serj taskqueue_start_threads(&que->tq, 1, PI_NET, 1271299546Serj "%s (que %d)", device_get_nameunit(dev), que->me); 1272299546Serj#endif 1273299546Serj } 1274299546Serj 1275299553Serj return (0); 1276299546Serj} 1277299546Serj 1278303816Ssbrunovoid 1279299553Serjixl_free_adminq_tq(struct ixl_pf *pf) 1280299546Serj{ 1281299554Serj if (pf->tq) { 1282299553Serj taskqueue_free(pf->tq); 1283299554Serj pf->tq = NULL; 1284299554Serj } 1285299553Serj} 1286299553Serj 1287303816Ssbrunovoid 1288299553Serjixl_free_queue_tqs(struct ixl_vsi *vsi) 1289299553Serj{ 1290299554Serj struct ixl_queue *que = vsi->queues; 1291299546Serj 1292299546Serj for (int i = 0; i < vsi->num_queues; i++, que++) { 1293299554Serj if (que->tq) { 1294299546Serj taskqueue_free(que->tq); 
1295299554Serj que->tq = NULL; 1296299554Serj } 1297299546Serj } 1298299546Serj} 1299299546Serj 1300303816Ssbrunoint 1301299553Serjixl_setup_adminq_msix(struct ixl_pf *pf) 1302266423Sjfv{ 1303299553Serj device_t dev = pf->dev; 1304299553Serj int rid, error = 0; 1305266423Sjfv 1306299553Serj /* Admin IRQ rid is 1, vector is 0 */ 1307299553Serj rid = 1; 1308299553Serj /* Get interrupt resource from bus */ 1309266423Sjfv pf->res = bus_alloc_resource_any(dev, 1310266423Sjfv SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); 1311266423Sjfv if (!pf->res) { 1312299553Serj device_printf(dev, "bus_alloc_resource_any() for Admin Queue" 1313299553Serj " interrupt failed [rid=%d]\n", rid); 1314266423Sjfv return (ENXIO); 1315266423Sjfv } 1316299553Serj /* Then associate interrupt with handler */ 1317266423Sjfv error = bus_setup_intr(dev, pf->res, 1318266423Sjfv INTR_TYPE_NET | INTR_MPSAFE, NULL, 1319270346Sjfv ixl_msix_adminq, pf, &pf->tag); 1320266423Sjfv if (error) { 1321266423Sjfv pf->res = NULL; 1322299553Serj device_printf(dev, "bus_setup_intr() for Admin Queue" 1323299553Serj " interrupt handler failed, error %d\n", error); 1324299553Serj return (ENXIO); 1325266423Sjfv } 1326299553Serj error = bus_describe_intr(dev, pf->res, pf->tag, "aq"); 1327299553Serj if (error) { 1328318357Serj /* non-fatal */ 1329299553Serj device_printf(dev, "bus_describe_intr() for Admin Queue" 1330299553Serj " interrupt name failed, error %d\n", error); 1331299553Serj } 1332299553Serj pf->admvec = 0; 1333266423Sjfv 1334299553Serj return (0); 1335299553Serj} 1336299553Serj 1337299553Serj/* 1338299553Serj * Allocate interrupt resources from bus and associate an interrupt handler 1339299553Serj * to those for the VSI's queues. 
1340299553Serj */ 1341303816Ssbrunoint 1342299553Serjixl_setup_queue_msix(struct ixl_vsi *vsi) 1343299553Serj{ 1344299553Serj device_t dev = vsi->dev; 1345299553Serj struct ixl_queue *que = vsi->queues; 1346299553Serj struct tx_ring *txr; 1347299553Serj int error, rid, vector = 1; 1348299553Serj 1349299553Serj /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */ 1350266423Sjfv for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { 1351277084Sjfv int cpu_id = i; 1352266423Sjfv rid = vector + 1; 1353266423Sjfv txr = &que->txr; 1354266423Sjfv que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1355266423Sjfv RF_SHAREABLE | RF_ACTIVE); 1356299553Serj if (!que->res) { 1357299553Serj device_printf(dev, "bus_alloc_resource_any() for" 1358299553Serj " Queue %d interrupt failed [rid=%d]\n", 1359299553Serj que->me, rid); 1360266423Sjfv return (ENXIO); 1361266423Sjfv } 1362266423Sjfv /* Set the handler function */ 1363266423Sjfv error = bus_setup_intr(dev, que->res, 1364266423Sjfv INTR_TYPE_NET | INTR_MPSAFE, NULL, 1365270346Sjfv ixl_msix_que, que, &que->tag); 1366266423Sjfv if (error) { 1367299553Serj device_printf(dev, "bus_setup_intr() for Queue %d" 1368299553Serj " interrupt handler failed, error %d\n", 1369299553Serj que->me, error); 1370318357Serj bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); 1371266423Sjfv return (error); 1372266423Sjfv } 1373299554Serj error = bus_describe_intr(dev, que->res, que->tag, "q%d", i); 1374299553Serj if (error) { 1375299553Serj device_printf(dev, "bus_describe_intr() for Queue %d" 1376299553Serj " interrupt name failed, error %d\n", 1377299553Serj que->me, error); 1378299553Serj } 1379266423Sjfv /* Bind the vector to a CPU */ 1380277084Sjfv#ifdef RSS 1381277084Sjfv cpu_id = rss_getcpu(i % rss_getnumbuckets()); 1382277084Sjfv#endif 1383299553Serj error = bus_bind_intr(dev, que->res, cpu_id); 1384299553Serj if (error) { 1385299553Serj device_printf(dev, "bus_bind_intr() for Queue %d" 1386299553Serj " to CPU 
/*
 * Allocate MSI/X vectors from the OS.
 * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
 *
 * Queue count is clamped by available vectors, CPU count, per-MAC VSI
 * queue limits, the ixl_max_queues tunable and (with RSS) the bucket
 * count.  Under IXL_IW, leftover vectors are reserved for iWARP.
 */
int
ixl_init_msix(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
#ifdef IXL_IW
#if __FreeBSD_version >= 1100000
	cpuset_t cpu_set;
#endif
#endif
	int auto_max_queues;
	int rid, want, vectors, queues, available;
#ifdef IXL_IW
	int iw_want=0, iw_vectors;

	pf->iw_msix = 0;
#endif

	/* Override by tuneable */
	if (!pf->enable_msix)
		goto no_msix;

	/* First try MSI/X */
	rid = PCIR_BAR(IXL_MSIX_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table\n");
		goto no_msix;
	}

	available = pci_msix_count(dev);
	if (available < 2) {
		/* system has msix disabled (0), or only one vector (1) */
		device_printf(pf->dev, "Less than two MSI-X vectors available\n");
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, pf->msix_mem);
		pf->msix_mem = NULL;
		goto no_msix;
	}

	/* Clamp max number of queues based on:
	 * - # of MSI-X vectors available
	 * - # of cpus available
	 * - # of queues that can be assigned to the LAN VSI
	 */
	auto_max_queues = min(mp_ncpus, available - 1);
	if (hw->mac.type == I40E_MAC_X722)
		auto_max_queues = min(auto_max_queues, 128);
	else
		auto_max_queues = min(auto_max_queues, 64);

	/* Override with tunable value if tunable is less than autoconfig count */
	if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
		queues = pf->max_queues;
	/* Use autoconfig amount if that's lower */
	else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
		device_printf(dev, "ixl_max_queues (%d) is too large, using "
		    "autoconfig amount (%d)...\n",
		    pf->max_queues, auto_max_queues);
		queues = auto_max_queues;
	}
	/* Limit maximum auto-configured queues to 8 if no user value is set */
	else
		queues = min(auto_max_queues, 8);

#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(pf->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		/* NOTE(review): msix_mem is cleared here without a matching
		 * bus_release_resource() — possible resource leak; confirm. */
		pf->msix_mem = NULL;
		goto no_msix; /* Will go to Legacy setup */
	}

#ifdef IXL_IW
	if (ixl_enable_iwarp && hw->func_caps.iwarp) {
#if __FreeBSD_version >= 1100000
		/* Prefer the count of CPUs usable for interrupts */
		if(bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set) == 0)
		{
			iw_want = min(CPU_COUNT(&cpu_set), IXL_IW_MAX_MSIX);
		}
#endif
		if(!iw_want)
			iw_want = min(mp_ncpus, IXL_IW_MAX_MSIX);
		if(ixl_limit_iwarp_msix > 0)
			iw_want = min(iw_want, ixl_limit_iwarp_msix);
		else
			iw_want = min(iw_want, 1);

		/* Give iWARP whatever is left over after the LAN vectors */
		available -= vectors;
		if (available > 0) {
			iw_vectors = (available >= iw_want) ?
			    iw_want : available;
			vectors += iw_vectors;
		} else
			iw_vectors = 0;
	}
#endif

	ixl_set_msix_enable(dev);
	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		pf->msix = vectors;
#ifdef IXL_IW
		if (ixl_enable_iwarp && hw->func_caps.iwarp)
		{
			pf->iw_msix = iw_vectors;
			device_printf(pf->dev,
			    "Reserving %d MSIX interrupts for iWARP CEQ and AEQ\n",
			    iw_vectors);
		}
#endif

		pf->vsi.num_queues = queues;
#ifdef RSS
		/*
		 * If we're doing RSS, the number of queues needs to
		 * match the number of RSS buckets that are configured.
		 *
		 * + If there's more queues than RSS buckets, we'll end
		 *   up with queues that get no traffic.
		 *
		 * + If there's more RSS buckets than queues, we'll end
		 *   up having multiple RSS buckets map to the same queue,
		 *   so there'll be some contention.
		 */
		if (queues != rss_getnumbuckets()) {
			device_printf(dev,
			    "%s: queues (%d) != RSS buckets (%d)"
			    "; performance will be impacted.\n",
			    __func__, queues, rss_getnumbuckets());
		}
#endif
		return (vectors);
	}
no_msix:
	/* Fall back to a single MSI vector, or legacy INTx */
	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;
	pf->max_queues = 1;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
		device_printf(pf->dev, "Using an MSI interrupt\n");
	else {
		vectors = 0;
		device_printf(pf->dev, "Using a Legacy interrupt\n");
	}
	return (vectors);
}
1591299547Serj */ 1592266423Sjfv wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); 1593299547Serj /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */ 1594299547Serj wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E); 1595266423Sjfv 1596266423Sjfv wr32(hw, I40E_PFINT_DYN_CTL0, 1597266423Sjfv I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 1598266423Sjfv I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 1599266423Sjfv 1600266423Sjfv wr32(hw, I40E_PFINT_STAT_CTL0, 0); 1601299553Serj} 1602266423Sjfv 1603299553Serj/* 1604299553Serj * Configure queue interrupt cause registers in hardware. 1605299553Serj */ 1606303816Ssbrunovoid 1607299553Serjixl_configure_queue_intr_msix(struct ixl_pf *pf) 1608299553Serj{ 1609299553Serj struct i40e_hw *hw = &pf->hw; 1610299553Serj struct ixl_vsi *vsi = &pf->vsi; 1611299553Serj u32 reg; 1612299553Serj u16 vector = 1; 1613299553Serj 1614266423Sjfv for (int i = 0; i < vsi->num_queues; i++, vector++) { 1615299555Serj wr32(hw, I40E_PFINT_DYN_CTLN(i), 0); 1616299555Serj /* First queue type is RX / 0 */ 1617266423Sjfv wr32(hw, I40E_PFINT_LNKLSTN(i), i); 1618266423Sjfv 1619266423Sjfv reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 1620270346Sjfv (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 1621266423Sjfv (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | 1622266423Sjfv (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | 1623266423Sjfv (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); 1624266423Sjfv wr32(hw, I40E_QINT_RQCTL(i), reg); 1625266423Sjfv 1626266423Sjfv reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | 1627270346Sjfv (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | 1628266423Sjfv (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | 1629299555Serj (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | 1630266423Sjfv (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 1631266423Sjfv wr32(hw, I40E_QINT_TQCTL(i), reg); 1632266423Sjfv } 1633266423Sjfv} 1634266423Sjfv 1635266423Sjfv/* 1636266423Sjfv * Configure for MSI single vector operation 1637266423Sjfv */ 1638303816Ssbrunovoid 
ixl_configure_legacy(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ixl_queue *que = vsi->queues;
	struct rx_ring	*rxr = &que->rxr;
	struct tx_ring	*txr = &que->txr;
	u32		reg;

	/* Configure ITR; only queue pair 0 exists in single-vector mode */
	vsi->tx_itr_setting = pf->tx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
	    vsi->tx_itr_setting);
	txr->itr = vsi->tx_itr_setting;

	vsi->rx_itr_setting = pf->rx_itr;
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
	    vsi->rx_itr_setting);
	rxr->itr = vsi->rx_itr_setting;

	/* Setup "other" causes */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
	    | I40E_PFINT_ICR0_ENA_GRST_MASK
	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
	    ;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* No ITR for non-queue interrupts */
	wr32(hw, I40E_PFINT_STAT_CTL0,
	    IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the q int */
	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	/* TX cause terminates the chain (IXL_QUEUE_EOL) */
	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
	wr32(hw, I40E_QINT_TQCTL(0), reg);
}

/*
 * Map BAR0 and record PCI/register-access information in the shared
 * i40e_hw and osdep structures.
 *
 * Returns 0 on success or ENXIO if BAR0 cannot be mapped.
 */
int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	int		rid;
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}
	/* Ensure proper PCI device operation */
	ixl_set_busmaster(dev);

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;

1730266423Sjfv pf->hw.back = &pf->osdep; 1731266423Sjfv 1732266423Sjfv return (0); 1733266423Sjfv} 1734266423Sjfv 1735299553Serj/* 1736299553Serj * Teardown and release the admin queue/misc vector 1737299553Serj * interrupt. 1738299553Serj */ 1739303816Ssbrunoint 1740299553Serjixl_teardown_adminq_msix(struct ixl_pf *pf) 1741266423Sjfv{ 1742266423Sjfv device_t dev = pf->dev; 1743318357Serj int rid, error = 0; 1744266423Sjfv 1745299553Serj if (pf->admvec) /* we are doing MSIX */ 1746299553Serj rid = pf->admvec + 1; 1747299553Serj else 1748299553Serj (pf->msix != 0) ? (rid = 1):(rid = 0); 1749299553Serj 1750299553Serj if (pf->tag != NULL) { 1751299553Serj bus_teardown_intr(dev, pf->res, pf->tag); 1752318357Serj if (error) { 1753318357Serj device_printf(dev, "bus_teardown_intr() for" 1754318357Serj " interrupt 0 failed\n"); 1755318357Serj // return (ENXIO); 1756318357Serj } 1757299553Serj pf->tag = NULL; 1758299553Serj } 1759299553Serj if (pf->res != NULL) { 1760299553Serj bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res); 1761318357Serj if (error) { 1762318357Serj device_printf(dev, "bus_release_resource() for" 1763318357Serj " interrupt 0 failed [rid=%d]\n", rid); 1764318357Serj // return (ENXIO); 1765318357Serj } 1766299553Serj pf->res = NULL; 1767299553Serj } 1768299553Serj 1769299553Serj return (0); 1770299553Serj} 1771299553Serj 1772303816Ssbrunoint 1773299553Serjixl_teardown_queue_msix(struct ixl_vsi *vsi) 1774299553Serj{ 1775303816Ssbruno struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 1776299553Serj struct ixl_queue *que = vsi->queues; 1777299553Serj device_t dev = vsi->dev; 1778299554Serj int rid, error = 0; 1779299553Serj 1780266423Sjfv /* We may get here before stations are setup */ 1781318357Serj if ((pf->msix < 2) || (que == NULL)) 1782299553Serj return (0); 1783266423Sjfv 1784299553Serj /* Release all MSIX queue resources */ 1785266423Sjfv for (int i = 0; i < vsi->num_queues; i++, que++) { 1786266423Sjfv rid = que->msix + 1; 1787266423Sjfv if 
 (que->tag != NULL) {
			error = bus_teardown_intr(dev, que->res, que->tag);
			if (error) {
				device_printf(dev, "bus_teardown_intr() for"
				    " Queue %d interrupt failed\n",
				    que->me);
				// return (ENXIO);
			}
			que->tag = NULL;
		}
		if (que->res != NULL) {
			error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
			if (error) {
				device_printf(dev, "bus_release_resource() for"
				    " Queue %d interrupt failed [rid=%d]\n",
				    que->me, rid);
				// return (ENXIO);
			}
			que->res = NULL;
		}
	}

	return (0);
}

/*
 * Release all PCI resources: queue and admin-queue interrupt vectors,
 * MSI(-X) allocations, the MSI-X table BAR, and the BAR0 mapping.
 */
void
ixl_free_pci_resources(struct ixl_pf *pf)
{
	device_t		dev = pf->dev;
	int			memrid;

	/* Tear down interrupt handlers before releasing the vectors */
	ixl_teardown_queue_msix(&pf->vsi);
	ixl_teardown_adminq_msix(pf);

	if (pf->msix > 0)
		pci_release_msi(dev);

	memrid = PCIR_BAR(IXL_MSIX_BAR);

	if (pf->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, pf->msix_mem);

	if (pf->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), pf->pci_mem);

	return;
}

/*
 * Add an ifmedia entry for every media type the hardware reports in
 * the phy_types capability bitmask.
 */
void
ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_2500_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_5000_T, 0, NULL);

	/* Several 10G attachment types are all presented as TWINAX */
	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	/* Likewise, several 40G attachment types map to 40G_CR4 */
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types &
 (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Allocates the ifnet, installs the driver entry points, advertises
 *  capabilities, queries the PHY for supported media, and attaches
 *  the Ethernet interface.  Returns 0 or ENOMEM.
 *
 **********************************************************************/
int
ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct ifnet		*ifp;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;
	struct i40e_aq_get_phy_abilities_resp abilities;
	enum i40e_status_code aq_error = 0;

	INIT_DEBUGOUT("ixl_setup_interface: begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (ENOMEM);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = ixl_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixl_ioctl;

#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;

	/* Leave headroom in the software send queue */
	ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/* Set TSO limits */
	ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
	ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
	ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;

	/* VLAN capabilties */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
	    ixl_media_status);

	/* In recovery mode, skip PHY queries; only autoselect is offered */
	if ((atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) == 0) {
		aq_error = i40e_aq_get_phy_capabilities(hw,
		    FALSE, TRUE, &abilities, NULL);
		/* May need delay to detect fiber correctly */
		if (aq_error == I40E_ERR_UNKNOWN_PHY) {
			i40e_msec_delay(200);
			aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
			    TRUE, &abilities, NULL);
		}
		if (aq_error) {
			if (aq_error == I40E_ERR_UNKNOWN_PHY)
				device_printf(dev, "Unknown PHY type detected!\n");
			else
				device_printf(dev,
				    "Error getting supported media types, err %d,"
				    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
		} else {
			pf->supported_speeds = abilities.link_speed;
#if __FreeBSD_version >= 1100000
			ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
#else
			if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#endif

			ixl_add_ifmedia(vsi, hw->phy.phy_types);
		}
	}

	/* Use autoselect media by default */
	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, hw->mac.addr);

	return (0);
}

/*
** Run when the Admin Queue gets a link state change interrupt.
**
** Refreshes pf->link_up from the hardware, warns once about an
** unqualified module that is keeping the link down, and pushes the
** new state to the OS.
*/
void
ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	struct i40e_hw	*hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aqc_get_link_status *status =
	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Request link status from adapter */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Print out message if an unqualified module is found */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (pf->advertised_speed) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected!\n");

	/* Update OS link info */
	ixl_update_link_status(pf);
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t	dev = vsi->dev;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	/* Optionally dump the reported switch elements for debugging */
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    sw_config->header.num_reported, sw_config->header.num_total);
		for (int i = 0; i < sw_config->header.num_reported; i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    sw_config->element[i].seid,
			    sw_config->element[i].uplink_seid,
			    sw_config->element[i].downlink_seid);
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = sw_config->element[0].uplink_seid;
	vsi->downlink_seid = sw_config->element[0].downlink_seid;
	vsi->seid = sw_config->element[0].seid;
	return (ret);
}

/*********************************************************************
 *
 *  Initialize the VSI:  this handles contexts, which means things
 *  			 like the number of descriptors, buffer size,
 *			 plus we init the rings thru this function.
 *
 *  Updates the VSI parameters via the admin queue, then programs an
 *  HMC TX and RX context for every queue pair and (re)initializes
 *  the software rings.  Returns 0 or the first error encountered.
 *
 **********************************************************************/
int
ixl_initialize_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = vsi->back;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = vsi->dev;
	struct i40e_hw		*hw = vsi->hw;
	struct i40e_vsi_context	ctxt;
	int 			tc_queues;
	int			err = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}
	ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
	    "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
	    ctxt.uplink_seid, ctxt.vsi_number,
	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
	/*
	** Set the queue and traffic class bits
	**  - when multiple traffic classes are supported
	**    this will need to be more robust.
	*/
	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
	ctxt.info.queue_mapping[0] = 0;
	/*
	 * This VSI will only use traffic class 0; start traffic class 0's
	 * queue allocation at queue 0, and assign it 2^tc_queues queues (though
	 * the driver may not use all of them).
	 */
	tc_queues = bsrl(pf->qtag.num_allocated);
	ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);

	/* Set VLAN receive stripping mode */
	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	else
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

#ifdef IXL_IW
	/* Set TCP Enable for iWARP capable VSI */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ctxt.info.valid_sections |=
		    htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}
#endif
	/* Save VSI number and info for use later */
	vsi->vsi_num = ctxt.vsi_number;
	bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));

	/* Reset VSI statistics */
	ixl_vsi_reset_stats(vsi);

	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);

	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (err) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", err, hw->aq.asq_last_status);
		return (err);
	}

	/* Program an HMC TX and RX context per queue pair */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring		*txr = &que->txr;
		struct rx_ring 		*rxr = &que->rxr;
		struct i40e_hmc_obj_txq tctx;
		struct i40e_hmc_obj_rxq rctx;
		u32			txctl;
		u16			size;

		/* Setup the HMC TX Context  */
		size = que->num_tx_desc * sizeof(struct i40e_tx_desc);
		bzero(&tctx, sizeof(tctx));
		tctx.new_context = 1;
		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
		tctx.qlen = que->num_tx_desc;
		tctx.fc_ena = 0;	/* Disable FCoE */
		/*
		 * This value needs to pulled from the VSI that this queue
		 * is assigned to. Index into array is traffic class.
		 */
		tctx.rdylist = vsi->info.qs_handle[0];
		/*
		 * Set these to enable Head Writeback
		 * - Address is last entry in TX ring (reserved for HWB index)
		 * Leave these as 0 for Descriptor Writeback
		 */
		if (vsi->enable_head_writeback) {
			tctx.head_wb_ena = 1;
			tctx.head_wb_addr = txr->dma.pa +
			    (que->num_tx_desc * sizeof(struct i40e_tx_desc));
		}
		tctx.rdylist_act = 0;
		err = i40e_clear_lan_tx_queue_context(hw, i);
		if (err) {
			device_printf(dev, "Unable to clear TX context\n");
			break;
		}
		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
		if (err) {
			device_printf(dev, "Unable to set TX context\n");
			break;
		}
		/* Associate the ring with this PF */
		txctl = I40E_QTX_CTL_PF_QUEUE;
		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
		wr32(hw, I40E_QTX_CTL(i), txctl);
		ixl_flush(hw);

		/* Do ring (re)init */
		ixl_init_tx_ring(que);

		/* Next setup the HMC RX Context  */
		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;

		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;

		/* Set up an RX context for the HMC */
		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
		/* ignore header split for now */
		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
		    vsi->max_frame_size : max_rxmax;
		rctx.dtype = 0;
		rctx.dsize = 1;		/* do 32byte descriptors */
		rctx.hsplit_0 = 0;	/* no header split */
		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
		rctx.qlen = que->num_rx_desc;
		rctx.tphrdesc_ena = 1;
		rctx.tphwdesc_ena = 1;
		rctx.tphdata_ena = 0;	/* Header Split related */
		rctx.tphhead_ena = 0;	/* Header Split related */
		rctx.lrxqthresh = 2;	/* Interrupt at <128 desc avail */
		rctx.crcstrip = 1;
		rctx.l2tsel = 1;
		rctx.showiv = 1;	/* Strip inner VLAN header */
		rctx.fc_ena = 0;	/* Disable FCoE */
		rctx.prefena = 1;	/* Prefetch descriptors */

		err = i40e_clear_lan_rx_queue_context(hw, i);
		if (err) {
			device_printf(dev,
			    "Unable to clear RX context %d\n", i);
			break;
		}
		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
		if (err) {
			device_printf(dev, "Unable to set RX context %d\n", i);
			break;
		}
		err = ixl_init_rx_ring(que);
		if (err) {
			device_printf(dev, "Fail in init_rx_ring %d\n", i);
			break;
		}
#ifdef DEV_NETMAP
		/* preserve queue */
		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(vsi->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
		} else
#endif /* DEV_NETMAP */
			wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_rx_desc - 1);
	}
	return (err);
}

/*
 * Free the TX/RX rings, buf rings, DMA memory, and locks of every
 * queue on the VSI, then release the VSI's sysctl context.
 */
void
ixl_vsi_free_queues(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue	*que = vsi->queues;

	if (NULL == vsi->queues)
		return;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		if (txr->br)
			buf_ring_free(txr->br, M_DEVBUF);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
	}

	sysctl_ctx_free(&vsi->sysctl_ctx);
}


/*********************************************************************
 *
 *  Free all VSI structs.
 *
 **********************************************************************/
void
ixl_free_vsi(struct ixl_vsi *vsi)
{
	/* Free station queues */
	ixl_vsi_free_queues(vsi);
	if (vsi->queues)
		free(vsi->queues, M_DEVBUF);

	/* Free VSI filter list */
	ixl_free_mac_filters(vsi);
}

/*
 * Drain and free the VSI's MAC filter list and reset the hardware
 * filter count.
 */
void
ixl_free_mac_filters(struct ixl_vsi *vsi)
{
	struct ixl_mac_filter *f;

	while (!SLIST_EMPTY(&vsi->ftl)) {
		f = SLIST_FIRST(&vsi->ftl);
		SLIST_REMOVE_HEAD(&vsi->ftl, next);
		free(f, M_DEVBUF);
	}

	vsi->num_hw_filters = 0;
}

/*
 * Fill out fields in queue struct and setup tx/rx memory and structs
 */
static int
ixl_vsi_setup_queue(struct ixl_vsi *vsi, struct ixl_queue *que, int index)
{
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	device_t	dev = pf->dev;
	struct i40e_hw	*hw = &pf->hw;
	struct tx_ring	*txr = &que->txr;
	struct rx_ring	*rxr = &que->rxr;
	int		error = 0;
	int		rsize, tsize;

	que->num_tx_desc = vsi->num_tx_desc;
	que->num_rx_desc = vsi->num_rx_desc;
	que->me = index;
	que->vsi = vsi;

	txr->que = que;
	txr->tail = I40E_QTX_TAIL(que->me);

	/* Initialize the TX lock */
	snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
	    device_get_nameunit(dev), que->me);
2406303816Ssbruno mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); 2407333343Serj /* 2408333343Serj * Create the TX descriptor ring 2409333343Serj * 2410333343Serj * In Head Writeback mode, the descriptor ring is one bigger 2411333343Serj * than the number of descriptors for space for the HW to 2412333343Serj * write back index of last completed descriptor. 2413333343Serj */ 2414333343Serj if (vsi->enable_head_writeback) { 2415333343Serj tsize = roundup2((que->num_tx_desc * 2416333343Serj sizeof(struct i40e_tx_desc)) + 2417333343Serj sizeof(u32), DBA_ALIGN); 2418333343Serj } else { 2419333343Serj tsize = roundup2((que->num_tx_desc * 2420333343Serj sizeof(struct i40e_tx_desc)), DBA_ALIGN); 2421333343Serj } 2422303816Ssbruno if (i40e_allocate_dma_mem(hw, 2423303816Ssbruno &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { 2424303816Ssbruno device_printf(dev, 2425303816Ssbruno "Unable to allocate TX Descriptor memory\n"); 2426303816Ssbruno error = ENOMEM; 2427333343Serj goto err_destroy_tx_mtx; 2428303816Ssbruno } 2429303816Ssbruno txr->base = (struct i40e_tx_desc *)txr->dma.va; 2430303816Ssbruno bzero((void *)txr->base, tsize); 2431303816Ssbruno /* Now allocate transmit soft structs for the ring */ 2432303816Ssbruno if (ixl_allocate_tx_data(que)) { 2433303816Ssbruno device_printf(dev, 2434303816Ssbruno "Critical Failure setting up TX structures\n"); 2435303816Ssbruno error = ENOMEM; 2436333343Serj goto err_free_tx_dma; 2437303816Ssbruno } 2438303816Ssbruno /* Allocate a buf ring */ 2439303816Ssbruno txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF, 2440303816Ssbruno M_NOWAIT, &txr->mtx); 2441303816Ssbruno if (txr->br == NULL) { 2442303816Ssbruno device_printf(dev, 2443303816Ssbruno "Critical Failure setting up TX buf ring\n"); 2444303816Ssbruno error = ENOMEM; 2445333343Serj goto err_free_tx_data; 2446303816Ssbruno } 2447303816Ssbruno 2448333343Serj rsize = roundup2(que->num_rx_desc * 2449303816Ssbruno sizeof(union i40e_rx_desc), DBA_ALIGN); 2450303816Ssbruno 
rxr->que = que; 2451303816Ssbruno rxr->tail = I40E_QRX_TAIL(que->me); 2452303816Ssbruno 2453303816Ssbruno /* Initialize the RX side lock */ 2454303816Ssbruno snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", 2455303816Ssbruno device_get_nameunit(dev), que->me); 2456303816Ssbruno mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); 2457303816Ssbruno 2458303816Ssbruno if (i40e_allocate_dma_mem(hw, 2459303816Ssbruno &rxr->dma, i40e_mem_reserved, rsize, 4096)) { 2460303816Ssbruno device_printf(dev, 2461303816Ssbruno "Unable to allocate RX Descriptor memory\n"); 2462303816Ssbruno error = ENOMEM; 2463333343Serj goto err_destroy_rx_mtx; 2464303816Ssbruno } 2465303816Ssbruno rxr->base = (union i40e_rx_desc *)rxr->dma.va; 2466303816Ssbruno bzero((void *)rxr->base, rsize); 2467303816Ssbruno /* Allocate receive soft structs for the ring*/ 2468303816Ssbruno if (ixl_allocate_rx_data(que)) { 2469303816Ssbruno device_printf(dev, 2470303816Ssbruno "Critical Failure setting up receive structs\n"); 2471303816Ssbruno error = ENOMEM; 2472333343Serj goto err_free_rx_dma; 2473303816Ssbruno } 2474303816Ssbruno 2475303816Ssbruno return (0); 2476303816Ssbruno 2477333343Serjerr_free_rx_dma: 2478333343Serj i40e_free_dma_mem(&pf->hw, &rxr->dma); 2479333343Serjerr_destroy_rx_mtx: 2480333343Serj mtx_destroy(&rxr->mtx); 2481333343Serj /* err_free_tx_buf_ring */ 2482333343Serj buf_ring_free(txr->br, M_DEVBUF); 2483333343Serjerr_free_tx_data: 2484333343Serj ixl_free_que_tx(que); 2485333343Serjerr_free_tx_dma: 2486333343Serj i40e_free_dma_mem(&pf->hw, &txr->dma); 2487333343Serjerr_destroy_tx_mtx: 2488333343Serj mtx_destroy(&txr->mtx); 2489333343Serj 2490303816Ssbruno return (error); 2491303816Ssbruno} 2492303816Ssbruno 2493333343Serjint 2494333343Serjixl_vsi_setup_queues(struct ixl_vsi *vsi) 2495333343Serj{ 2496333343Serj struct ixl_queue *que; 2497333343Serj int error = 0; 2498333343Serj 2499333343Serj for (int i = 0; i < vsi->num_queues; i++) { 2500333343Serj que = &vsi->queues[i]; 
2501333343Serj error = ixl_vsi_setup_queue(vsi, que, i); 2502333343Serj if (error) 2503333343Serj break; 2504333343Serj } 2505349163Serj if (error == 0) 2506349163Serj sysctl_ctx_init(&vsi->sysctl_ctx); 2507349163Serj 2508333343Serj return (error); 2509333343Serj} 2510333343Serj 2511333343Serj 2512266423Sjfv/********************************************************************* 2513266423Sjfv * 2514266423Sjfv * Allocate memory for the VSI (virtual station interface) and their 2515266423Sjfv * associated queues, rings and the descriptors associated with each, 2516266423Sjfv * called only once at attach. 2517266423Sjfv * 2518266423Sjfv **********************************************************************/ 2519303816Ssbrunoint 2520270346Sjfvixl_setup_stations(struct ixl_pf *pf) 2521266423Sjfv{ 2522266423Sjfv device_t dev = pf->dev; 2523270346Sjfv struct ixl_vsi *vsi; 2524303816Ssbruno int error = 0; 2525266423Sjfv 2526266423Sjfv vsi = &pf->vsi; 2527266423Sjfv vsi->back = (void *)pf; 2528266423Sjfv vsi->hw = &pf->hw; 2529266423Sjfv vsi->id = 0; 2530266423Sjfv vsi->num_vlans = 0; 2531279858Sjfv vsi->back = pf; 2532266423Sjfv 2533333343Serj if (pf->msix > 1) 2534333343Serj vsi->flags |= IXL_FLAGS_USES_MSIX; 2535333343Serj 2536266423Sjfv /* Get memory for the station queues */ 2537266423Sjfv if (!(vsi->queues = 2538270346Sjfv (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * 2539266423Sjfv vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { 2540266423Sjfv device_printf(dev, "Unable to allocate queue memory\n"); 2541266423Sjfv error = ENOMEM; 2542333343Serj goto ixl_setup_stations_err; 2543266423Sjfv } 2544266423Sjfv 2545318357Serj /* Then setup each queue */ 2546333343Serj error = ixl_vsi_setup_queues(vsi); 2547333343Serjixl_setup_stations_err: 2548333343Serj return (error); 2549266423Sjfv} 2550266423Sjfv 2551266423Sjfv/* 2552266423Sjfv** Provide a update to the queue RX 2553266423Sjfv** interrupt moderation value. 
*/
void
ixl_set_queue_rx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (pf->dynamic_rx_itr) {
		/* Bytes per interval is the load metric */
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = min(rx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = pf->rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	}
	/* Reset counters for the next measurement interval */
	rxr->bytes = 0;
	rxr->packets = 0;
	return;
}


/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
void
ixl_set_queue_tx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;


	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (pf->dynamic_tx_itr) {
		/* Bytes per interval is the load metric */
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		/* Adjust latency range */
		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = min(tx_itr, IXL_MAX_ITR);
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    que->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = pf->tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
			    que->me), txr->itr);
		}
	}
	/* Reset counters for the next measurement interval */
	txr->bytes = 0;
	txr->packets = 0;
	return;
}

/*
 * Create a sysctl node named sysctl_name for the VSI under the device's
 * sysctl tree, attach its ethernet statistics, and optionally the
 * per-queue statistics.  Uses the VSI's own sysctl context, so the
 * nodes are torn down with sysctl_ctx_free() in ixl_vsi_free_queues().
 */
void
ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi);
}

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	/* Read-only access or copy-in error: nothing further to do */
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}

/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	/* Read-only access or copy-in error: nothing further to do */
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}

/*
 * Register the PF's driver, VSI and MAC statistics sysctls under the
 * device's sysctl tree.
 */
void
ixl_add_hw_stats(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw_port_stats *pf_stats = &pf->stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	/* Driver statistics */
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &pf->watchdog_events,
	    "Watchdog timeouts");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
	    CTLFLAG_RD, &pf->admin_irq,
	    "Admin Queue IRQ Handled");

	ixl_vsi_add_sysctls(&pf->vsi, "pf", true);
	/* MAC stats */
	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}

/*
 * Add a "mac" sysctl node with ethernet statistics plus a table of
 * per-counter MAC/port statistics read from the given hardware stats
 * structure.
 */
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	/* {counter pointer, sysctl name, description}, NULL-terminated */
	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

/*
 * Program the RSS hash key for the PF's LAN VSI (via admin queue on
 * X722, via the HKEY registers otherwise).
 */
void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t	dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
		/*
		 * NOTE(review): 52 is presumably
		 * sizeof(struct i40e_aqc_get_set_rss_key_data) — confirm
		 * against the shared-code definition.
		 */
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}

/*
 * Configure enabled PCTYPES for RSS.
 */
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64		set_hena = 0, hena;

#ifdef RSS
	u32		rss_hash_config;

	/* Translate the kernel RSS hash configuration into PCTYPE bits */
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	/* OR the new PCTYPEs into the 64-bit HENA register pair */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

}

/*
 * Program the RSS lookup table, spreading entries across the VSI's
 * queues in round-robin fashion (via admin queue on X722, via the
 * HLUT registers otherwise).
 */
void
ixl_set_rss_hlut(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	device_t	dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	int		i, que_id;
	int		lut_entry_width;
	u32		lut = 0;
	enum i40e_status_code status;

	lut_entry_width = pf->hw.func_caps.rss_table_entry_width;

	/* Populate the LUT with max no. of queues in round robin fashion */
	u8 hlut_buf[512];
	for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = i % vsi->num_queues;
#endif
		/* Mask the queue id down to the entry width */
		lut = (que_id & ((0x1 << lut_entry_width) - 1));
		hlut_buf[i] = lut;
	}

	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
		if (status)
			device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		/* Registers are written four LUT bytes at a time */
		for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
			wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
		ixl_flush(hw);
	}
}

/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}

/*
** This routine is run via a vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
void
ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct i40e_hw	*hw = vsi->hw;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXL_PF_LOCK(pf);
	++vsi->num_vlans;
	ixl_add_filter(vsi, hw->mac.addr, vtag);
	IXL_PF_UNLOCK(pf);
}

/*
** This routine is run via a vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
void
ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct i40e_hw	*hw = vsi->hw;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXL_PF_LOCK(pf);
	--vsi->num_vlans;
	ixl_del_filter(vsi, hw->mac.addr, vtag);
	IXL_PF_UNLOCK(pf);
}

/*
 * In some firmware versions there is default MAC/VLAN filter
 * configured which interferes with filters managed by driver.
 * Make sure it's removed.
 */
void
ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
	struct i40e_aqc_remove_macvlan_element_data e;

	/* Remove the exact-match filter for the permanent MAC, VLAN 0 */
	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);

	/* And the VLAN-agnostic variant of the same filter */
	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
	    I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
}

/*
 * Sync the hardware MAC address and filter with the interface's
 * link-level address.  No-op when unchanged; otherwise validates the
 * new address, writes it to firmware, and swaps the MAC filter entry.
 */
static enum i40e_status_code
ixl_set_lla(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	u8 tmpaddr[ETHER_ADDR_LEN];
	enum i40e_status_code status;

	status = I40E_SUCCESS;

	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETHER_ADDR_LEN);
	/* Address unchanged: nothing to do */
	if (memcmp(hw->mac.addr, tmpaddr, ETHER_ADDR_LEN) == 0)
		goto set_lla_exit;

	status = i40e_validate_mac_addr(tmpaddr);
	if (status != I40E_SUCCESS)
		goto set_lla_exit;

	ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	bcopy(tmpaddr, hw->mac.addr, ETHER_ADDR_LEN);
	status = i40e_aq_mac_address_write(hw,
	    I40E_AQC_WRITE_TYPE_LAA_ONLY,
	    hw->mac.addr, NULL);
	if (status != I40E_SUCCESS)
		goto set_lla_exit;

	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
set_lla_exit:
	return (status);
}

/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Add broadcast address */
	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);

	/* VF VSIs get only the broadcast filter */
	if (IXL_VSI_IS_VF(vsi))
		return;

	ixl_del_default_hw_filters(vsi);

	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
3127299552Serj */ 3128303816Ssbruno if (pf->enable_tx_fc_filter) 3129303816Ssbruno i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); 3130266423Sjfv} 3131266423Sjfv 3132266423Sjfv/* 3133266423Sjfv** This routine adds mulicast filters 3134266423Sjfv*/ 3135303816Ssbrunovoid 3136270346Sjfvixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) 3137266423Sjfv{ 3138270346Sjfv struct ixl_mac_filter *f; 3139266423Sjfv 3140266423Sjfv /* Does one already exist */ 3141270346Sjfv f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); 3142266423Sjfv if (f != NULL) 3143266423Sjfv return; 3144266423Sjfv 3145270346Sjfv f = ixl_get_filter(vsi); 3146266423Sjfv if (f == NULL) { 3147266423Sjfv printf("WARNING: no filter available!!\n"); 3148266423Sjfv return; 3149266423Sjfv } 3150266423Sjfv bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); 3151270346Sjfv f->vlan = IXL_VLAN_ANY; 3152270346Sjfv f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED 3153270346Sjfv | IXL_FILTER_MC); 3154266423Sjfv 3155266423Sjfv return; 3156266423Sjfv} 3157266423Sjfv 3158303816Ssbrunovoid 3159279858Sjfvixl_reconfigure_filters(struct ixl_vsi *vsi) 3160279858Sjfv{ 3161349163Serj ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_hw_filters); 3162279858Sjfv} 3163279858Sjfv 3164266423Sjfv/* 3165266423Sjfv** This routine adds macvlan filters 3166266423Sjfv*/ 3167303816Ssbrunovoid 3168323211Srlibbyixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 3169266423Sjfv{ 3170270346Sjfv struct ixl_mac_filter *f, *tmp; 3171279858Sjfv struct ixl_pf *pf; 3172279858Sjfv device_t dev; 3173266423Sjfv 3174270346Sjfv DEBUGOUT("ixl_add_filter: begin"); 3175266423Sjfv 3176279858Sjfv pf = vsi->back; 3177279858Sjfv dev = pf->dev; 3178279858Sjfv 3179266423Sjfv /* Does one already exist */ 3180270346Sjfv f = ixl_find_filter(vsi, macaddr, vlan); 3181266423Sjfv if (f != NULL) 3182266423Sjfv return; 3183266423Sjfv /* 3184266423Sjfv ** Is this the first vlan being registered, if so we 3185266423Sjfv ** need to remove the ANY filter 
that indicates we are 3186266423Sjfv ** not in a vlan, and replace that with a 0 filter. 3187266423Sjfv */ 3188270346Sjfv if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { 3189270346Sjfv tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); 3190266423Sjfv if (tmp != NULL) { 3191270346Sjfv ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY); 3192270346Sjfv ixl_add_filter(vsi, macaddr, 0); 3193266423Sjfv } 3194266423Sjfv } 3195266423Sjfv 3196270346Sjfv f = ixl_get_filter(vsi); 3197266423Sjfv if (f == NULL) { 3198266423Sjfv device_printf(dev, "WARNING: no filter available!!\n"); 3199266423Sjfv return; 3200266423Sjfv } 3201266423Sjfv bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); 3202266423Sjfv f->vlan = vlan; 3203270346Sjfv f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); 3204270346Sjfv if (f->vlan != IXL_VLAN_ANY) 3205270346Sjfv f->flags |= IXL_FILTER_VLAN; 3206279858Sjfv else 3207279858Sjfv vsi->num_macs++; 3208266423Sjfv 3209270346Sjfv ixl_add_hw_filters(vsi, f->flags, 1); 3210266423Sjfv return; 3211266423Sjfv} 3212266423Sjfv 3213303816Ssbrunovoid 3214323211Srlibbyixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 3215266423Sjfv{ 3216270346Sjfv struct ixl_mac_filter *f; 3217266423Sjfv 3218270346Sjfv f = ixl_find_filter(vsi, macaddr, vlan); 3219266423Sjfv if (f == NULL) 3220266423Sjfv return; 3221266423Sjfv 3222270346Sjfv f->flags |= IXL_FILTER_DEL; 3223270346Sjfv ixl_del_hw_filters(vsi, 1); 3224279858Sjfv vsi->num_macs--; 3225266423Sjfv 3226266423Sjfv /* Check if this is the last vlan removal */ 3227270346Sjfv if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) { 3228266423Sjfv /* Switch back to a non-vlan filter */ 3229270346Sjfv ixl_del_filter(vsi, macaddr, 0); 3230270346Sjfv ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); 3231266423Sjfv } 3232266423Sjfv return; 3233266423Sjfv} 3234266423Sjfv 3235266423Sjfv/* 3236266423Sjfv** Find the filter with both matching mac addr and vlan id 3237266423Sjfv*/ 3238303816Ssbrunostruct ixl_mac_filter * 
ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f;
	bool			match = FALSE;

	/* Linear scan of the VSI filter list; returns NULL if no entry
	 * matches both the MAC address and the exact VLAN id. */
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if (!cmp_etheraddr(f->macaddr, macaddr))
			continue;
		if (f->vlan == vlan) {
			match = TRUE;
			break;
		}
	}

	if (!match)
		f = NULL;
	return (f);
}

/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
**
** 'flags' selects which list entries are programmed (entry is taken when
** all bits in 'flags' are set in f->flags); at most 'cnt' entries are
** sent in a single i40e_aq_add_macvlan() call.  On success the VSI's
** num_hw_filters count is bumped by the number actually queued.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter	*f;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	int			err, j = 0;

	pf = vsi->back;
	dev = pf->dev;
	hw = &pf->hw;
	IXL_PF_LOCK_ASSERT(pf);

	/* M_NOWAIT: may be called in a context where sleeping is not
	 * allowed; failure is logged and the update silently skipped. */
	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	/*
	** Scan the filter list, each time we find one
	** we add it to the admin queue array and turn off
	** the add bit.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & flags) == flags) {
			b = &a[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
			if (f->vlan == IXL_VLAN_ANY) {
				b->vlan_tag = 0;
				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				b->vlan_tag = f->vlan;
				b->flags = 0;
			}
			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			f->flags &= ~IXL_FILTER_ADD;
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
		if (err)
			device_printf(dev, "aq_add_macvlan err %d, "
			    "aq_error %d\n", err, hw->aq.asq_last_status);
		else
			vsi->num_hw_filters += j;
	}
	free(a, M_DEVBUF);
	return;
}

/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	struct ixl_mac_filter	*f, *f_temp;
	int			err, j = 0;

	DEBUGOUT("ixl_del_hw_filters: begin\n");

	pf = vsi->back;
	hw = &pf->hw;
	dev = pf->dev;

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		printf("del hw filter failed to get memory\n");
		return;
	}

	/* Collect up to 'cnt' entries flagged IXL_FILTER_DEL; each is
	 * unlinked from the software list and freed as it is queued. */
	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			e = &d[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			if (f->vlan == IXL_VLAN_ANY) {
				e->vlan_tag = 0;
				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				e->vlan_tag = f->vlan;
			}
			/* delete entry from vsi list */
			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
			free(f, M_DEVBUF);
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
		/* ENOENT (filter already gone) is treated as success.
		 * On other errors, count per-element successes via the
		 * zeroed error_code fields and adjust num_hw_filters by
		 * only those that were actually removed. */
		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
			int sc = 0;
			for (int i = 0; i < j; i++)
				sc += (!d[i].error_code);
			vsi->num_hw_filters -= sc;
			device_printf(dev,
			    "Failed to remove %d/%d filters, aq error %d\n",
			    j - sc, j, hw->aq.asq_last_status);
		} else
			vsi->num_hw_filters -= j;
	}
	free(d, M_DEVBUF);

	DEBUGOUT("ixl_del_hw_filters: end\n");
	return;
}

/*
 * Enable the PF TX ring backing VSI queue 'vsi_qidx'.  Sets the QENA
 * request bit and polls up to 10 times (10us apart) for the hardware to
 * report the queue enabled; returns ETIMEDOUT if it never does.
 */
int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	/* Map the VSI-relative queue index to the PF-global index */
	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

/*
 * Enable the PF RX ring backing VSI queue 'vsi_qidx'; RX counterpart of
 * ixl_enable_tx_ring() with the same poll-and-timeout behavior.
 */
int
ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32
		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
	    I40E_QRX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "RX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

/*
 * Enable both the TX and RX rings of one queue pair; stops at the first
 * failure (the ring-level function has already logged it).
 */
int
ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}

/* For PF VSI only */
int
ixl_enable_rings(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = vsi->back;
	int		error = 0;

	/* Enable every queue pair; abort on the first failure */
	for (int i = 0; i < vsi->num_queues; i++) {
		error = ixl_enable_ring(pf, &pf->qtag, i);
		if (error)
			return (error);
	}

	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 *
 * Disable the PF TX ring for VSI queue 'vsi_qidx': clear the QENA request
 * bit and poll up to 10 times (10ms apart) for the hardware to report the
 * queue disabled; ETIMEDOUT if the queue stays enabled.
 */
int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
	i40e_usec_delay(500);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "TX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	/* Clear the QENA request bit and wait for the status bit to drop */
	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "RX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

/*
 * Disable both rings of one queue pair; stops at the first failure
 * (the ring-level function has already logged it).
 */
int
ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}

/* For PF VSI only */
int
ixl_disable_rings(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = vsi->back;
	int		error = 0;

	for (int i = 0; i < vsi->num_queues; i++) {
		error = ixl_disable_ring(pf, &pf->qtag, i);
		if (error)
			return (error);
	}

	return (error);
}

/**
 * ixl_handle_mdd_event
 *
 * Called from interrupt handler to identify possibly malicious vfs
 * (But also detects events from the PF, as well)
 **/
void
ixl_handle_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	u32 reg;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		    I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		    I40E_GL_MDET_TX_QUEUE_SHIFT;
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d\n",
		    event, queue, pf_num);
		/* Write-1-to-clear the global TX MDD latch */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
		    I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
		    I40E_GL_MDET_RX_QUEUE_SHIFT;
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d\n",
		    event, queue, pf_num);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		/* Check whether the event was raised by this PF itself */
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			device_printf(dev,
			    "MDD TX event is for this function!\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			device_printf(dev,
			    "MDD RX event is for this function!\n");
			pf_mdd_detected = true;
		}
	}

	/* re-enable mdd interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	ixl_flush(hw);
}

/*
 * Enable interrupts for the VSI: per-queue interrupts when more than one
 * MSI-X vector is in use, otherwise the shared "other"/vector-0 interrupt.
 */
void
ixl_enable_intr(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	if (pf->msix > 1) {
		for (int i = 0; i < vsi->num_queues; i++, que++)
			ixl_enable_queue(hw, que->me);
	} else
		ixl_enable_intr0(hw);
}

/* Disable the per-queue interrupts of every queue on this VSI */
void
ixl_disable_rings_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixl_disable_queue(hw, que->me);
}

/* Enable the PF "other causes" interrupt (MSI-X vector 0 / admin queue) */
void
ixl_enable_intr0(struct i40e_hw *hw)
{
	u32		reg;

	/* Use IXL_ITR_NONE so ITR isn't updated here */
	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
}

/* Disable the PF "other causes" interrupt and flush the write */
void
ixl_disable_intr0(struct i40e_hw *hw)
{
	u32		reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
	ixl_flush(hw);
}

/* Enable the MSI-X interrupt of a single queue (hardware queue id 'id') */
void
ixl_enable_queue(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}

/* Disable the MSI-X interrupt of a single queue */
void
ixl_disable_queue(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}

/*
 * Harvest all PF port-level hardware statistics into pf->stats, using
 * pf->stats_offsets as the baseline (ixl_stat_update32/48 compute deltas
 * against it).  The first pass (stat_offsets_loaded == false) presumably
 * only snapshots baselines — TODO(review): confirm in ixl_stat_update*.
 * Afterwards refreshes VSI stats and, if SR-IOV VFs are enabled, each
 * active VF's ethernet stats.
 */
void
ixl_update_stats_counters(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ixl_vf	*vf;

	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;

	/* Update hw stats */
	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->crc_errors, &nsd->crc_errors);
	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->illegal_bytes, &nsd->illegal_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
	    I40E_GLPRT_GORCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
	    I40E_GLPRT_GOTCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.rx_discards,
	    &nsd->eth.rx_discards);
	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
	    I40E_GLPRT_UPRCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.rx_unicast,
	    &nsd->eth.rx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
	    I40E_GLPRT_UPTCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.tx_unicast,
	    &nsd->eth.tx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
	    I40E_GLPRT_MPRCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.rx_multicast,
	    &nsd->eth.rx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
	    I40E_GLPRT_MPTCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.tx_multicast,
	    &nsd->eth.tx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
	    I40E_GLPRT_BPRCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.rx_broadcast,
	    &nsd->eth.rx_broadcast);
	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
	    I40E_GLPRT_BPTCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.tx_broadcast,
	    &nsd->eth.tx_broadcast);

	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_dropped_link_down,
	    &nsd->tx_dropped_link_down);
	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->mac_local_faults,
	    &nsd->mac_local_faults);
	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->mac_remote_faults,
	    &nsd->mac_remote_faults);
	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_length_errors,
	    &nsd->rx_length_errors);

	/* Flow control (LFC) stats */
	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->link_xon_rx, &nsd->link_xon_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->link_xon_tx, &nsd->link_xon_tx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->link_xoff_rx, &nsd->link_xoff_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/* Packet size stats rx */
	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
	    I40E_GLPRT_PRC64L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_64, &nsd->rx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
	    I40E_GLPRT_PRC127L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_127, &nsd->rx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
	    I40E_GLPRT_PRC255L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_255, &nsd->rx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
	    I40E_GLPRT_PRC511L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_511, &nsd->rx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
	    I40E_GLPRT_PRC1023L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_1023, &nsd->rx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
	    I40E_GLPRT_PRC1522L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_1522, &nsd->rx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
	    I40E_GLPRT_PRC9522L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_big, &nsd->rx_size_big);

	/* Packet size stats tx */
	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
	    I40E_GLPRT_PTC64L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_64, &nsd->tx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
	    I40E_GLPRT_PTC127L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_127, &nsd->tx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
	    I40E_GLPRT_PTC255L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_255, &nsd->tx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
	    I40E_GLPRT_PTC511L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_511, &nsd->tx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
	    I40E_GLPRT_PTC1023L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_1023, &nsd->tx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
	    I40E_GLPRT_PTC1522L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_1522, &nsd->tx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
	    I40E_GLPRT_PTC9522L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_big, &nsd->tx_size_big);

	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_undersize, &nsd->rx_undersize);
	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_fragments, &nsd->rx_fragments);
	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_oversize, &nsd->rx_oversize);
	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_jabber, &nsd->rx_jabber);
	/* EEE */
	i40e_get_phy_lpi_status(hw, nsd);

	ixl_stat_update32(hw, I40E_PRTPM_TLPIC,
	    pf->stat_offsets_loaded,
	    &osd->tx_lpi_count, &nsd->tx_lpi_count);
	ixl_stat_update32(hw, I40E_PRTPM_RLPIC,
	    pf->stat_offsets_loaded,
	    &osd->rx_lpi_count, &nsd->rx_lpi_count);

	pf->stat_offsets_loaded = true;
	/* End hw stats */

	/* Update vsi stats */
	ixl_update_vsi_stats(vsi);

	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &pf->vfs[i];
		if (vf->vf_flags & VF_FLAG_ENABLED)
			ixl_update_eth_stats(&pf->vfs[i].vsi);
	}
}

/*
 * Tear down driver/hardware state ahead of a device reset: stop the
 * interface (if it was up), release queue MSI-X resources, shut down the
 * LAN HMC and admin queue, drain the timer, free ring buffers/locks/
 * filters, and release the PF queue allocation.  Returns the last error
 * from the HMC/adminq shutdown (errors are logged but do not abort the
 * teardown).
 */
int
ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	int error = 0;

	/* Teardown */
	if (is_up)
		ixl_stop(pf);

	ixl_teardown_queue_msix(vsi);

	/* Only shut down the HMC if it was actually initialized */
	if (hw->hmc.hmc_obj) {
		error = i40e_shutdown_lan_hmc(hw);
		if (error)
			device_printf(dev,
			    "Shutdown LAN HMC failed with code %d\n", error);
	}

	callout_drain(&pf->timer);

	ixl_disable_intr0(hw);
	ixl_teardown_adminq_msix(pf);

	error = i40e_shutdown_adminq(hw);
	if (error)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", error);

	/* Free ring buffers, locks and filters */
	ixl_vsi_free_queues(vsi);

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);

	return (error);
}

/*
 * Rebuild driver/hardware state after a device reset: re-init the admin
 * queue, re-read capabilities, re-init the LAN HMC, queue manager and
 * switch config (skipped in firmware recovery mode), recreate queues,
 * interrupts and sysctls, then restart the interface if it was up before.
 */
int
ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	int error = 0;

	device_printf(dev, "Rebuilding driver state...\n");

	if
(!(atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE)) {
		/* Firmware may have entered recovery mode during the reset */
		if (ixl_fw_recovery_mode(pf)) {
			atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
			pf->link_up = FALSE;
			ixl_update_link_status(pf);
		}
	}


	/* Setup */
	error = i40e_init_adminq(hw);
	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
		    error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}
	ixl_configure_intr0_msix(pf);
	ixl_enable_intr0(hw);

	/* Do not init LAN HMC and bring interface up in recovery mode */
	if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) == 0) {
		error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
		    hw->func_caps.num_rx_qp, 0, 0);
		if (error) {
			device_printf(dev, "init_lan_hmc failed: %d\n", error);
			goto ixl_rebuild_hw_structs_after_reset_err;
		}

		error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
		if (error) {
			device_printf(dev, "configure_lan_hmc failed: %d\n", error);
			goto ixl_rebuild_hw_structs_after_reset_err;
		}

		if (!pf->qmgr.qinfo) {
			/* Init queue allocation manager */
			error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_rx_qp);
			if (error) {
				device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
				    error);
				goto ixl_rebuild_hw_structs_after_reset_err;
			}
		}
		/* reserve a contiguous allocation for the PF's VSI */
		error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
		if (error) {
			device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
			    error);
			/* TODO: error handling */
		} else
			device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
			    pf->qtag.num_allocated, pf->qtag.num_active);

		error = ixl_switch_config(pf);
		if (error) {
			device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
			    error);
			goto ixl_rebuild_hw_structs_after_reset_err;
		}
	} /* not in recovery mode */

	/* Remove default filters reinstalled by FW on reset */
	ixl_del_default_hw_filters(vsi);

	if (ixl_vsi_setup_queues(vsi)) {
		device_printf(dev, "setup queues failed!\n");
		error = ENOMEM;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	ixl_vsi_add_sysctls(vsi, "pf", true);

	if (pf->msix > 1) {
		error = ixl_setup_adminq_msix(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
			    error);
			goto ixl_rebuild_hw_structs_after_reset_err;
		}

		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);

		error = ixl_setup_queue_msix(vsi);
		if (error) {
			device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
			    error);
			goto ixl_rebuild_hw_structs_after_reset_err;
		}
		error = ixl_setup_queue_tqs(vsi);
		if (error) {
			device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
			    error);
			goto ixl_rebuild_hw_structs_after_reset_err;
		}
	} else {
		error = ixl_setup_legacy(pf);
		if (error) {
			device_printf(dev, "ixl_setup_legacy() error: %d\n",
			    error);
			goto ixl_rebuild_hw_structs_after_reset_err;
		}
	}

	/* Do not bring interface up in recovery mode */
	if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0)
		return (error);

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		/* TODO: error handling */
	}

	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_int(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_int(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	if (is_up)
		ixl_init(pf);

	device_printf(dev, "Rebuilding driver state done.\n");
	/* Restart the periodic driver timer torn down in prepare_for_reset */
	IXL_PF_LOCK(pf);
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	IXL_PF_UNLOCK(pf);
	return (0);

ixl_rebuild_hw_structs_after_reset_err:
	device_printf(dev, "Reload the driver to recover\n");
	return (error);
}

/*
 * Handle an EMP (firmware) reset: tear down driver state, issue a PF
 * reset where appropriate, then rebuild.  The caller's EMPR_RESETTING
 * state flag is cleared here when done.
 */
void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
	int error = 0;

	ixl_prepare_for_reset(pf, is_up);
	/*
	 * i40e_pf_reset checks the type of reset and acts
	 * accordingly. If EMP or Core reset was performed
	 * doing PF reset is not necessary and it sometimes
	 * fails.
	 */
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, error));
	}

	ixl_rebuild_hw_structs_after_reset(pf, is_up);

	atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
}

/*
** Tasklet handler for MSIX Adminq interrupts
** - do outside interrupt since it might sleep
*/
void
ixl_do_adminq(void *context, int pending)
{
	struct ixl_pf			*pf = context;
	struct i40e_hw			*hw = &pf->hw;
	struct i40e_arq_event_info	event;
	i40e_status			ret;
	device_t			dev = pf->dev;
	u32				loop = 0;
	u16				opcode, arq_pending;

	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
		/* Flag cleared at end of this function */
		ixl_handle_empr_reset(pf);
		return;
	}

	/* Admin Queue handling */
	event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = malloc(event.buf_len, 4153266423Sjfv M_DEVBUF, M_NOWAIT | M_ZERO); 4154266423Sjfv if (!event.msg_buf) { 4155299547Serj device_printf(dev, "%s: Unable to allocate memory for Admin" 4156299547Serj " Queue event!\n", __func__); 4157266423Sjfv return; 4158266423Sjfv } 4159266423Sjfv 4160279858Sjfv IXL_PF_LOCK(pf); 4161266423Sjfv /* clean and process any events */ 4162266423Sjfv do { 4163349163Serj ret = i40e_clean_arq_element(hw, &event, &arq_pending); 4164266423Sjfv if (ret) 4165266423Sjfv break; 4166266423Sjfv opcode = LE16_TO_CPU(event.desc.opcode); 4167303816Ssbruno ixl_dbg(pf, IXL_DBG_AQ, 4168318357Serj "Admin Queue event: %#06x\n", opcode); 4169266423Sjfv switch (opcode) { 4170266423Sjfv case i40e_aqc_opc_get_link_status: 4171279858Sjfv ixl_link_event(pf, &event); 4172266423Sjfv break; 4173266423Sjfv case i40e_aqc_opc_send_msg_to_pf: 4174279858Sjfv#ifdef PCI_IOV 4175279858Sjfv ixl_handle_vf_msg(pf, &event); 4176279858Sjfv#endif 4177266423Sjfv break; 4178266423Sjfv case i40e_aqc_opc_event_lan_overflow: 4179266423Sjfv default: 4180266423Sjfv break; 4181266423Sjfv } 4182266423Sjfv 4183349163Serj } while (arq_pending && (loop++ < IXL_ADM_LIMIT)); 4184266423Sjfv 4185266423Sjfv free(event.msg_buf, M_DEVBUF); 4186266423Sjfv 4187349163Serj /* If there are still messages to process, reschedule. */ 4188349163Serj if (arq_pending > 0) 4189279858Sjfv taskqueue_enqueue(pf->tq, &pf->adminq); 4190266423Sjfv else 4191318357Serj ixl_enable_intr0(hw); 4192279858Sjfv 4193279858Sjfv IXL_PF_UNLOCK(pf); 4194266423Sjfv} 4195266423Sjfv 4196266423Sjfv/** 4197266423Sjfv * Update VSI-specific ethernet statistics counters. 
 **/
void
ixl_update_eth_stats(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *es;
	struct i40e_eth_stats *oes;
	/* Per-VSI statistics counter block assigned by FW */
	u16 stat_idx = vsi->info.stat_counter_idx;

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	/* RX byte/packet counters (48-bit, split across two registers) */
	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	/* TX byte/packet counters */
	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	/* First read establishes the zero offsets; mark them valid */
	vsi->stat_offsets_loaded = true;
}

/*
** Refresh the VSI's eth stats from HW and propagate them into the
** ifnet counters (packets, bytes, errors, drops).
*/
void
ixl_update_vsi_stats(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf;
	struct ifnet		*ifp;
	struct i40e_eth_stats	*es;
	u64			tx_discards;

	struct i40e_hw_port_stats *nsd;

	pf = vsi->back;
	ifp = vsi->ifp;
	es = &vsi->eth_stats;
	nsd = &pf->stats;

	ixl_update_eth_stats(vsi);

	/* Fold per-queue buf_ring drops into the TX discard total */
	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
	for (int i = 0; i < vsi->num_queues; i++)
		tx_discards += vsi->queues[i].txr.br->br_drops;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
	                   es->rx_multicast +
			   es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
	                   es->tx_multicast +
			   es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
	    nsd->rx_jabber);
	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);
}

/**
 * Reset all of the stats for the given pf
 **/
void
ixl_pf_reset_stats(struct ixl_pf *pf)
{
	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
	/* Force the next read to re-establish counter offsets */
	pf->stat_offsets_loaded = false;
}

/**
 * Resets all stats of the given vsi
 **/
void
ixl_vsi_reset_stats(struct ixl_vsi *vsi)
{
	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
	/* Force the next read to re-establish counter offsets */
	vsi->stat_offsets_loaded = false;
}

/**
 * Read and update a 48 bit stat from the hw
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
4327266423Sjfv **/ 4328303816Ssbrunovoid 4329270346Sjfvixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, 4330266423Sjfv bool offset_loaded, u64 *offset, u64 *stat) 4331266423Sjfv{ 4332266423Sjfv u64 new_data; 4333266423Sjfv 4334270799Sbz#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__) 4335266423Sjfv new_data = rd64(hw, loreg); 4336266423Sjfv#else 4337266423Sjfv /* 4338269198Sjfv * Use two rd32's instead of one rd64; FreeBSD versions before 4339303816Ssbruno * 10 don't support 64-bit bus reads/writes. 4340266423Sjfv */ 4341266423Sjfv new_data = rd32(hw, loreg); 4342266423Sjfv new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; 4343266423Sjfv#endif 4344266423Sjfv 4345266423Sjfv if (!offset_loaded) 4346266423Sjfv *offset = new_data; 4347266423Sjfv if (new_data >= *offset) 4348266423Sjfv *stat = new_data - *offset; 4349266423Sjfv else 4350266423Sjfv *stat = (new_data + ((u64)1 << 48)) - *offset; 4351266423Sjfv *stat &= 0xFFFFFFFFFFFFULL; 4352266423Sjfv} 4353266423Sjfv 4354266423Sjfv/** 4355266423Sjfv * Read and update a 32 bit stat from the hw 4356266423Sjfv **/ 4357303816Ssbrunovoid 4358270346Sjfvixl_stat_update32(struct i40e_hw *hw, u32 reg, 4359266423Sjfv bool offset_loaded, u64 *offset, u64 *stat) 4360266423Sjfv{ 4361266423Sjfv u32 new_data; 4362266423Sjfv 4363266423Sjfv new_data = rd32(hw, reg); 4364266423Sjfv if (!offset_loaded) 4365266423Sjfv *offset = new_data; 4366266423Sjfv if (new_data >= *offset) 4367266423Sjfv *stat = (u32)(new_data - *offset); 4368266423Sjfv else 4369266423Sjfv *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); 4370266423Sjfv} 4371266423Sjfv 4372303816Ssbrunovoid 4373299549Serjixl_add_device_sysctls(struct ixl_pf *pf) 4374299549Serj{ 4375299549Serj device_t dev = pf->dev; 4376318357Serj struct i40e_hw *hw = &pf->hw; 4377299549Serj 4378303816Ssbruno struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 4379303816Ssbruno struct sysctl_oid_list *ctx_list = 4380303816Ssbruno 
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 4381303816Ssbruno 4382303816Ssbruno struct sysctl_oid *debug_node; 4383303816Ssbruno struct sysctl_oid_list *debug_list; 4384303816Ssbruno 4385318357Serj struct sysctl_oid *fec_node; 4386318357Serj struct sysctl_oid_list *fec_list; 4387318357Serj 4388349163Serj struct sysctl_oid *eee_node; 4389349163Serj struct sysctl_oid_list *eee_list; 4390349163Serj 4391299549Serj /* Set up sysctls */ 4392303816Ssbruno SYSCTL_ADD_PROC(ctx, ctx_list, 4393299549Serj OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, 4394333343Serj pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); 4395299549Serj 4396303816Ssbruno SYSCTL_ADD_PROC(ctx, ctx_list, 4397299549Serj OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW, 4398333343Serj pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); 4399299549Serj 4400303816Ssbruno SYSCTL_ADD_PROC(ctx, ctx_list, 4401333343Serj OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD, 4402333343Serj pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); 4403333343Serj 4404333343Serj SYSCTL_ADD_PROC(ctx, ctx_list, 4405299549Serj OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, 4406333343Serj pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed"); 4407299549Serj 4408303816Ssbruno SYSCTL_ADD_PROC(ctx, ctx_list, 4409299549Serj OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, 4410299549Serj pf, 0, ixl_sysctl_show_fw, "A", "Firmware version"); 4411299549Serj 4412303816Ssbruno SYSCTL_ADD_PROC(ctx, ctx_list, 4413303816Ssbruno OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD, 4414303816Ssbruno pf, 0, ixl_sysctl_unallocated_queues, "I", 4415303816Ssbruno "Queues not allocated to a PF or VF"); 4416299549Serj 4417303816Ssbruno SYSCTL_ADD_PROC(ctx, ctx_list, 4418303816Ssbruno OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW, 4419303816Ssbruno pf, 0, ixl_sysctl_pf_tx_itr, "I", 4420303816Ssbruno "Immediately set TX ITR value for all queues"); 4421303816Ssbruno 
4422303816Ssbruno SYSCTL_ADD_PROC(ctx, ctx_list, 4423303816Ssbruno OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW, 4424303816Ssbruno pf, 0, ixl_sysctl_pf_rx_itr, "I", 4425303816Ssbruno "Immediately set RX ITR value for all queues"); 4426303816Ssbruno 4427303816Ssbruno SYSCTL_ADD_INT(ctx, ctx_list, 4428299549Serj OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, 4429303816Ssbruno &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); 4430299549Serj 4431303816Ssbruno SYSCTL_ADD_INT(ctx, ctx_list, 4432299549Serj OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, 4433303816Ssbruno &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); 4434299549Serj 4435333343Serj SYSCTL_ADD_INT(ctx, ctx_list, 4436333343Serj OID_AUTO, "tx_ring_size", CTLFLAG_RD, 4437333343Serj &pf->vsi.num_tx_desc, 0, "TX ring size"); 4438333343Serj 4439333343Serj SYSCTL_ADD_INT(ctx, ctx_list, 4440333343Serj OID_AUTO, "rx_ring_size", CTLFLAG_RD, 4441333343Serj &pf->vsi.num_rx_desc, 0, "RX ring size"); 4442333343Serj 4443318357Serj /* Add FEC sysctls for 25G adapters */ 4444333343Serj if (i40e_is_25G_device(hw->device_id)) { 4445318357Serj fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, 4446318357Serj OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls"); 4447318357Serj fec_list = SYSCTL_CHILDREN(fec_node); 4448318357Serj 4449318357Serj SYSCTL_ADD_PROC(ctx, fec_list, 4450333343Serj OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW, 4451318357Serj pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); 4452318357Serj 4453318357Serj SYSCTL_ADD_PROC(ctx, fec_list, 4454333343Serj OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW, 4455318357Serj pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); 4456318357Serj 4457318357Serj SYSCTL_ADD_PROC(ctx, fec_list, 4458333343Serj OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW, 4459318357Serj pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link"); 4460318357Serj 4461318357Serj SYSCTL_ADD_PROC(ctx, fec_list, 4462333343Serj OID_AUTO, "rs_requested", CTLTYPE_INT | 
CTLFLAG_RW, 4463318357Serj pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link"); 4464318357Serj 4465318357Serj SYSCTL_ADD_PROC(ctx, fec_list, 4466333343Serj OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW, 4467318357Serj pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes"); 4468318357Serj } 4469318357Serj 4470333343Serj SYSCTL_ADD_PROC(ctx, ctx_list, 4471333343Serj OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW, 4472333343Serj pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); 4473333343Serj 4474349163Serj eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, 4475349163Serj OID_AUTO, "eee", CTLFLAG_RD, NULL, 4476349163Serj "Energy Efficient Ethernet (EEE) Sysctls"); 4477349163Serj eee_list = SYSCTL_CHILDREN(eee_node); 4478349163Serj 4479349163Serj SYSCTL_ADD_PROC(ctx, eee_list, 4480349163Serj OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW, 4481349163Serj pf, 0, ixl_sysctl_eee_enable, "I", 4482349163Serj "Enable Energy Efficient Ethernet (EEE)"); 4483349163Serj 4484349163Serj SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", 4485349163Serj CTLFLAG_RD, &pf->stats.tx_lpi_status, 0, 4486349163Serj "TX LPI status"); 4487349163Serj 4488349163Serj SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", 4489349163Serj CTLFLAG_RD, &pf->stats.rx_lpi_status, 0, 4490349163Serj "RX LPI status"); 4491349163Serj 4492349163Serj SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", 4493349163Serj CTLFLAG_RD, &pf->stats.tx_lpi_count, 4494349163Serj "TX LPI count"); 4495349163Serj 4496349163Serj SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count", 4497349163Serj CTLFLAG_RD, &pf->stats.rx_lpi_count, 4498349163Serj "RX LPI count"); 4499303816Ssbruno /* Add sysctls meant to print debug information, but don't list them 4500303816Ssbruno * in "sysctl -a" output. 
*/ 4501303816Ssbruno debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 4502303816Ssbruno OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls"); 4503303816Ssbruno debug_list = SYSCTL_CHILDREN(debug_node); 4504299549Serj 4505303816Ssbruno SYSCTL_ADD_UINT(ctx, debug_list, 4506303816Ssbruno OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 4507303816Ssbruno &pf->hw.debug_mask, 0, "Shared code debug message level"); 4508299549Serj 4509303816Ssbruno SYSCTL_ADD_UINT(ctx, debug_list, 4510303816Ssbruno OID_AUTO, "core_debug_mask", CTLFLAG_RW, 4511303816Ssbruno &pf->dbg_mask, 0, "Non-hared code debug message level"); 4512303816Ssbruno 4513303816Ssbruno SYSCTL_ADD_PROC(ctx, debug_list, 4514299549Serj OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD, 4515299551Serj pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); 4516299549Serj 4517303816Ssbruno SYSCTL_ADD_PROC(ctx, debug_list, 4518299549Serj OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD, 4519299549Serj pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); 4520299549Serj 4521303816Ssbruno SYSCTL_ADD_PROC(ctx, debug_list, 4522299549Serj OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD, 4523299549Serj pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); 4524299549Serj 4525303816Ssbruno SYSCTL_ADD_PROC(ctx, debug_list, 4526299549Serj OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD, 4527299549Serj pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); 4528299549Serj 4529303816Ssbruno SYSCTL_ADD_PROC(ctx, debug_list, 4530299549Serj OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD, 4531299549Serj pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); 4532299551Serj 4533303816Ssbruno SYSCTL_ADD_PROC(ctx, debug_list, 4534369202Sdonner OID_AUTO, "switch_vlans", CTLTYPE_INT | CTLFLAG_WR, 4535369202Sdonner pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration"); 4536369202Sdonner 4537369202Sdonner SYSCTL_ADD_PROC(ctx, debug_list, 4538303816Ssbruno 
OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD, 4539303816Ssbruno pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); 4540303816Ssbruno 4541303816Ssbruno SYSCTL_ADD_PROC(ctx, debug_list, 4542303816Ssbruno OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD, 4543303816Ssbruno pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); 4544318357Serj 4545318357Serj SYSCTL_ADD_PROC(ctx, debug_list, 4546318357Serj OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD, 4547318357Serj pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); 4548318357Serj 4549318357Serj SYSCTL_ADD_PROC(ctx, debug_list, 4550318357Serj OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR, 4551318357Serj pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); 4552318357Serj 4553333343Serj SYSCTL_ADD_PROC(ctx, debug_list, 4554333343Serj OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD, 4555333343Serj pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 4556333343Serj 4557318357Serj if (pf->has_i2c) { 4558318357Serj SYSCTL_ADD_PROC(ctx, debug_list, 4559318357Serj OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, 4560318357Serj pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus"); 4561318357Serj 4562318357Serj SYSCTL_ADD_PROC(ctx, debug_list, 4563318357Serj OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, 4564318357Serj pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus"); 4565349163Serj 4566349163Serj SYSCTL_ADD_PROC(ctx, debug_list, 4567349163Serj OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD, 4568349163Serj pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); 4569318357Serj } 4570318357Serj 4571299551Serj#ifdef PCI_IOV 4572303816Ssbruno SYSCTL_ADD_UINT(ctx, debug_list, 4573299551Serj OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl, 4574299551Serj 0, "PF/VF Virtual Channel debug level"); 4575299549Serj#endif 4576299549Serj} 4577299549Serj 4578266423Sjfv/* 
 * Primarily for finding out how many queues can be assigned to VFs,
 * at runtime.
 */
static int
ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int queues;

	IXL_PF_LOCK(pf);
	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
	IXL_PF_UNLOCK(pf);

	return sysctl_handle_int(oidp, NULL, queues, req);
}

/*
** Set flow control using sysctl:
** 	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
int
ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_fc, error = 0;
	enum i40e_status_code aq_error = 0;
	u8 fc_aq_err = 0;

	/* Get request */
	requested_fc = pf->fc;
	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* FW recovery mode precludes any link configuration changes */
	if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
		device_printf(dev, "Interface is currently in FW recovery mode. "
		    "Setting flow control not supported\n");
		return (EINVAL);
	}
	if (requested_fc < 0 || requested_fc > 3) {
		device_printf(dev,
		    "Invalid fc mode; valid modes are 0 through 3\n");
		return (EINVAL);
	}

	/* Set fc ability for port */
	hw->fc.requested_mode = requested_fc;
	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new fc mode %d; fc_err %#x\n",
		    __func__, aq_error, fc_aq_err);
		return (EIO);
	}
	/* Cache the accepted mode only after the AQ command succeeds */
	pf->fc = requested_fc;

	/* Get new link state */
	i40e_msec_delay(250);
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	return (0);
}

/*
** Map an i40e AQ link-speed code to a human-readable string.
*/
static const char *
ixl_link_speed_string(u8 link_speed)
{
	const char * link_speed_str[] = {
		"Unknown",
		"100 Mbps",
		"1 Gbps",
		"10 Gbps",
		"40 Gbps",
		"20 Gbps",
		"25 Gbps",
		"2.5 Gbps",
		"5 Gbps"
	};
	int index;

	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		index = 1;
		break;
	case I40E_LINK_SPEED_1GB:
		index = 2;
		break;
	case I40E_LINK_SPEED_10GB:
		index = 3;
		break;
	case I40E_LINK_SPEED_40GB:
		index = 4;
		break;
	case I40E_LINK_SPEED_20GB:
		index = 5;
		break;
	case I40E_LINK_SPEED_25GB:
		index = 6;
		break;
	case I40E_LINK_SPEED_2_5GB:
		index = 7;
		break;
	case I40E_LINK_SPEED_5GB:
		index = 8;
		break;
	case I40E_LINK_SPEED_UNKNOWN:
	default:
		index = 0;
		break;
	}

	return (link_speed_str[index]);
}

/*
** Sysctl handler: report the current port speed as a string.
*/
int
ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int error = 0;

	ixl_update_link_status(pf);

	error = sysctl_handle_string(oidp,
	    __DECONST(void *,
		ixl_link_speed_string(hw->phy.link_info.link_speed)),
	    8, req);

	return (error);
}

/*
 * Converts 8-bit speeds value to and from sysctl flags and
 * Admin Queue flags.
 */
static u8
ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
{
#define SPEED_MAP_SIZE 8
	/* Each entry pairs an AQ speed bit (low byte) with the
	 * corresponding sysctl flag bit (high byte). */
	static u16 speedmap[SPEED_MAP_SIZE] = {
		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
		(I40E_LINK_SPEED_40GB  | (0x20 << 8)),
		(I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
		(I40E_LINK_SPEED_5GB   | (0x80 << 8)),
	};
	u8 retval = 0;

	for (int i = 0; i < SPEED_MAP_SIZE; i++) {
		if (to_aq)
			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
		else
			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
	}

	return (retval);
}

/*
** Program the advertised link speeds via the Admin Queue.
** 'speeds' is either an AQ bitmap (from_aq) or sysctl flags.
*/
int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code aq_error = 0;

	/* Get current capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EIO);
	}

	/* Prepare new config */
	bzero(&config, sizeof(config));
	if (from_aq)
		config.link_speed = speeds;
	else
		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
	/* Carry current abilities over so only the speed changes */
	config.phy_type = abilities.phy_type;
	config.phy_type_ext = abilities.phy_type_ext;
	config.abilities = abilities.abilities
	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
	    & I40E_AQ_PHY_FEC_CONFIG_MASK;

	/* Do aq command & restart link */
	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EIO);
	}

	return (0);
}

/*
** Supported link speeds
**	Flags:
**	0x1 - 100 Mb
**	0x2 - 1G
**	0x4 - 10G
**	0x8 - 20G
**	0x10 - 25G
**	0x20 - 40G
**	0x40 - 2.5G
**	0x80 - 5G
*/
static int
ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);

	return sysctl_handle_int(oidp, NULL, supported, req);
}

/*
** Control link advertise speed:
**	Flags:
**	0x1 - advertise 100 Mb
**	0x2 - advertise 1G
**	0x4 - advertise 10G
**	0x8 - advertise 20G
**	0x10 - advertise 25G
**	0x20 - advertise 40G
**	0x40 - advertise 2.5G
**	0x80 - advertise 5G
**
**	Set to 0 to disable link
*/
int
ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	u8 converted_speeds;
	int requested_ls = 0;
	int error = 0;

	/* Read in new mode */
	requested_ls = pf->advertised_speed;
	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* FW recovery mode precludes any link configuration changes */
	if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
		device_printf(dev, "Interface is currently in FW recovery mode. "
		    "Setting advertise speed not supported\n");
		return (EINVAL);
	}

	/* Error out if bits outside of possible flag range are set */
	if ((requested_ls & ~((u8)0xFF)) != 0) {
		device_printf(dev, "Input advertised speed out of range; "
		    "valid flags are: 0x%02x\n",
		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
		return (EINVAL);
	}

	/* Check if adapter supports input value */
	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
		device_printf(dev, "Invalid advertised speed; "
		    "valid flags are: 0x%02x\n",
		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
		return (EINVAL);
	}

	error = ixl_set_advertised_speeds(pf, requested_ls, false);
	if (error)
		return (error);

	pf->advertised_speed = requested_ls;
	ixl_update_link_status(pf);
	return (0);
}

/*
 * Input: bitmap of enum i40e_aq_link_speed
 * Returns the highest set speed, in bits per second.
 */
static u64
ixl_max_aq_speed_to_value(u8 link_speeds)
{
	if (link_speeds & I40E_LINK_SPEED_40GB)
		return IF_Gbps(40);
	if (link_speeds & I40E_LINK_SPEED_25GB)
		return IF_Gbps(25);
	if (link_speeds & I40E_LINK_SPEED_20GB)
		return IF_Gbps(20);
	if (link_speeds & I40E_LINK_SPEED_10GB)
		return IF_Gbps(10);
	if (link_speeds & I40E_LINK_SPEED_5GB)
		return IF_Gbps(5);
	if (link_speeds & I40E_LINK_SPEED_2_5GB)
		return IF_Mbps(2500);
	if (link_speeds & I40E_LINK_SPEED_1GB)
		return IF_Gbps(1);
	if (link_speeds & I40E_LINK_SPEED_100MB)
		return IF_Mbps(100);
	else
		/* Minimum supported link speed */
		return IF_Mbps(100);
}

/*
** Get the width and transaction speed of
** the bus this adapter is plugged into.
4905266423Sjfv*/ 4906303816Ssbrunovoid 4907318357Serjixl_get_bus_info(struct ixl_pf *pf) 4908266423Sjfv{ 4909318357Serj struct i40e_hw *hw = &pf->hw; 4910318357Serj device_t dev = pf->dev; 4911318357Serj u16 link; 4912318357Serj u32 offset, num_ports; 4913318357Serj u64 max_speed; 4914303816Ssbruno 4915303816Ssbruno /* Some devices don't use PCIE */ 4916303816Ssbruno if (hw->mac.type == I40E_MAC_X722) 4917303816Ssbruno return; 4918303816Ssbruno 4919303816Ssbruno /* Read PCI Express Capabilities Link Status Register */ 4920266423Sjfv pci_find_cap(dev, PCIY_EXPRESS, &offset); 4921266423Sjfv link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 4922266423Sjfv 4923303816Ssbruno /* Fill out hw struct with PCIE info */ 4924303816Ssbruno i40e_set_pci_config_data(hw, link); 4925266423Sjfv 4926303816Ssbruno /* Use info to print out bandwidth messages */ 4927266423Sjfv device_printf(dev,"PCI Express Bus: Speed %s %s\n", 4928266423Sjfv ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": 4929266423Sjfv (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": 4930266423Sjfv (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), 4931266423Sjfv (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : 4932266423Sjfv (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : 4933318357Serj (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : 4934266423Sjfv (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : 4935266423Sjfv ("Unknown")); 4936266423Sjfv 4937318357Serj /* 4938318357Serj * If adapter is in slot with maximum supported speed, 4939318357Serj * no warning message needs to be printed out. 
4940318357Serj */ 4941318357Serj if (hw->bus.speed >= i40e_bus_speed_8000 4942318357Serj && hw->bus.width >= i40e_bus_width_pcie_x8) 4943318357Serj return; 4944318357Serj 4945318357Serj num_ports = bitcount32(hw->func_caps.valid_functions); 4946318357Serj max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; 4947318357Serj 4948318357Serj if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { 4949266423Sjfv device_printf(dev, "PCI-Express bandwidth available" 4950303816Ssbruno " for this device may be insufficient for" 4951279858Sjfv " optimal performance.\n"); 4952318357Serj device_printf(dev, "Please move the device to a different" 4953318357Serj " PCI-e link with more lanes and/or higher" 4954318357Serj " transfer rate.\n"); 4955266423Sjfv } 4956266423Sjfv} 4957266423Sjfv 4958274205Sjfvstatic int 4959274205Sjfvixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 4960274205Sjfv{ 4961274205Sjfv struct ixl_pf *pf = (struct ixl_pf *)arg1; 4962274205Sjfv struct i40e_hw *hw = &pf->hw; 4963299552Serj struct sbuf *sbuf; 4964274205Sjfv 4965299552Serj sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4966299552Serj ixl_nvm_version_str(hw, sbuf); 4967299552Serj sbuf_finish(sbuf); 4968299552Serj sbuf_delete(sbuf); 4969299552Serj 4970333343Serj return (0); 4971274205Sjfv} 4972274205Sjfv 4973303816Ssbrunovoid 4974299553Serjixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) 4975299553Serj{ 4976299553Serj if ((nvma->command == I40E_NVM_READ) && 4977299553Serj ((nvma->config & 0xFF) == 0xF) && 4978299553Serj (((nvma->config & 0xF00) >> 8) == 0xF) && 4979299553Serj (nvma->offset == 0) && 4980299553Serj (nvma->data_size == 1)) { 4981299553Serj // device_printf(dev, "- Get Driver Status Command\n"); 4982299553Serj } 4983299553Serj else if (nvma->command == I40E_NVM_READ) { 4984299553Serj 4985299553Serj } 4986299553Serj else { 4987299553Serj switch (nvma->command) { 4988299553Serj case 0xB: 4989299553Serj device_printf(dev, "- command: I40E_NVM_READ\n"); 
4990299553Serj break; 4991299553Serj case 0xC: 4992299553Serj device_printf(dev, "- command: I40E_NVM_WRITE\n"); 4993299553Serj break; 4994299553Serj default: 4995299553Serj device_printf(dev, "- command: unknown 0x%08x\n", nvma->command); 4996299553Serj break; 4997299553Serj } 4998299553Serj 4999299553Serj device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF); 5000299553Serj device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8); 5001299553Serj device_printf(dev, "- offset : 0x%08x\n", nvma->offset); 5002299553Serj device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size); 5003299553Serj } 5004299553Serj} 5005299553Serj 5006303816Ssbrunoint 5007299547Serjixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) 5008299547Serj{ 5009299547Serj struct i40e_hw *hw = &pf->hw; 5010299547Serj struct i40e_nvm_access *nvma; 5011299547Serj device_t dev = pf->dev; 5012299547Serj enum i40e_status_code status = 0; 5013344105Smarius size_t nvma_size, ifd_len, exp_len; 5014344105Smarius int err, perrno; 5015274205Sjfv 5016299547Serj DEBUGFUNC("ixl_handle_nvmupd_cmd"); 5017299547Serj 5018299553Serj /* Sanity checks */ 5019344105Smarius nvma_size = sizeof(struct i40e_nvm_access); 5020344105Smarius ifd_len = ifd->ifd_len; 5021344105Smarius 5022344105Smarius if (ifd_len < nvma_size || 5023299547Serj ifd->ifd_data == NULL) { 5024299553Serj device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", 5025299553Serj __func__); 5026333343Serj device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", 5027344105Smarius __func__, ifd_len, nvma_size); 5028299553Serj device_printf(dev, "%s: data pointer: %p\n", __func__, 5029299553Serj ifd->ifd_data); 5030299547Serj return (EINVAL); 5031299547Serj } 5032299547Serj 5033344105Smarius nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK); 5034344105Smarius err = copyin(ifd->ifd_data, nvma, ifd_len); 5035344105Smarius if (err) { 5036344105Smarius device_printf(dev, "%s: Cannot get 
request from user space\n", 5037344105Smarius __func__); 5038344105Smarius free(nvma, M_DEVBUF); 5039344105Smarius return (err); 5040344105Smarius } 5041299547Serj 5042303816Ssbruno if (pf->dbg_mask & IXL_DBG_NVMUPD) 5043303816Ssbruno ixl_print_nvm_cmd(dev, nvma); 5044299553Serj 5045299549Serj if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { 5046299549Serj int count = 0; 5047299549Serj while (count++ < 100) { 5048299549Serj i40e_msec_delay(100); 5049299549Serj if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) 5050299549Serj break; 5051299549Serj } 5052299549Serj } 5053299549Serj 5054344105Smarius if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { 5055344105Smarius free(nvma, M_DEVBUF); 5056344105Smarius return (-EBUSY); 5057299549Serj } 5058299549Serj 5059344105Smarius if (nvma->data_size < 1 || nvma->data_size > 4096) { 5060344105Smarius device_printf(dev, "%s: invalid request, data size not in supported range\n", 5061344105Smarius __func__); 5062344105Smarius free(nvma, M_DEVBUF); 5063344105Smarius return (EINVAL); 5064344105Smarius } 5065344105Smarius 5066344105Smarius /* 5067344105Smarius * Older versions of the NVM update tool don't set ifd_len to the size 5068344105Smarius * of the entire buffer passed to the ioctl. Check the data_size field 5069344105Smarius * in the contained i40e_nvm_access struct and ensure everything is 5070344105Smarius * copied in from userspace. 
5071344105Smarius */ 5072344105Smarius exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ 5073344105Smarius 5074344105Smarius if (ifd_len < exp_len) { 5075344105Smarius ifd_len = exp_len; 5076344105Smarius nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK); 5077344105Smarius err = copyin(ifd->ifd_data, nvma, ifd_len); 5078344105Smarius if (err) { 5079344105Smarius device_printf(dev, "%s: Cannot get request from user space\n", 5080344105Smarius __func__); 5081344105Smarius free(nvma, M_DEVBUF); 5082344105Smarius return (err); 5083344105Smarius } 5084344105Smarius } 5085344105Smarius 5086344105Smarius IXL_PF_LOCK(pf); 5087344105Smarius status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); 5088344105Smarius IXL_PF_UNLOCK(pf); 5089344105Smarius 5090344105Smarius err = copyout(nvma, ifd->ifd_data, ifd_len); 5091344105Smarius free(nvma, M_DEVBUF); 5092344105Smarius if (err) { 5093344105Smarius device_printf(dev, "%s: Cannot return data to user space\n", 5094344105Smarius __func__); 5095344105Smarius return (err); 5096344105Smarius } 5097344105Smarius 5098333343Serj /* Let the nvmupdate report errors, show them only when debug is enabled */ 5099333343Serj if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) 5100318357Serj device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", 5101318357Serj i40e_stat_str(hw, status), perrno); 5102299547Serj 5103299549Serj /* 5104299549Serj * -EPERM is actually ERESTART, which the kernel interprets as it needing 5105299549Serj * to run this ioctl again. So use -EACCES for -EPERM instead. 
5106299549Serj */ 5107299548Serj if (perrno == -EPERM) 5108299548Serj return (-EACCES); 5109299548Serj else 5110299548Serj return (perrno); 5111299547Serj} 5112299547Serj 5113349163Serjint 5114349163Serjixl_handle_i2c_eeprom_read_cmd(struct ixl_pf *pf, struct ifreq *ifr) 5115349163Serj{ 5116349163Serj struct ifi2creq i2c; 5117349163Serj int error = 0; 5118349163Serj int i; 5119349163Serj 5120349163Serj if (pf->read_i2c_byte == NULL) 5121349163Serj return (EINVAL); 5122349163Serj 5123349163Serj#ifdef ifr_data 5124349163Serj error = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 5125349163Serj#else 5126349163Serj error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 5127349163Serj#endif 5128349163Serj 5129349163Serj if (error != 0) 5130349163Serj return (error); 5131349163Serj if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 5132349163Serj error = EINVAL; 5133349163Serj return (error); 5134349163Serj } 5135349163Serj if (i2c.len > sizeof(i2c.data)) { 5136349163Serj error = EINVAL; 5137349163Serj return (error); 5138349163Serj } 5139349163Serj 5140349163Serj for (i = 0; i < i2c.len; ++i) { 5141349163Serj if (pf->read_i2c_byte(pf, i2c.offset + i, 5142349163Serj i2c.dev_addr, &i2c.data[i])) 5143349163Serj return (EIO); 5144349163Serj } 5145349163Serj 5146349163Serj#ifdef ifr_data 5147349163Serj error = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 5148349163Serj#else 5149349163Serj error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 5150349163Serj#endif 5151349163Serj 5152349163Serj return (error); 5153349163Serj} 5154349163Serj 5155303816Ssbruno/********************************************************************* 5156303816Ssbruno * 5157303816Ssbruno * Media Ioctl callback 5158303816Ssbruno * 5159303816Ssbruno * This routine is called whenever the user queries the status of 5160303816Ssbruno * the interface using ifconfig. 5161303816Ssbruno * 5162333343Serj * When adding new media types here, make sure to add them to 5163333343Serj * ixl_add_ifmedia(), too. 
5164333343Serj * 5165303816Ssbruno **********************************************************************/ 5166303816Ssbrunovoid 5167303816Ssbrunoixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) 5168303816Ssbruno{ 5169303816Ssbruno struct ixl_vsi *vsi = ifp->if_softc; 5170303816Ssbruno struct ixl_pf *pf = vsi->back; 5171303816Ssbruno struct i40e_hw *hw = &pf->hw; 5172303816Ssbruno 5173303816Ssbruno INIT_DEBUGOUT("ixl_media_status: begin"); 5174333343Serj 5175333343Serj /* Don't touch PF during reset */ 5176333343Serj if (atomic_load_acq_int(&pf->state) & IXL_PF_STATE_EMPR_RESETTING) 5177333343Serj return; 5178333343Serj 5179303816Ssbruno IXL_PF_LOCK(pf); 5180303816Ssbruno 5181303816Ssbruno i40e_get_link_status(hw, &pf->link_up); 5182303816Ssbruno ixl_update_link_status(pf); 5183303816Ssbruno 5184303816Ssbruno ifmr->ifm_status = IFM_AVALID; 5185303816Ssbruno ifmr->ifm_active = IFM_ETHER; 5186303816Ssbruno 5187303816Ssbruno if (!pf->link_up) { 5188303816Ssbruno IXL_PF_UNLOCK(pf); 5189303816Ssbruno return; 5190303816Ssbruno } 5191303816Ssbruno 5192303816Ssbruno ifmr->ifm_status |= IFM_ACTIVE; 5193303816Ssbruno 5194303816Ssbruno /* Hardware always does full-duplex */ 5195303816Ssbruno ifmr->ifm_active |= IFM_FDX; 5196303816Ssbruno 5197303816Ssbruno switch (hw->phy.link_info.phy_type) { 5198303816Ssbruno /* 100 M */ 5199303816Ssbruno case I40E_PHY_TYPE_100BASE_TX: 5200303816Ssbruno ifmr->ifm_active |= IFM_100_TX; 5201303816Ssbruno break; 5202303816Ssbruno /* 1 G */ 5203303816Ssbruno case I40E_PHY_TYPE_1000BASE_T: 5204303816Ssbruno ifmr->ifm_active |= IFM_1000_T; 5205303816Ssbruno break; 5206303816Ssbruno case I40E_PHY_TYPE_1000BASE_SX: 5207303816Ssbruno ifmr->ifm_active |= IFM_1000_SX; 5208303816Ssbruno break; 5209303816Ssbruno case I40E_PHY_TYPE_1000BASE_LX: 5210303816Ssbruno ifmr->ifm_active |= IFM_1000_LX; 5211303816Ssbruno break; 5212303816Ssbruno case I40E_PHY_TYPE_1000BASE_T_OPTICAL: 5213333343Serj ifmr->ifm_active |= IFM_1000_T; 5214303816Ssbruno 
break; 5215349163Serj /* 2.5 G */ 5216349163Serj case I40E_PHY_TYPE_2_5GBASE_T: 5217349163Serj ifmr->ifm_active |= IFM_2500_T; 5218349163Serj break; 5219349163Serj /* 5 G */ 5220349163Serj case I40E_PHY_TYPE_5GBASE_T: 5221349163Serj ifmr->ifm_active |= IFM_5000_T; 5222349163Serj break; 5223303816Ssbruno /* 10 G */ 5224303816Ssbruno case I40E_PHY_TYPE_10GBASE_SFPP_CU: 5225303816Ssbruno ifmr->ifm_active |= IFM_10G_TWINAX; 5226303816Ssbruno break; 5227303816Ssbruno case I40E_PHY_TYPE_10GBASE_SR: 5228303816Ssbruno ifmr->ifm_active |= IFM_10G_SR; 5229303816Ssbruno break; 5230303816Ssbruno case I40E_PHY_TYPE_10GBASE_LR: 5231303816Ssbruno ifmr->ifm_active |= IFM_10G_LR; 5232303816Ssbruno break; 5233303816Ssbruno case I40E_PHY_TYPE_10GBASE_T: 5234303816Ssbruno ifmr->ifm_active |= IFM_10G_T; 5235303816Ssbruno break; 5236303816Ssbruno case I40E_PHY_TYPE_XAUI: 5237303816Ssbruno case I40E_PHY_TYPE_XFI: 5238333343Serj ifmr->ifm_active |= IFM_10G_TWINAX; 5239333343Serj break; 5240303816Ssbruno case I40E_PHY_TYPE_10GBASE_AOC: 5241333343Serj ifmr->ifm_active |= IFM_10G_AOC; 5242303816Ssbruno break; 5243318357Serj /* 25 G */ 5244318357Serj case I40E_PHY_TYPE_25GBASE_KR: 5245318357Serj ifmr->ifm_active |= IFM_25G_KR; 5246318357Serj break; 5247318357Serj case I40E_PHY_TYPE_25GBASE_CR: 5248318357Serj ifmr->ifm_active |= IFM_25G_CR; 5249318357Serj break; 5250318357Serj case I40E_PHY_TYPE_25GBASE_SR: 5251318357Serj ifmr->ifm_active |= IFM_25G_SR; 5252318357Serj break; 5253318357Serj case I40E_PHY_TYPE_25GBASE_LR: 5254333343Serj ifmr->ifm_active |= IFM_25G_LR; 5255318357Serj break; 5256333343Serj case I40E_PHY_TYPE_25GBASE_AOC: 5257333343Serj ifmr->ifm_active |= IFM_25G_AOC; 5258333343Serj break; 5259333343Serj case I40E_PHY_TYPE_25GBASE_ACC: 5260333343Serj ifmr->ifm_active |= IFM_25G_ACC; 5261333343Serj break; 5262303816Ssbruno /* 40 G */ 5263303816Ssbruno case I40E_PHY_TYPE_40GBASE_CR4: 5264303816Ssbruno case I40E_PHY_TYPE_40GBASE_CR4_CU: 5265303816Ssbruno ifmr->ifm_active |= 
IFM_40G_CR4; 5266303816Ssbruno break; 5267303816Ssbruno case I40E_PHY_TYPE_40GBASE_SR4: 5268303816Ssbruno ifmr->ifm_active |= IFM_40G_SR4; 5269303816Ssbruno break; 5270303816Ssbruno case I40E_PHY_TYPE_40GBASE_LR4: 5271303816Ssbruno ifmr->ifm_active |= IFM_40G_LR4; 5272303816Ssbruno break; 5273303816Ssbruno case I40E_PHY_TYPE_XLAUI: 5274303816Ssbruno ifmr->ifm_active |= IFM_OTHER; 5275303816Ssbruno break; 5276303816Ssbruno case I40E_PHY_TYPE_1000BASE_KX: 5277303816Ssbruno ifmr->ifm_active |= IFM_1000_KX; 5278303816Ssbruno break; 5279303816Ssbruno case I40E_PHY_TYPE_SGMII: 5280303816Ssbruno ifmr->ifm_active |= IFM_1000_SGMII; 5281303816Ssbruno break; 5282303816Ssbruno /* ERJ: What's the difference between these? */ 5283303816Ssbruno case I40E_PHY_TYPE_10GBASE_CR1_CU: 5284303816Ssbruno case I40E_PHY_TYPE_10GBASE_CR1: 5285303816Ssbruno ifmr->ifm_active |= IFM_10G_CR1; 5286303816Ssbruno break; 5287303816Ssbruno case I40E_PHY_TYPE_10GBASE_KX4: 5288303816Ssbruno ifmr->ifm_active |= IFM_10G_KX4; 5289303816Ssbruno break; 5290303816Ssbruno case I40E_PHY_TYPE_10GBASE_KR: 5291303816Ssbruno ifmr->ifm_active |= IFM_10G_KR; 5292303816Ssbruno break; 5293303816Ssbruno case I40E_PHY_TYPE_SFI: 5294303816Ssbruno ifmr->ifm_active |= IFM_10G_SFI; 5295303816Ssbruno break; 5296303816Ssbruno /* Our single 20G media type */ 5297303816Ssbruno case I40E_PHY_TYPE_20GBASE_KR2: 5298303816Ssbruno ifmr->ifm_active |= IFM_20G_KR2; 5299303816Ssbruno break; 5300303816Ssbruno case I40E_PHY_TYPE_40GBASE_KR4: 5301303816Ssbruno ifmr->ifm_active |= IFM_40G_KR4; 5302303816Ssbruno break; 5303303816Ssbruno case I40E_PHY_TYPE_XLPPI: 5304303816Ssbruno case I40E_PHY_TYPE_40GBASE_AOC: 5305303816Ssbruno ifmr->ifm_active |= IFM_40G_XLPPI; 5306303816Ssbruno break; 5307303816Ssbruno /* Unknown to driver */ 5308303816Ssbruno default: 5309303816Ssbruno ifmr->ifm_active |= IFM_UNKNOWN; 5310303816Ssbruno break; 5311303816Ssbruno } 5312303816Ssbruno /* Report flow control status as well */ 5313303816Ssbruno if 
(hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) 5314303816Ssbruno ifmr->ifm_active |= IFM_ETH_TXPAUSE; 5315303816Ssbruno if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) 5316303816Ssbruno ifmr->ifm_active |= IFM_ETH_RXPAUSE; 5317303816Ssbruno 5318303816Ssbruno IXL_PF_UNLOCK(pf); 5319303816Ssbruno} 5320303816Ssbruno 5321303816Ssbrunovoid 5322303816Ssbrunoixl_init(void *arg) 5323303816Ssbruno{ 5324303816Ssbruno struct ixl_pf *pf = arg; 5325303816Ssbruno 5326303816Ssbruno IXL_PF_LOCK(pf); 5327303816Ssbruno ixl_init_locked(pf); 5328303816Ssbruno IXL_PF_UNLOCK(pf); 5329303816Ssbruno} 5330303816Ssbruno 5331303816Ssbruno/* 5332303816Ssbruno * NOTE: Fortville does not support forcing media speeds. Instead, 5333303816Ssbruno * use the set_advertise sysctl to set the speeds Fortville 5334303816Ssbruno * will advertise or be allowed to operate at. 5335303816Ssbruno */ 5336303816Ssbrunoint 5337303816Ssbrunoixl_media_change(struct ifnet * ifp) 5338303816Ssbruno{ 5339303816Ssbruno struct ixl_vsi *vsi = ifp->if_softc; 5340303816Ssbruno struct ifmedia *ifm = &vsi->media; 5341303816Ssbruno 5342303816Ssbruno INIT_DEBUGOUT("ixl_media_change: begin"); 5343303816Ssbruno 5344303816Ssbruno if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 5345303816Ssbruno return (EINVAL); 5346303816Ssbruno 5347303816Ssbruno if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n"); 5348303816Ssbruno 5349303816Ssbruno return (ENODEV); 5350303816Ssbruno} 5351303816Ssbruno 5352303816Ssbruno/********************************************************************* 5353303816Ssbruno * Ioctl entry point 5354303816Ssbruno * 5355303816Ssbruno * ixl_ioctl is called when the user wants to configure the 5356303816Ssbruno * interface. 
5357303816Ssbruno * 5358303816Ssbruno * return 0 on success, positive on failure 5359303816Ssbruno **********************************************************************/ 5360303816Ssbruno 5361303816Ssbrunoint 5362303816Ssbrunoixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data) 5363303816Ssbruno{ 5364303816Ssbruno struct ixl_vsi *vsi = ifp->if_softc; 5365303816Ssbruno struct ixl_pf *pf = vsi->back; 5366303816Ssbruno struct ifreq *ifr = (struct ifreq *)data; 5367303816Ssbruno struct ifdrv *ifd = (struct ifdrv *)data; 5368303816Ssbruno#if defined(INET) || defined(INET6) 5369303816Ssbruno struct ifaddr *ifa = (struct ifaddr *)data; 5370303816Ssbruno bool avoid_reset = FALSE; 5371303816Ssbruno#endif 5372303816Ssbruno int error = 0; 5373303816Ssbruno 5374349163Serj if ((atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) != 0) { 5375349163Serj /* We are in recovery mode supporting only NVM update */ 5376349163Serj switch (command) { 5377349163Serj case SIOCSDRVSPEC: 5378349163Serj case SIOCGDRVSPEC: 5379349163Serj IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific " 5380349163Serj "Info)\n"); 5381349163Serj 5382349163Serj /* NVM update command */ 5383349163Serj if (ifd->ifd_cmd == I40E_NVM_ACCESS) 5384349163Serj error = ixl_handle_nvmupd_cmd(pf, ifd); 5385349163Serj else 5386349163Serj error = EINVAL; 5387349163Serj break; 5388349163Serj default: 5389349163Serj error = EINVAL; 5390349163Serj break; 5391349163Serj } 5392349163Serj 5393349163Serj return (error); 5394349163Serj } 5395349163Serj 5396303816Ssbruno switch (command) { 5397303816Ssbruno 5398303816Ssbruno case SIOCSIFADDR: 5399318357Serj IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)"); 5400303816Ssbruno#ifdef INET 5401303816Ssbruno if (ifa->ifa_addr->sa_family == AF_INET) 5402303816Ssbruno avoid_reset = TRUE; 5403303816Ssbruno#endif 5404303816Ssbruno#ifdef INET6 5405303816Ssbruno if (ifa->ifa_addr->sa_family == AF_INET6) 5406303816Ssbruno avoid_reset = TRUE; 
5407303816Ssbruno#endif 5408303816Ssbruno#if defined(INET) || defined(INET6) 5409303816Ssbruno /* 5410303816Ssbruno ** Calling init results in link renegotiation, 5411303816Ssbruno ** so we avoid doing it when possible. 5412303816Ssbruno */ 5413303816Ssbruno if (avoid_reset) { 5414303816Ssbruno ifp->if_flags |= IFF_UP; 5415303816Ssbruno if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 5416303816Ssbruno ixl_init(pf); 5417303816Ssbruno#ifdef INET 5418303816Ssbruno if (!(ifp->if_flags & IFF_NOARP)) 5419303816Ssbruno arp_ifinit(ifp, ifa); 5420303816Ssbruno#endif 5421303816Ssbruno } else 5422303816Ssbruno error = ether_ioctl(ifp, command, data); 5423303816Ssbruno break; 5424303816Ssbruno#endif 5425303816Ssbruno case SIOCSIFMTU: 5426303816Ssbruno IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 5427303816Ssbruno if (ifr->ifr_mtu > IXL_MAX_FRAME - 5428303816Ssbruno ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { 5429303816Ssbruno error = EINVAL; 5430303816Ssbruno } else { 5431303816Ssbruno IXL_PF_LOCK(pf); 5432303816Ssbruno ifp->if_mtu = ifr->ifr_mtu; 5433303816Ssbruno vsi->max_frame_size = 5434303816Ssbruno ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN 5435303816Ssbruno + ETHER_VLAN_ENCAP_LEN; 5436303816Ssbruno ixl_init_locked(pf); 5437303816Ssbruno IXL_PF_UNLOCK(pf); 5438303816Ssbruno } 5439303816Ssbruno break; 5440303816Ssbruno case SIOCSIFFLAGS: 5441303816Ssbruno IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); 5442303816Ssbruno IXL_PF_LOCK(pf); 5443303816Ssbruno if (ifp->if_flags & IFF_UP) { 5444303816Ssbruno if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 5445303816Ssbruno if ((ifp->if_flags ^ pf->if_flags) & 5446303816Ssbruno (IFF_PROMISC | IFF_ALLMULTI)) { 5447303816Ssbruno ixl_set_promisc(vsi); 5448303816Ssbruno } 5449349163Serj } else 5450349163Serj ixl_init_locked(pf); 5451349163Serj } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) 5452349163Serj ixl_stop_locked(pf); 5453349163Serj 5454303816Ssbruno pf->if_flags = ifp->if_flags; 
5455303816Ssbruno IXL_PF_UNLOCK(pf); 5456303816Ssbruno break; 5457303816Ssbruno case SIOCSDRVSPEC: 5458303816Ssbruno case SIOCGDRVSPEC: 5459303816Ssbruno IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific " 5460303816Ssbruno "Info)\n"); 5461303816Ssbruno 5462303816Ssbruno /* NVM update command */ 5463303816Ssbruno if (ifd->ifd_cmd == I40E_NVM_ACCESS) 5464303816Ssbruno error = ixl_handle_nvmupd_cmd(pf, ifd); 5465303816Ssbruno else 5466303816Ssbruno error = EINVAL; 5467303816Ssbruno break; 5468303816Ssbruno case SIOCADDMULTI: 5469303816Ssbruno IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI"); 5470303816Ssbruno if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 5471303816Ssbruno IXL_PF_LOCK(pf); 5472318357Serj ixl_disable_rings_intr(vsi); 5473303816Ssbruno ixl_add_multi(vsi); 5474303816Ssbruno ixl_enable_intr(vsi); 5475303816Ssbruno IXL_PF_UNLOCK(pf); 5476303816Ssbruno } 5477303816Ssbruno break; 5478303816Ssbruno case SIOCDELMULTI: 5479303816Ssbruno IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI"); 5480303816Ssbruno if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 5481303816Ssbruno IXL_PF_LOCK(pf); 5482318357Serj ixl_disable_rings_intr(vsi); 5483303816Ssbruno ixl_del_multi(vsi); 5484303816Ssbruno ixl_enable_intr(vsi); 5485303816Ssbruno IXL_PF_UNLOCK(pf); 5486303816Ssbruno } 5487303816Ssbruno break; 5488303816Ssbruno case SIOCSIFMEDIA: 5489303816Ssbruno case SIOCGIFMEDIA: 5490303816Ssbruno case SIOCGIFXMEDIA: 5491303816Ssbruno IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); 5492303816Ssbruno error = ifmedia_ioctl(ifp, ifr, &vsi->media, command); 5493303816Ssbruno break; 5494303816Ssbruno case SIOCSIFCAP: 5495303816Ssbruno { 5496303816Ssbruno int mask = ifr->ifr_reqcap ^ ifp->if_capenable; 5497303816Ssbruno IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); 5498303816Ssbruno 5499303816Ssbruno ixl_cap_txcsum_tso(vsi, ifp, mask); 5500303816Ssbruno 5501303816Ssbruno if (mask & IFCAP_RXCSUM) 5502303816Ssbruno ifp->if_capenable ^= IFCAP_RXCSUM; 5503303816Ssbruno if (mask & 
IFCAP_RXCSUM_IPV6) 5504303816Ssbruno ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 5505303816Ssbruno if (mask & IFCAP_LRO) 5506303816Ssbruno ifp->if_capenable ^= IFCAP_LRO; 5507303816Ssbruno if (mask & IFCAP_VLAN_HWTAGGING) 5508303816Ssbruno ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 5509303816Ssbruno if (mask & IFCAP_VLAN_HWFILTER) 5510303816Ssbruno ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 5511303816Ssbruno if (mask & IFCAP_VLAN_HWTSO) 5512303816Ssbruno ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 5513303816Ssbruno if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 5514303816Ssbruno IXL_PF_LOCK(pf); 5515303816Ssbruno ixl_init_locked(pf); 5516303816Ssbruno IXL_PF_UNLOCK(pf); 5517303816Ssbruno } 5518303816Ssbruno VLAN_CAPABILITIES(ifp); 5519303816Ssbruno 5520303816Ssbruno break; 5521303816Ssbruno } 5522318357Serj#if __FreeBSD_version >= 1003000 5523318357Serj case SIOCGI2C: 5524318357Serj { 5525318357Serj IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)"); 5526318357Serj if (!pf->has_i2c) 5527318357Serj return (ENOTTY); 5528318357Serj 5529349163Serj error = ixl_handle_i2c_eeprom_read_cmd(pf, ifr); 5530318357Serj break; 5531318357Serj } 5532318357Serj#endif 5533303816Ssbruno default: 5534303816Ssbruno IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command); 5535303816Ssbruno error = ether_ioctl(ifp, command, data); 5536303816Ssbruno break; 5537303816Ssbruno } 5538303816Ssbruno 5539303816Ssbruno return (error); 5540303816Ssbruno} 5541303816Ssbruno 5542318357Serjint 5543318357Serjixl_find_i2c_interface(struct ixl_pf *pf) 5544318357Serj{ 5545318357Serj struct i40e_hw *hw = &pf->hw; 5546318357Serj bool i2c_en, port_matched; 5547318357Serj u32 reg; 5548318357Serj 5549318357Serj for (int i = 0; i < 4; i++) { 5550318357Serj reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); 5551318357Serj i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); 5552318357Serj port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) 5553318357Serj >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) 
5554318357Serj & BIT(hw->port); 5555318357Serj if (i2c_en && port_matched) 5556318357Serj return (i); 5557318357Serj } 5558318357Serj 5559318357Serj return (-1); 5560318357Serj} 5561318357Serj 5562303816Ssbrunostatic char * 5563318357Serjixl_phy_type_string(u32 bit_pos, bool ext) 5564303816Ssbruno{ 5565303816Ssbruno static char * phy_types_str[32] = { 5566303816Ssbruno "SGMII", 5567303816Ssbruno "1000BASE-KX", 5568303816Ssbruno "10GBASE-KX4", 5569303816Ssbruno "10GBASE-KR", 5570303816Ssbruno "40GBASE-KR4", 5571303816Ssbruno "XAUI", 5572303816Ssbruno "XFI", 5573303816Ssbruno "SFI", 5574303816Ssbruno "XLAUI", 5575303816Ssbruno "XLPPI", 5576303816Ssbruno "40GBASE-CR4", 5577303816Ssbruno "10GBASE-CR1", 5578333343Serj "SFP+ Active DA", 5579333343Serj "QSFP+ Active DA", 5580303816Ssbruno "Reserved (14)", 5581303816Ssbruno "Reserved (15)", 5582303816Ssbruno "Reserved (16)", 5583303816Ssbruno "100BASE-TX", 5584303816Ssbruno "1000BASE-T", 5585303816Ssbruno "10GBASE-T", 5586303816Ssbruno "10GBASE-SR", 5587303816Ssbruno "10GBASE-LR", 5588303816Ssbruno "10GBASE-SFP+Cu", 5589303816Ssbruno "10GBASE-CR1", 5590303816Ssbruno "40GBASE-CR4", 5591303816Ssbruno "40GBASE-SR4", 5592303816Ssbruno "40GBASE-LR4", 5593303816Ssbruno "1000BASE-SX", 5594303816Ssbruno "1000BASE-LX", 5595303816Ssbruno "1000BASE-T Optical", 5596303816Ssbruno "20GBASE-KR2", 5597303816Ssbruno "Reserved (31)" 5598303816Ssbruno }; 5599333343Serj static char * ext_phy_types_str[8] = { 5600318357Serj "25GBASE-KR", 5601318357Serj "25GBASE-CR", 5602318357Serj "25GBASE-SR", 5603333343Serj "25GBASE-LR", 5604333343Serj "25GBASE-AOC", 5605333343Serj "25GBASE-ACC", 5606349163Serj "2.5GBASE-T", 5607349163Serj "5GBASE-T" 5608318357Serj }; 5609303816Ssbruno 5610333343Serj if (ext && bit_pos > 7) return "Invalid_Ext"; 5611303816Ssbruno if (bit_pos > 31) return "Invalid"; 5612318357Serj 5613318357Serj return (ext) ? 
ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; 5614303816Ssbruno} 5615303816Ssbruno 5616349163Serjstatic char * 5617349163Serjixl_phy_type_string_ls(u8 val) 5618349163Serj{ 5619349163Serj if (val >= 0x1F) 5620349163Serj return ixl_phy_type_string(val - 0x1F, true); 5621349163Serj else 5622349163Serj return ixl_phy_type_string(val, false); 5623349163Serj} 5624349163Serj 5625318357Serjint 5626318357Serjixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) 5627318357Serj{ 5628318357Serj device_t dev = pf->dev; 5629318357Serj struct i40e_hw *hw = &pf->hw; 5630318357Serj struct i40e_aq_desc desc; 5631318357Serj enum i40e_status_code status; 5632303816Ssbruno 5633318357Serj struct i40e_aqc_get_link_status *aq_link_status = 5634318357Serj (struct i40e_aqc_get_link_status *)&desc.params.raw; 5635318357Serj 5636318357Serj i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 5637318357Serj link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); 5638318357Serj status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 5639318357Serj if (status) { 5640318357Serj device_printf(dev, 5641318357Serj "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", 5642318357Serj __func__, i40e_stat_str(hw, status), 5643318357Serj i40e_aq_str(hw, hw->aq.asq_last_status)); 5644318357Serj return (EIO); 5645318357Serj } 5646318357Serj 5647318357Serj bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); 5648318357Serj return (0); 5649318357Serj} 5650318357Serj 5651266423Sjfvstatic int 5652270346Sjfvixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) 5653266423Sjfv{ 5654270346Sjfv struct ixl_pf *pf = (struct ixl_pf *)arg1; 5655303816Ssbruno device_t dev = pf->dev; 5656303816Ssbruno struct sbuf *buf; 5657303816Ssbruno int error = 0; 5658266423Sjfv 5659303816Ssbruno buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5660303816Ssbruno if (!buf) { 5661303816Ssbruno device_printf(dev, "Could not allocate sbuf for sysctl 
output.\n"); 5662303816Ssbruno return (ENOMEM); 5663303816Ssbruno } 5664266423Sjfv 5665318357Serj struct i40e_aqc_get_link_status link_status; 5666318357Serj error = ixl_aq_get_link_status(pf, &link_status); 5667318357Serj if (error) { 5668303816Ssbruno sbuf_delete(buf); 5669318357Serj return (error); 5670266423Sjfv } 5671266423Sjfv 5672303816Ssbruno sbuf_printf(buf, "\n" 5673303816Ssbruno "PHY Type : 0x%02x<%s>\n" 5674303816Ssbruno "Speed : 0x%02x\n" 5675303816Ssbruno "Link info: 0x%02x\n" 5676303816Ssbruno "AN info : 0x%02x\n" 5677303816Ssbruno "Ext info : 0x%02x\n" 5678318357Serj "Loopback : 0x%02x\n" 5679299551Serj "Max Frame: %d\n" 5680318357Serj "Config : 0x%02x\n" 5681318357Serj "Power : 0x%02x", 5682318357Serj link_status.phy_type, 5683318357Serj ixl_phy_type_string_ls(link_status.phy_type), 5684303816Ssbruno link_status.link_speed, 5685318357Serj link_status.link_info, 5686318357Serj link_status.an_info, 5687318357Serj link_status.ext_info, 5688318357Serj link_status.loopback, 5689318357Serj link_status.max_frame_size, 5690318357Serj link_status.config, 5691318357Serj link_status.power_desc); 5692266423Sjfv 5693303816Ssbruno error = sbuf_finish(buf); 5694303816Ssbruno if (error) 5695303816Ssbruno device_printf(dev, "Error finishing sbuf: %d\n", error); 5696303816Ssbruno 5697303816Ssbruno sbuf_delete(buf); 5698303816Ssbruno return (error); 5699266423Sjfv} 5700266423Sjfv 5701266423Sjfvstatic int 5702270346Sjfvixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) 5703266423Sjfv{ 5704303816Ssbruno struct ixl_pf *pf = (struct ixl_pf *)arg1; 5705303816Ssbruno struct i40e_hw *hw = &pf->hw; 5706303816Ssbruno device_t dev = pf->dev; 5707303816Ssbruno enum i40e_status_code status; 5708279858Sjfv struct i40e_aq_get_phy_abilities_resp abilities; 5709303816Ssbruno struct sbuf *buf; 5710303816Ssbruno int error = 0; 5711266423Sjfv 5712303816Ssbruno buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5713303816Ssbruno if (!buf) { 5714303816Ssbruno device_printf(dev, "Could not 
allocate sbuf for sysctl output.\n"); 5715303816Ssbruno return (ENOMEM); 5716303816Ssbruno } 5717303816Ssbruno 5718303816Ssbruno status = i40e_aq_get_phy_capabilities(hw, 5719318357Serj FALSE, FALSE, &abilities, NULL); 5720303816Ssbruno if (status) { 5721303816Ssbruno device_printf(dev, 5722303816Ssbruno "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 5723303816Ssbruno __func__, i40e_stat_str(hw, status), 5724303816Ssbruno i40e_aq_str(hw, hw->aq.asq_last_status)); 5725303816Ssbruno sbuf_delete(buf); 5726303816Ssbruno return (EIO); 5727266423Sjfv } 5728266423Sjfv 5729303816Ssbruno sbuf_printf(buf, "\n" 5730303816Ssbruno "PHY Type : %08x", 5731303816Ssbruno abilities.phy_type); 5732303816Ssbruno 5733303816Ssbruno if (abilities.phy_type != 0) { 5734303816Ssbruno sbuf_printf(buf, "<"); 5735303816Ssbruno for (int i = 0; i < 32; i++) 5736303816Ssbruno if ((1 << i) & abilities.phy_type) 5737318357Serj sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); 5738349163Serj sbuf_printf(buf, ">"); 5739303816Ssbruno } 5740303816Ssbruno 5741349163Serj sbuf_printf(buf, "\nPHY Ext : %02x", 5742318357Serj abilities.phy_type_ext); 5743318357Serj 5744318357Serj if (abilities.phy_type_ext != 0) { 5745318357Serj sbuf_printf(buf, "<"); 5746318357Serj for (int i = 0; i < 4; i++) 5747318357Serj if ((1 << i) & abilities.phy_type_ext) 5748349163Serj sbuf_printf(buf, "%s,", 5749349163Serj ixl_phy_type_string(i, true)); 5750318357Serj sbuf_printf(buf, ">"); 5751318357Serj } 5752318357Serj 5753349163Serj sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); 5754349163Serj if (abilities.link_speed != 0) { 5755349163Serj u8 link_speed; 5756349163Serj sbuf_printf(buf, " <"); 5757349163Serj for (int i = 0; i < 8; i++) { 5758349163Serj link_speed = (1 << i) & abilities.link_speed; 5759349163Serj if (link_speed) 5760349163Serj sbuf_printf(buf, "%s, ", 5761349163Serj ixl_link_speed_string(link_speed)); 5762349163Serj } 5763349163Serj sbuf_printf(buf, ">"); 5764349163Serj } 
5765349163Serj 5766349163Serj sbuf_printf(buf, "\n" 5767303816Ssbruno "Abilities: %02x\n" 5768303816Ssbruno "EEE cap : %04x\n" 5769303816Ssbruno "EEER reg : %08x\n" 5770303816Ssbruno "D3 Lpan : %02x\n" 5771303816Ssbruno "ID : %02x %02x %02x %02x\n" 5772318357Serj "ModType : %02x %02x %02x\n" 5773318357Serj "ModType E: %01x\n" 5774318357Serj "FEC Cfg : %02x\n" 5775318357Serj "Ext CC : %02x", 5776279858Sjfv abilities.abilities, abilities.eee_capability, 5777303816Ssbruno abilities.eeer_val, abilities.d3_lpan, 5778303816Ssbruno abilities.phy_id[0], abilities.phy_id[1], 5779303816Ssbruno abilities.phy_id[2], abilities.phy_id[3], 5780303816Ssbruno abilities.module_type[0], abilities.module_type[1], 5781333343Serj abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, 5782333343Serj abilities.fec_cfg_curr_mod_ext_info & 0x1F, 5783318357Serj abilities.ext_comp_code); 5784266423Sjfv 5785303816Ssbruno error = sbuf_finish(buf); 5786303816Ssbruno if (error) 5787303816Ssbruno device_printf(dev, "Error finishing sbuf: %d\n", error); 5788303816Ssbruno 5789303816Ssbruno sbuf_delete(buf); 5790303816Ssbruno return (error); 5791266423Sjfv} 5792266423Sjfv 5793266423Sjfvstatic int 5794270346Sjfvixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) 5795266423Sjfv{ 5796270346Sjfv struct ixl_pf *pf = (struct ixl_pf *)arg1; 5797270346Sjfv struct ixl_vsi *vsi = &pf->vsi; 5798270346Sjfv struct ixl_mac_filter *f; 5799266423Sjfv char *buf, *buf_i; 5800266423Sjfv 5801266423Sjfv int error = 0; 5802266423Sjfv int ftl_len = 0; 5803266423Sjfv int ftl_counter = 0; 5804266423Sjfv int buf_len = 0; 5805266423Sjfv int entry_len = 42; 5806266423Sjfv 5807266423Sjfv SLIST_FOREACH(f, &vsi->ftl, next) { 5808266423Sjfv ftl_len++; 5809266423Sjfv } 5810266423Sjfv 5811266423Sjfv if (ftl_len < 1) { 5812266423Sjfv sysctl_handle_string(oidp, "(none)", 6, req); 5813266423Sjfv return (0); 5814266423Sjfv } 5815266423Sjfv 5816266423Sjfv buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2; 
5817266423Sjfv buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT); 5818266423Sjfv 5819266423Sjfv sprintf(buf_i++, "\n"); 5820266423Sjfv SLIST_FOREACH(f, &vsi->ftl, next) { 5821266423Sjfv sprintf(buf_i, 5822266423Sjfv MAC_FORMAT ", vlan %4d, flags %#06x", 5823266423Sjfv MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 5824266423Sjfv buf_i += entry_len; 5825266423Sjfv /* don't print '\n' for last entry */ 5826266423Sjfv if (++ftl_counter != ftl_len) { 5827266423Sjfv sprintf(buf_i, "\n"); 5828266423Sjfv buf_i++; 5829266423Sjfv } 5830266423Sjfv } 5831266423Sjfv 5832266423Sjfv error = sysctl_handle_string(oidp, buf, strlen(buf), req); 5833266423Sjfv if (error) 5834266423Sjfv printf("sysctl error: %d\n", error); 5835266423Sjfv free(buf, M_DEVBUF); 5836266423Sjfv return error; 5837266423Sjfv} 5838269198Sjfv 5839270346Sjfv#define IXL_SW_RES_SIZE 0x14 5840303816Ssbrunoint 5841277084Sjfvixl_res_alloc_cmp(const void *a, const void *b) 5842277084Sjfv{ 5843277084Sjfv const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; 5844284049Sjfv one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; 5845284049Sjfv two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; 5846277084Sjfv 5847277084Sjfv return ((int)one->resource_type - (int)two->resource_type); 5848277084Sjfv} 5849277084Sjfv 5850299549Serj/* 5851299549Serj * Longest string length: 25 5852299549Serj */ 5853303816Ssbrunochar * 5854299549Serjixl_switch_res_type_string(u8 type) 5855299549Serj{ 5856318357Serj static char * ixl_switch_res_type_strings[0x14] = { 5857299549Serj "VEB", 5858299549Serj "VSI", 5859299549Serj "Perfect Match MAC address", 5860299549Serj "S-tag", 5861299549Serj "(Reserved)", 5862299549Serj "Multicast hash entry", 5863299549Serj "Unicast hash entry", 5864299549Serj "VLAN", 5865299549Serj "VSI List entry", 5866299549Serj "(Reserved)", 5867299549Serj "VLAN Statistic Pool", 5868299549Serj "Mirror Rule", 5869299549Serj "Queue Set", 5870299549Serj "Inner VLAN Forward 
filter", 5871299549Serj "(Reserved)", 5872299549Serj "Inner MAC", 5873299549Serj "IP", 5874299549Serj "GRE/VN1 Key", 5875299549Serj "VN2 Key", 5876299549Serj "Tunneling Port" 5877299549Serj }; 5878299549Serj 5879299549Serj if (type < 0x14) 5880299549Serj return ixl_switch_res_type_strings[type]; 5881299549Serj else 5882299549Serj return "(Reserved)"; 5883299549Serj} 5884299549Serj 5885277084Sjfvstatic int 5886274205Sjfvixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) 5887269198Sjfv{ 5888270346Sjfv struct ixl_pf *pf = (struct ixl_pf *)arg1; 5889269198Sjfv struct i40e_hw *hw = &pf->hw; 5890269198Sjfv device_t dev = pf->dev; 5891269198Sjfv struct sbuf *buf; 5892303816Ssbruno enum i40e_status_code status; 5893269198Sjfv int error = 0; 5894269198Sjfv 5895269198Sjfv u8 num_entries; 5896270346Sjfv struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; 5897269198Sjfv 5898299546Serj buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5899269198Sjfv if (!buf) { 5900269198Sjfv device_printf(dev, "Could not allocate sbuf for output.\n"); 5901269198Sjfv return (ENOMEM); 5902269198Sjfv } 5903269198Sjfv 5904277084Sjfv bzero(resp, sizeof(resp)); 5905303816Ssbruno status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, 5906269198Sjfv resp, 5907270346Sjfv IXL_SW_RES_SIZE, 5908269198Sjfv NULL); 5909303816Ssbruno if (status) { 5910279858Sjfv device_printf(dev, 5911303816Ssbruno "%s: get_switch_resource_alloc() error %s, aq error %s\n", 5912303816Ssbruno __func__, i40e_stat_str(hw, status), 5913303816Ssbruno i40e_aq_str(hw, hw->aq.asq_last_status)); 5914269198Sjfv sbuf_delete(buf); 5915303816Ssbruno return (error); 5916269198Sjfv } 5917269198Sjfv 5918277084Sjfv /* Sort entries by type for display */ 5919277084Sjfv qsort(resp, num_entries, 5920277084Sjfv sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), 5921277084Sjfv &ixl_res_alloc_cmp); 5922277084Sjfv 5923269198Sjfv sbuf_cat(buf, "\n"); 5924277084Sjfv sbuf_printf(buf, "# of entries: %d\n", num_entries); 
5925269198Sjfv sbuf_printf(buf, 5926299549Serj " Type | Guaranteed | Total | Used | Un-allocated\n" 5927299549Serj " | (this) | (all) | (this) | (all) \n"); 5928269198Sjfv for (int i = 0; i < num_entries; i++) { 5929269198Sjfv sbuf_printf(buf, 5930299549Serj "%25s | %10d %5d %6d %12d", 5931299549Serj ixl_switch_res_type_string(resp[i].resource_type), 5932269198Sjfv resp[i].guaranteed, 5933269198Sjfv resp[i].total, 5934269198Sjfv resp[i].used, 5935269198Sjfv resp[i].total_unalloced); 5936269198Sjfv if (i < num_entries - 1) 5937269198Sjfv sbuf_cat(buf, "\n"); 5938269198Sjfv } 5939269198Sjfv 5940269198Sjfv error = sbuf_finish(buf); 5941299546Serj if (error) 5942299545Serj device_printf(dev, "Error finishing sbuf: %d\n", error); 5943299545Serj 5944290708Ssmh sbuf_delete(buf); 5945303816Ssbruno return (error); 5946274205Sjfv} 5947269198Sjfv 5948274205Sjfv/* 5949274205Sjfv** Caller must init and delete sbuf; this function will clear and 5950274205Sjfv** finish it for caller. 5951299549Serj** 5952299549Serj** XXX: Cannot use the SEID for this, since there is no longer a 5953299549Serj** fixed mapping between SEID and element type. 
5954274205Sjfv*/ 5955303816Ssbrunochar * 5956299549Serjixl_switch_element_string(struct sbuf *s, 5957299549Serj struct i40e_aqc_switch_config_element_resp *element) 5958274205Sjfv{ 5959274205Sjfv sbuf_clear(s); 5960274205Sjfv 5961299549Serj switch (element->element_type) { 5962299549Serj case I40E_AQ_SW_ELEM_TYPE_MAC: 5963299549Serj sbuf_printf(s, "MAC %3d", element->element_info); 5964299549Serj break; 5965299549Serj case I40E_AQ_SW_ELEM_TYPE_PF: 5966299549Serj sbuf_printf(s, "PF %3d", element->element_info); 5967299549Serj break; 5968299549Serj case I40E_AQ_SW_ELEM_TYPE_VF: 5969299549Serj sbuf_printf(s, "VF %3d", element->element_info); 5970299549Serj break; 5971299549Serj case I40E_AQ_SW_ELEM_TYPE_EMP: 5972274205Sjfv sbuf_cat(s, "EMP"); 5973299549Serj break; 5974299549Serj case I40E_AQ_SW_ELEM_TYPE_BMC: 5975299549Serj sbuf_cat(s, "BMC"); 5976299549Serj break; 5977299549Serj case I40E_AQ_SW_ELEM_TYPE_PV: 5978299549Serj sbuf_cat(s, "PV"); 5979299549Serj break; 5980299549Serj case I40E_AQ_SW_ELEM_TYPE_VEB: 5981299549Serj sbuf_cat(s, "VEB"); 5982299549Serj break; 5983299549Serj case I40E_AQ_SW_ELEM_TYPE_PA: 5984299549Serj sbuf_cat(s, "PA"); 5985299549Serj break; 5986299549Serj case I40E_AQ_SW_ELEM_TYPE_VSI: 5987299549Serj sbuf_printf(s, "VSI %3d", element->element_info); 5988299549Serj break; 5989299549Serj default: 5990299549Serj sbuf_cat(s, "?"); 5991299549Serj break; 5992299549Serj } 5993274205Sjfv 5994274205Sjfv sbuf_finish(s); 5995274205Sjfv return sbuf_data(s); 5996269198Sjfv} 5997269198Sjfv 5998274205Sjfvstatic int 5999274205Sjfvixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) 6000274205Sjfv{ 6001274205Sjfv struct ixl_pf *pf = (struct ixl_pf *)arg1; 6002274205Sjfv struct i40e_hw *hw = &pf->hw; 6003274205Sjfv device_t dev = pf->dev; 6004274205Sjfv struct sbuf *buf; 6005274205Sjfv struct sbuf *nmbuf; 6006303816Ssbruno enum i40e_status_code status; 6007274205Sjfv int error = 0; 6008299549Serj u16 next = 0; 6009274205Sjfv u8 aq_buf[I40E_AQ_LARGE_BUF]; 6010274205Sjfv 
6011274205Sjfv struct i40e_aqc_get_switch_config_resp *sw_config; 6012274205Sjfv sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 6013274205Sjfv 6014299546Serj buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 6015274205Sjfv if (!buf) { 6016274205Sjfv device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 6017274205Sjfv return (ENOMEM); 6018274205Sjfv } 6019274205Sjfv 6020303816Ssbruno status = i40e_aq_get_switch_config(hw, sw_config, 6021274205Sjfv sizeof(aq_buf), &next, NULL); 6022303816Ssbruno if (status) { 6023279858Sjfv device_printf(dev, 6024303816Ssbruno "%s: aq_get_switch_config() error %s, aq error %s\n", 6025303816Ssbruno __func__, i40e_stat_str(hw, status), 6026303816Ssbruno i40e_aq_str(hw, hw->aq.asq_last_status)); 6027274205Sjfv sbuf_delete(buf); 6028274205Sjfv return error; 6029274205Sjfv } 6030299549Serj if (next) 6031299549Serj device_printf(dev, "%s: TODO: get more config with SEID %d\n", 6032299549Serj __func__, next); 6033274205Sjfv 6034274205Sjfv nmbuf = sbuf_new_auto(); 6035274205Sjfv if (!nmbuf) { 6036274205Sjfv device_printf(dev, "Could not allocate sbuf for name output.\n"); 6037299546Serj sbuf_delete(buf); 6038274205Sjfv return (ENOMEM); 6039274205Sjfv } 6040274205Sjfv 6041274205Sjfv sbuf_cat(buf, "\n"); 6042303816Ssbruno /* Assuming <= 255 elements in switch */ 6043299549Serj sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); 6044299549Serj sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); 6045274205Sjfv /* Exclude: 6046274205Sjfv ** Revision -- all elements are revision 1 for now 6047274205Sjfv */ 6048274205Sjfv sbuf_printf(buf, 6049274205Sjfv "SEID ( Name ) | Uplink | Downlink | Conn Type\n" 6050274205Sjfv " | | | (uplink)\n"); 6051274205Sjfv for (int i = 0; i < sw_config->header.num_reported; i++) { 6052274205Sjfv // "%4d (%8s) | %8s %8s %#8x", 6053274205Sjfv sbuf_printf(buf, "%4d", sw_config->element[i].seid); 6054274205Sjfv sbuf_cat(buf, " "); 
6055279858Sjfv sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 6056299549Serj &sw_config->element[i])); 6057274205Sjfv sbuf_cat(buf, " | "); 6058299549Serj sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid); 6059274205Sjfv sbuf_cat(buf, " "); 6060299549Serj sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid); 6061274205Sjfv sbuf_cat(buf, " "); 6062274205Sjfv sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type); 6063274205Sjfv if (i < sw_config->header.num_reported - 1) 6064274205Sjfv sbuf_cat(buf, "\n"); 6065274205Sjfv } 6066274205Sjfv sbuf_delete(nmbuf); 6067274205Sjfv 6068274205Sjfv error = sbuf_finish(buf); 6069299546Serj if (error) 6070299545Serj device_printf(dev, "Error finishing sbuf: %d\n", error); 6071299545Serj 6072274205Sjfv sbuf_delete(buf); 6073274205Sjfv 6074274205Sjfv return (error); 6075274205Sjfv} 6076299552Serj 6077299552Serjstatic int 6078369202Sdonnerixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS) 6079369202Sdonner{ 6080369202Sdonner struct ixl_pf *pf = (struct ixl_pf *)arg1; 6081369202Sdonner struct i40e_hw *hw = &pf->hw; 6082369202Sdonner device_t dev = pf->dev; 6083369202Sdonner int requested_vlan = -1; 6084369202Sdonner enum i40e_status_code status = 0; 6085369202Sdonner int error = 0; 6086369202Sdonner 6087369202Sdonner error = sysctl_handle_int(oidp, &requested_vlan, 0, req); 6088369202Sdonner if ((error) || (req->newptr == NULL)) 6089369202Sdonner return (error); 6090369202Sdonner 6091369202Sdonner if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) { 6092369202Sdonner device_printf(dev, "Flags disallow setting of vlans\n"); 6093369202Sdonner return (ENODEV); 6094369202Sdonner } 6095369202Sdonner 6096369202Sdonner hw->switch_tag = requested_vlan; 6097369202Sdonner device_printf(dev, 6098369202Sdonner "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n", 6099369202Sdonner hw->switch_tag, hw->first_tag, hw->second_tag); 6100369202Sdonner status = i40e_aq_set_switch_config(hw, 0, 
0, 0, NULL); 6101369202Sdonner if (status) { 6102369202Sdonner device_printf(dev, 6103369202Sdonner "%s: aq_set_switch_config() error %s, aq error %s\n", 6104369202Sdonner __func__, i40e_stat_str(hw, status), 6105369202Sdonner i40e_aq_str(hw, hw->aq.asq_last_status)); 6106369202Sdonner return (status); 6107369202Sdonner } 6108369202Sdonner return (0); 6109369202Sdonner} 6110369202Sdonner 6111369202Sdonnerstatic int 6112303816Ssbrunoixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) 6113299552Serj{ 6114303816Ssbruno struct ixl_pf *pf = (struct ixl_pf *)arg1; 6115303816Ssbruno struct i40e_hw *hw = &pf->hw; 6116303816Ssbruno device_t dev = pf->dev; 6117303816Ssbruno struct sbuf *buf; 6118303816Ssbruno int error = 0; 6119279858Sjfv enum i40e_status_code status; 6120303816Ssbruno u32 reg; 6121279858Sjfv 6122303816Ssbruno struct i40e_aqc_get_set_rss_key_data key_data; 6123279858Sjfv 6124303816Ssbruno buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 6125303816Ssbruno if (!buf) { 6126303816Ssbruno device_printf(dev, "Could not allocate sbuf for output.\n"); 6127303816Ssbruno return (ENOMEM); 6128269198Sjfv } 6129269198Sjfv 6130333343Serj bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key)); 6131333343Serj 6132303816Ssbruno sbuf_cat(buf, "\n"); 6133303816Ssbruno if (hw->mac.type == I40E_MAC_X722) { 6134303816Ssbruno status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); 6135303816Ssbruno if (status) 6136303816Ssbruno device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", 6137303816Ssbruno i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 6138279858Sjfv } else { 6139303816Ssbruno for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 6140303816Ssbruno reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); 6141333343Serj bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); 6142279858Sjfv } 6143279858Sjfv } 6144279858Sjfv 6145333343Serj ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); 6146333343Serj 6147303816Ssbruno error = 
sbuf_finish(buf); 6148303816Ssbruno if (error) 6149303816Ssbruno device_printf(dev, "Error finishing sbuf: %d\n", error); 6150303816Ssbruno sbuf_delete(buf); 6151279858Sjfv 6152303816Ssbruno return (error); 6153279858Sjfv} 6154279858Sjfv 6155333343Serjstatic void 6156333343Serjixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) 6157333343Serj{ 6158333343Serj int i, j, k, width; 6159333343Serj char c; 6160333343Serj 6161333343Serj if (length < 1 || buf == NULL) return; 6162333343Serj 6163333343Serj int byte_stride = 16; 6164333343Serj int lines = length / byte_stride; 6165333343Serj int rem = length % byte_stride; 6166333343Serj if (rem > 0) 6167333343Serj lines++; 6168333343Serj 6169333343Serj for (i = 0; i < lines; i++) { 6170333343Serj width = (rem > 0 && i == lines - 1) 6171333343Serj ? rem : byte_stride; 6172333343Serj 6173333343Serj sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); 6174333343Serj 6175333343Serj for (j = 0; j < width; j++) 6176333343Serj sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); 6177333343Serj 6178333343Serj if (width < byte_stride) { 6179333343Serj for (k = 0; k < (byte_stride - width); k++) 6180333343Serj sbuf_printf(sb, " "); 6181333343Serj } 6182333343Serj 6183333343Serj if (!text) { 6184333343Serj sbuf_printf(sb, "\n"); 6185333343Serj continue; 6186333343Serj } 6187333343Serj 6188333343Serj for (j = 0; j < width; j++) { 6189333343Serj c = (char)buf[i * byte_stride + j]; 6190333343Serj if (c < 32 || c > 126) 6191333343Serj sbuf_printf(sb, "."); 6192333343Serj else 6193333343Serj sbuf_printf(sb, "%c", c); 6194333343Serj 6195333343Serj if (j == width - 1) 6196333343Serj sbuf_printf(sb, "\n"); 6197333343Serj } 6198333343Serj } 6199333343Serj} 6200333343Serj 6201279858Sjfvstatic int 6202303816Ssbrunoixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) 6203279858Sjfv{ 6204303816Ssbruno struct ixl_pf *pf = (struct ixl_pf *)arg1; 6205303816Ssbruno struct i40e_hw *hw = &pf->hw; 6206303816Ssbruno device_t dev = 
pf->dev; 6207303816Ssbruno struct sbuf *buf; 6208303816Ssbruno int error = 0; 6209303816Ssbruno enum i40e_status_code status; 6210303816Ssbruno u8 hlut[512]; 6211303816Ssbruno u32 reg; 6212279858Sjfv 6213303816Ssbruno buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 6214303816Ssbruno if (!buf) { 6215303816Ssbruno device_printf(dev, "Could not allocate sbuf for output.\n"); 6216303816Ssbruno return (ENOMEM); 6217279858Sjfv } 6218279858Sjfv 6219333343Serj bzero(hlut, sizeof(hlut)); 6220303816Ssbruno sbuf_cat(buf, "\n"); 6221303816Ssbruno if (hw->mac.type == I40E_MAC_X722) { 6222303816Ssbruno status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); 6223303816Ssbruno if (status) 6224303816Ssbruno device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", 6225303816Ssbruno i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 6226303816Ssbruno } else { 6227303816Ssbruno for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { 6228303816Ssbruno reg = rd32(hw, I40E_PFQF_HLUT(i)); 6229333343Serj bcopy(®, &hlut[i << 2], 4); 6230279858Sjfv } 6231279858Sjfv } 6232333343Serj ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); 6233279858Sjfv 6234303816Ssbruno error = sbuf_finish(buf); 6235303816Ssbruno if (error) 6236303816Ssbruno device_printf(dev, "Error finishing sbuf: %d\n", error); 6237303816Ssbruno sbuf_delete(buf); 6238279858Sjfv 6239279858Sjfv return (error); 6240279858Sjfv} 6241279858Sjfv 6242318357Serjstatic int 6243318357Serjixl_sysctl_hena(SYSCTL_HANDLER_ARGS) 6244318357Serj{ 6245318357Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6246318357Serj struct i40e_hw *hw = &pf->hw; 6247318357Serj u64 hena; 6248318357Serj 6249318357Serj hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 6250318357Serj ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 6251318357Serj 6252318357Serj return sysctl_handle_long(oidp, NULL, hena, req); 6253318357Serj} 6254318357Serj 6255318357Serj/* 6256318357Serj * Sysctl to disable firmware's 
link management 6257318357Serj * 6258318357Serj * 1 - Disable link management on this port 6259318357Serj * 0 - Re-enable link management 6260318357Serj * 6261318357Serj * On normal NVMs, firmware manages link by default. 6262318357Serj */ 6263318357Serjstatic int 6264318357Serjixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) 6265318357Serj{ 6266318357Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6267318357Serj struct i40e_hw *hw = &pf->hw; 6268318357Serj device_t dev = pf->dev; 6269318357Serj int requested_mode = -1; 6270318357Serj enum i40e_status_code status = 0; 6271318357Serj int error = 0; 6272318357Serj 6273318357Serj /* Read in new mode */ 6274318357Serj error = sysctl_handle_int(oidp, &requested_mode, 0, req); 6275318357Serj if ((error) || (req->newptr == NULL)) 6276318357Serj return (error); 6277318357Serj /* Check for sane value */ 6278318357Serj if (requested_mode < 0 || requested_mode > 1) { 6279318357Serj device_printf(dev, "Valid modes are 0 or 1\n"); 6280318357Serj return (EINVAL); 6281318357Serj } 6282318357Serj 6283318357Serj /* Set new mode */ 6284318357Serj status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); 6285318357Serj if (status) { 6286318357Serj device_printf(dev, 6287318357Serj "%s: Error setting new phy debug mode %s," 6288318357Serj " aq error: %s\n", __func__, i40e_stat_str(hw, status), 6289318357Serj i40e_aq_str(hw, hw->aq.asq_last_status)); 6290318357Serj return (EIO); 6291318357Serj } 6292318357Serj 6293318357Serj return (0); 6294318357Serj} 6295318357Serj 6296318357Serj/* 6297349163Serj * Read some diagnostic data from a (Q)SFP+ module 6298349163Serj * 6299349163Serj * SFP A2 QSFP Lower Page 6300349163Serj * Temperature 96-97 22-23 6301349163Serj * Vcc 98-99 26-27 6302349163Serj * TX power 102-103 34-35..40-41 6303349163Serj * RX power 104-105 50-51..56-57 6304349163Serj */ 6305349163Serjstatic int 6306349163Serjixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 6307349163Serj{ 6308349163Serj struct ixl_pf *pf = 
(struct ixl_pf *)arg1; 6309349163Serj device_t dev = pf->dev; 6310349163Serj struct sbuf *sbuf; 6311349163Serj int error = 0; 6312349163Serj u8 output; 6313349163Serj 6314349163Serj if (req->oldptr == NULL) { 6315349163Serj error = SYSCTL_OUT(req, 0, 128); 6316349163Serj return (0); 6317349163Serj } 6318349163Serj 6319349163Serj error = pf->read_i2c_byte(pf, 0, 0xA0, &output); 6320349163Serj if (error) { 6321349163Serj device_printf(dev, "Error reading from i2c\n"); 6322349163Serj return (error); 6323349163Serj } 6324349163Serj 6325349163Serj /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 6326349163Serj if (output == 0x3) { 6327349163Serj /* 6328349163Serj * Check for: 6329349163Serj * - Internally calibrated data 6330349163Serj * - Diagnostic monitoring is implemented 6331349163Serj */ 6332349163Serj pf->read_i2c_byte(pf, 92, 0xA0, &output); 6333349163Serj if (!(output & 0x60)) { 6334349163Serj device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); 6335349163Serj return (0); 6336349163Serj } 6337349163Serj 6338349163Serj sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 6339349163Serj 6340349163Serj for (u8 offset = 96; offset < 100; offset++) { 6341349163Serj pf->read_i2c_byte(pf, offset, 0xA2, &output); 6342349163Serj sbuf_printf(sbuf, "%02X ", output); 6343349163Serj } 6344349163Serj for (u8 offset = 102; offset < 106; offset++) { 6345349163Serj pf->read_i2c_byte(pf, offset, 0xA2, &output); 6346349163Serj sbuf_printf(sbuf, "%02X ", output); 6347349163Serj } 6348349163Serj } else if (output == 0xD || output == 0x11) { 6349349163Serj /* 6350349163Serj * QSFP+ modules are always internally calibrated, and must indicate 6351349163Serj * what types of diagnostic monitoring are implemented 6352349163Serj */ 6353349163Serj sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 6354349163Serj 6355349163Serj for (u8 offset = 22; offset < 24; offset++) { 6356349163Serj pf->read_i2c_byte(pf, offset, 0xA0, &output); 6357349163Serj sbuf_printf(sbuf, "%02X ", 
output); 6358349163Serj } 6359349163Serj for (u8 offset = 26; offset < 28; offset++) { 6360349163Serj pf->read_i2c_byte(pf, offset, 0xA0, &output); 6361349163Serj sbuf_printf(sbuf, "%02X ", output); 6362349163Serj } 6363349163Serj /* Read the data from the first lane */ 6364349163Serj for (u8 offset = 34; offset < 36; offset++) { 6365349163Serj pf->read_i2c_byte(pf, offset, 0xA0, &output); 6366349163Serj sbuf_printf(sbuf, "%02X ", output); 6367349163Serj } 6368349163Serj for (u8 offset = 50; offset < 52; offset++) { 6369349163Serj pf->read_i2c_byte(pf, offset, 0xA0, &output); 6370349163Serj sbuf_printf(sbuf, "%02X ", output); 6371349163Serj } 6372349163Serj } else { 6373349163Serj device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output); 6374349163Serj return (0); 6375349163Serj } 6376349163Serj 6377349163Serj sbuf_finish(sbuf); 6378349163Serj sbuf_delete(sbuf); 6379349163Serj 6380349163Serj return (0); 6381349163Serj} 6382349163Serj 6383349163Serj/* 6384318357Serj * Sysctl to read a byte from I2C bus. 
6385318357Serj * 6386318357Serj * Input: 32-bit value: 6387318357Serj * bits 0-7: device address (0xA0 or 0xA2) 6388318357Serj * bits 8-15: offset (0-255) 6389318357Serj * bits 16-31: unused 6390318357Serj * Output: 8-bit value read 6391318357Serj */ 6392318357Serjstatic int 6393318357Serjixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS) 6394318357Serj{ 6395318357Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6396318357Serj device_t dev = pf->dev; 6397318357Serj int input = -1, error = 0; 6398318357Serj 6399318357Serj u8 dev_addr, offset, output; 6400318357Serj 6401318357Serj /* Read in I2C read parameters */ 6402318357Serj error = sysctl_handle_int(oidp, &input, 0, req); 6403318357Serj if ((error) || (req->newptr == NULL)) 6404318357Serj return (error); 6405318357Serj /* Validate device address */ 6406318357Serj dev_addr = input & 0xFF; 6407318357Serj if (dev_addr != 0xA0 && dev_addr != 0xA2) { 6408318357Serj return (EINVAL); 6409318357Serj } 6410318357Serj offset = (input >> 8) & 0xFF; 6411318357Serj 6412349163Serj error = pf->read_i2c_byte(pf, offset, dev_addr, &output); 6413318357Serj if (error) 6414318357Serj return (error); 6415318357Serj 6416318357Serj device_printf(dev, "%02X\n", output); 6417318357Serj return (0); 6418318357Serj} 6419318357Serj 6420318357Serj/* 6421318357Serj * Sysctl to write a byte to the I2C bus. 
6422318357Serj * 6423318357Serj * Input: 32-bit value: 6424318357Serj * bits 0-7: device address (0xA0 or 0xA2) 6425318357Serj * bits 8-15: offset (0-255) 6426318357Serj * bits 16-23: value to write 6427318357Serj * bits 24-31: unused 6428318357Serj * Output: 8-bit value written 6429318357Serj */ 6430318357Serjstatic int 6431318357Serjixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS) 6432318357Serj{ 6433318357Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6434318357Serj device_t dev = pf->dev; 6435318357Serj int input = -1, error = 0; 6436318357Serj 6437318357Serj u8 dev_addr, offset, value; 6438318357Serj 6439318357Serj /* Read in I2C write parameters */ 6440318357Serj error = sysctl_handle_int(oidp, &input, 0, req); 6441318357Serj if ((error) || (req->newptr == NULL)) 6442318357Serj return (error); 6443318357Serj /* Validate device address */ 6444318357Serj dev_addr = input & 0xFF; 6445318357Serj if (dev_addr != 0xA0 && dev_addr != 0xA2) { 6446318357Serj return (EINVAL); 6447318357Serj } 6448318357Serj offset = (input >> 8) & 0xFF; 6449318357Serj value = (input >> 16) & 0xFF; 6450318357Serj 6451349163Serj error = pf->write_i2c_byte(pf, offset, dev_addr, value); 6452318357Serj if (error) 6453318357Serj return (error); 6454318357Serj 6455318357Serj device_printf(dev, "%02X written\n", value); 6456318357Serj return (0); 6457318357Serj} 6458318357Serj 6459318357Serjstatic int 6460318357Serjixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 6461318357Serj u8 bit_pos, int *is_set) 6462318357Serj{ 6463318357Serj device_t dev = pf->dev; 6464318357Serj struct i40e_hw *hw = &pf->hw; 6465318357Serj enum i40e_status_code status; 6466318357Serj 6467318357Serj status = i40e_aq_get_phy_capabilities(hw, 6468318357Serj FALSE, FALSE, abilities, NULL); 6469318357Serj if (status) { 6470318357Serj device_printf(dev, 6471318357Serj "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 6472318357Serj __func__, i40e_stat_str(hw, status), 
6473318357Serj i40e_aq_str(hw, hw->aq.asq_last_status)); 6474318357Serj return (EIO); 6475318357Serj } 6476318357Serj 6477333343Serj *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos); 6478318357Serj return (0); 6479318357Serj} 6480318357Serj 6481318357Serjstatic int 6482318357Serjixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 6483318357Serj u8 bit_pos, int set) 6484318357Serj{ 6485318357Serj device_t dev = pf->dev; 6486318357Serj struct i40e_hw *hw = &pf->hw; 6487318357Serj struct i40e_aq_set_phy_config config; 6488318357Serj enum i40e_status_code status; 6489318357Serj 6490318357Serj /* Set new PHY config */ 6491318357Serj memset(&config, 0, sizeof(config)); 6492333343Serj config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos); 6493318357Serj if (set) 6494318357Serj config.fec_config |= bit_pos; 6495333343Serj if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) { 6496318357Serj config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 6497318357Serj config.phy_type = abilities->phy_type; 6498318357Serj config.phy_type_ext = abilities->phy_type_ext; 6499318357Serj config.link_speed = abilities->link_speed; 6500318357Serj config.eee_capability = abilities->eee_capability; 6501318357Serj config.eeer = abilities->eeer_val; 6502318357Serj config.low_power_ctrl = abilities->d3_lpan; 6503318357Serj status = i40e_aq_set_phy_config(hw, &config, NULL); 6504318357Serj 6505318357Serj if (status) { 6506318357Serj device_printf(dev, 6507318357Serj "%s: i40e_aq_set_phy_config() status %s, aq error %s\n", 6508318357Serj __func__, i40e_stat_str(hw, status), 6509318357Serj i40e_aq_str(hw, hw->aq.asq_last_status)); 6510318357Serj return (EIO); 6511318357Serj } 6512318357Serj } 6513318357Serj 6514318357Serj return (0); 6515318357Serj} 6516318357Serj 6517318357Serjstatic int 6518318357Serjixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) 6519318357Serj{ 6520318357Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 
6521318357Serj int mode, error = 0; 6522318357Serj 6523318357Serj struct i40e_aq_get_phy_abilities_resp abilities; 6524333343Serj error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode); 6525318357Serj if (error) 6526318357Serj return (error); 6527318357Serj /* Read in new mode */ 6528318357Serj error = sysctl_handle_int(oidp, &mode, 0, req); 6529318357Serj if ((error) || (req->newptr == NULL)) 6530318357Serj return (error); 6531318357Serj 6532318357Serj return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); 6533318357Serj} 6534318357Serj 6535318357Serjstatic int 6536318357Serjixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) 6537318357Serj{ 6538318357Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6539318357Serj int mode, error = 0; 6540318357Serj 6541318357Serj struct i40e_aq_get_phy_abilities_resp abilities; 6542333343Serj error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode); 6543318357Serj if (error) 6544318357Serj return (error); 6545318357Serj /* Read in new mode */ 6546318357Serj error = sysctl_handle_int(oidp, &mode, 0, req); 6547318357Serj if ((error) || (req->newptr == NULL)) 6548318357Serj return (error); 6549318357Serj 6550318357Serj return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); 6551318357Serj} 6552318357Serj 6553318357Serjstatic int 6554318357Serjixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) 6555318357Serj{ 6556318357Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6557318357Serj int mode, error = 0; 6558318357Serj 6559318357Serj struct i40e_aq_get_phy_abilities_resp abilities; 6560333343Serj error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode); 6561318357Serj if (error) 6562318357Serj return (error); 6563318357Serj /* Read in new mode */ 6564318357Serj error = sysctl_handle_int(oidp, &mode, 0, req); 6565318357Serj if ((error) || (req->newptr == NULL)) 6566318357Serj return (error); 6567318357Serj 6568318357Serj return 
ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); 6569318357Serj} 6570318357Serj 6571318357Serjstatic int 6572318357Serjixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) 6573318357Serj{ 6574318357Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6575318357Serj int mode, error = 0; 6576318357Serj 6577318357Serj struct i40e_aq_get_phy_abilities_resp abilities; 6578333343Serj error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode); 6579318357Serj if (error) 6580318357Serj return (error); 6581318357Serj /* Read in new mode */ 6582318357Serj error = sysctl_handle_int(oidp, &mode, 0, req); 6583318357Serj if ((error) || (req->newptr == NULL)) 6584318357Serj return (error); 6585318357Serj 6586318357Serj return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); 6587318357Serj} 6588318357Serj 6589318357Serjstatic int 6590318357Serjixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) 6591318357Serj{ 6592318357Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6593318357Serj int mode, error = 0; 6594318357Serj 6595318357Serj struct i40e_aq_get_phy_abilities_resp abilities; 6596333343Serj error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode); 6597318357Serj if (error) 6598318357Serj return (error); 6599318357Serj /* Read in new mode */ 6600318357Serj error = sysctl_handle_int(oidp, &mode, 0, req); 6601318357Serj if ((error) || (req->newptr == NULL)) 6602318357Serj return (error); 6603318357Serj 6604318357Serj return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); 6605318357Serj} 6606318357Serj 6607333343Serjstatic int 6608333343Serjixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS) 6609333343Serj{ 6610333343Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6611333343Serj struct i40e_hw *hw = &pf->hw; 6612333343Serj device_t dev = pf->dev; 6613333343Serj struct sbuf *buf; 6614333343Serj int error = 0; 6615333343Serj enum i40e_status_code status; 6616333343Serj 6617333343Serj buf = 
sbuf_new_for_sysctl(NULL, NULL, 128, req); 6618333343Serj if (!buf) { 6619333343Serj device_printf(dev, "Could not allocate sbuf for output.\n"); 6620333343Serj return (ENOMEM); 6621333343Serj } 6622333343Serj 6623333343Serj u8 *final_buff; 6624333343Serj /* This amount is only necessary if reading the entire cluster into memory */ 6625333343Serj#define IXL_FINAL_BUFF_SIZE (1280 * 1024) 6626333343Serj final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK); 6627333343Serj int final_buff_len = 0; 6628333343Serj 6629333343Serj u8 cluster_id = 1; 6630333343Serj bool more = true; 6631333343Serj 6632333343Serj u8 dump_buf[4096]; 6633333343Serj u16 curr_buff_size = 4096; 6634333343Serj u8 curr_next_table = 0; 6635333343Serj u32 curr_next_index = 0; 6636333343Serj 6637333343Serj u16 ret_buff_size; 6638333343Serj u8 ret_next_table; 6639333343Serj u32 ret_next_index; 6640333343Serj 6641333343Serj sbuf_cat(buf, "\n"); 6642333343Serj 6643333343Serj while (more) { 6644333343Serj status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size, 6645333343Serj dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL); 6646333343Serj if (status) { 6647333343Serj device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n", 6648333343Serj i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 6649333343Serj goto free_out; 6650333343Serj } 6651333343Serj 6652333343Serj /* copy info out of temp buffer */ 6653333343Serj bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size); 6654333343Serj final_buff_len += ret_buff_size; 6655333343Serj 6656333343Serj if (ret_next_table != curr_next_table) { 6657333343Serj /* We're done with the current table; we can dump out read data. 
*/ 6658333343Serj sbuf_printf(buf, "%d:", curr_next_table); 6659333343Serj int bytes_printed = 0; 6660333343Serj while (bytes_printed <= final_buff_len) { 6661333343Serj sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), ""); 6662333343Serj bytes_printed += 16; 6663333343Serj } 6664333343Serj sbuf_cat(buf, "\n"); 6665333343Serj 6666333343Serj /* The entire cluster has been read; we're finished */ 6667333343Serj if (ret_next_table == 0xFF) 6668333343Serj break; 6669333343Serj 6670333343Serj /* Otherwise clear the output buffer and continue reading */ 6671333343Serj bzero(final_buff, IXL_FINAL_BUFF_SIZE); 6672333343Serj final_buff_len = 0; 6673333343Serj } 6674333343Serj 6675333343Serj if (ret_next_index == 0xFFFFFFFF) 6676333343Serj ret_next_index = 0; 6677333343Serj 6678333343Serj bzero(dump_buf, sizeof(dump_buf)); 6679333343Serj curr_next_table = ret_next_table; 6680333343Serj curr_next_index = ret_next_index; 6681333343Serj } 6682333343Serj 6683333343Serjfree_out: 6684333343Serj free(final_buff, M_DEVBUF); 6685333343Serj error = sbuf_finish(buf); 6686333343Serj if (error) 6687333343Serj device_printf(dev, "Error finishing sbuf: %d\n", error); 6688333343Serj sbuf_delete(buf); 6689333343Serj 6690333343Serj return (error); 6691333343Serj} 6692333343Serj 6693333343Serjstatic int 6694349163Serjixl_start_fw_lldp(struct ixl_pf *pf) 6695333343Serj{ 6696333343Serj struct i40e_hw *hw = &pf->hw; 6697349163Serj enum i40e_status_code status; 6698349163Serj 6699349163Serj status = i40e_aq_start_lldp(hw, false, NULL); 6700349163Serj if (status != I40E_SUCCESS) { 6701349163Serj switch (hw->aq.asq_last_status) { 6702349163Serj case I40E_AQ_RC_EEXIST: 6703349163Serj device_printf(pf->dev, 6704349163Serj "FW LLDP agent is already running\n"); 6705349163Serj break; 6706349163Serj case I40E_AQ_RC_EPERM: 6707349163Serj device_printf(pf->dev, 6708349163Serj "Device configuration forbids SW from starting " 6709349163Serj "the LLDP agent. 
Set the \"LLDP Agent\" UEFI HII " 6710349163Serj "attribute to \"Enabled\" to use this sysctl\n"); 6711349163Serj return (EINVAL); 6712349163Serj default: 6713349163Serj device_printf(pf->dev, 6714349163Serj "Starting FW LLDP agent failed: error: %s, %s\n", 6715349163Serj i40e_stat_str(hw, status), 6716349163Serj i40e_aq_str(hw, hw->aq.asq_last_status)); 6717349163Serj return (EINVAL); 6718349163Serj } 6719349163Serj } 6720349163Serj 6721349163Serj atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 6722349163Serj return (0); 6723349163Serj} 6724349163Serj 6725349163Serjstatic int 6726349163Serjixl_stop_fw_lldp(struct ixl_pf *pf) 6727349163Serj{ 6728349163Serj struct i40e_hw *hw = &pf->hw; 6729333343Serj device_t dev = pf->dev; 6730333343Serj enum i40e_status_code status; 6731349163Serj 6732349163Serj if (hw->func_caps.npar_enable != 0) { 6733349163Serj device_printf(dev, 6734349163Serj "Disabling FW LLDP agent is not supported on this device\n"); 6735349163Serj return (EINVAL); 6736349163Serj } 6737349163Serj 6738349163Serj if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { 6739349163Serj device_printf(dev, 6740349163Serj "Disabling FW LLDP agent is not supported in this FW version. 
Please update FW to enable this feature.\n"); 6741349163Serj return (EINVAL); 6742349163Serj } 6743349163Serj 6744349163Serj status = i40e_aq_stop_lldp(hw, true, false, NULL); 6745349163Serj if (status != I40E_SUCCESS) { 6746349163Serj if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { 6747349163Serj device_printf(dev, 6748349163Serj "Disabling FW LLDP agent failed: error: %s, %s\n", 6749349163Serj i40e_stat_str(hw, status), 6750349163Serj i40e_aq_str(hw, hw->aq.asq_last_status)); 6751349163Serj return (EINVAL); 6752349163Serj } 6753349163Serj 6754349163Serj device_printf(dev, "FW LLDP agent is already stopped\n"); 6755349163Serj } 6756349163Serj 6757349163Serj i40e_aq_set_dcb_parameters(hw, true, NULL); 6758349163Serj atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 6759349163Serj return (0); 6760349163Serj} 6761349163Serj 6762349163Serjstatic int 6763349163Serjixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) 6764349163Serj{ 6765349163Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6766349163Serj int state, new_state, error = 0; 6767349163Serj 6768333343Serj state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); 6769333343Serj 6770333343Serj /* Read in new mode */ 6771333343Serj error = sysctl_handle_int(oidp, &new_state, 0, req); 6772333343Serj if ((error) || (req->newptr == NULL)) 6773333343Serj return (error); 6774333343Serj 6775333343Serj /* Already in requested state */ 6776333343Serj if (new_state == state) 6777333343Serj return (error); 6778333343Serj 6779349163Serj if (new_state == 0) 6780349163Serj return ixl_stop_fw_lldp(pf); 6781333343Serj 6782349163Serj return ixl_start_fw_lldp(pf); 6783333343Serj} 6784333343Serj 6785349163Serjstatic int 6786349163Serjixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 6787333343Serj{ 6788349163Serj struct ixl_pf *pf = (struct ixl_pf *)arg1; 6789349163Serj int state, new_state; 6790349163Serj int sysctl_handle_status = 0; 6791349163Serj enum i40e_status_code cmd_status; 6792333343Serj 6793349163Serj /* Init 
states' values */ 6794349163Serj state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED)); 6795333343Serj 6796349163Serj /* Get requested mode */ 6797349163Serj sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); 6798349163Serj if ((sysctl_handle_status) || (req->newptr == NULL)) 6799349163Serj return (sysctl_handle_status); 6800333343Serj 6801349163Serj /* Check if state has changed */ 6802349163Serj if (new_state == state) 6803349163Serj return (0); 6804333343Serj 6805349163Serj /* Set new state */ 6806349163Serj cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); 6807349163Serj 6808349163Serj /* Save new state or report error */ 6809349163Serj if (!cmd_status) { 6810349163Serj if (new_state == 0) 6811349163Serj atomic_clear_int(&pf->state, IXL_PF_STATE_EEE_ENABLED); 6812349163Serj else 6813349163Serj atomic_set_int(&pf->state, IXL_PF_STATE_EEE_ENABLED); 6814349163Serj } else if (cmd_status == I40E_ERR_CONFIG) 6815349163Serj return (EPERM); 6816349163Serj else 6817349163Serj return (EIO); 6818349163Serj 6819333343Serj return (0); 6820333343Serj} 6821333343Serj 6822333343Serjint 6823333343Serjixl_attach_get_link_status(struct ixl_pf *pf) 6824333343Serj{ 6825333343Serj struct i40e_hw *hw = &pf->hw; 6826333343Serj device_t dev = pf->dev; 6827333343Serj int error = 0; 6828333343Serj 6829333343Serj if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || 6830333343Serj (hw->aq.fw_maj_ver < 4)) { 6831333343Serj i40e_msec_delay(75); 6832333343Serj error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); 6833333343Serj if (error) { 6834333343Serj device_printf(dev, "link restart failed, aq_err=%d\n", 6835333343Serj pf->hw.aq.asq_last_status); 6836333343Serj return error; 6837333343Serj } 6838333343Serj } 6839333343Serj 6840333343Serj /* Determine link state */ 6841333343Serj hw->phy.get_link_info = TRUE; 6842333343Serj i40e_get_link_status(hw, &pf->link_up); 6843333343Serj return (0); 6844333343Serj} 6845