ixgbe_common.c revision 315333
1215911Sjfv/****************************************************************************** 2215911Sjfv 3215911Sjfv Copyright (c) 2001-2017, Intel Corporation 4215911Sjfv All rights reserved. 5215911Sjfv 6215911Sjfv Redistribution and use in source and binary forms, with or without 7215911Sjfv modification, are permitted provided that the following conditions are met: 8215911Sjfv 9215911Sjfv 1. Redistributions of source code must retain the above copyright notice, 10215911Sjfv this list of conditions and the following disclaimer. 11215911Sjfv 12215911Sjfv 2. Redistributions in binary form must reproduce the above copyright 13215911Sjfv notice, this list of conditions and the following disclaimer in the 14215911Sjfv documentation and/or other materials provided with the distribution. 15215911Sjfv 16215911Sjfv 3. Neither the name of the Intel Corporation nor the names of its 17215911Sjfv contributors may be used to endorse or promote products derived from 18215911Sjfv this software without specific prior written permission. 19215911Sjfv 20215911Sjfv THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21215911Sjfv AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22215911Sjfv IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23215911Sjfv ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24215911Sjfv LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25215911Sjfv CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26215911Sjfv SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27215911Sjfv INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28215911Sjfv CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29215911Sjfv ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30215911Sjfv POSSIBILITY OF SUCH DAMAGE. 
31215911Sjfv 32215911Sjfv******************************************************************************/ 33215911Sjfv/*$FreeBSD: stable/10/sys/dev/ixgbe/ixgbe_common.c 315333 2017-03-15 21:20:17Z erj $*/ 34215911Sjfv 35215911Sjfv#include "ixgbe_common.h" 36215911Sjfv#include "ixgbe_phy.h" 37215911Sjfv#include "ixgbe_dcb.h" 38215911Sjfv#include "ixgbe_dcb_82599.h" 39215911Sjfv#include "ixgbe_api.h" 40215911Sjfv 41215911Sjfvstatic s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 42215911Sjfvstatic s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 43215911Sjfvstatic void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 44215911Sjfvstatic s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 45215911Sjfvstatic void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 46215911Sjfvstatic void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 47215911Sjfv u16 count); 48215911Sjfvstatic u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 49215911Sjfvstatic void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 50215911Sjfvstatic void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 51215911Sjfvstatic void ixgbe_release_eeprom(struct ixgbe_hw *hw); 52215911Sjfv 53215911Sjfvstatic s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 54215911Sjfvstatic s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, 55215911Sjfv u16 *san_mac_offset); 56215911Sjfvstatic s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 57215911Sjfv u16 words, u16 *data); 58215911Sjfvstatic s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 59215911Sjfv u16 words, u16 *data); 60215911Sjfvstatic s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, 61215911Sjfv u16 offset); 62215911Sjfv 63215911Sjfv/** 64215911Sjfv * ixgbe_init_ops_generic - Inits function ptrs 65215911Sjfv * @hw: pointer to the hardware structure 66215911Sjfv * 67215911Sjfv * Initialize the function pointers. 
68215911Sjfv **/ 69215911Sjfvs32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) 70215911Sjfv{ 71215911Sjfv struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 72215911Sjfv struct ixgbe_mac_info *mac = &hw->mac; 73215911Sjfv u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 74215911Sjfv 75215911Sjfv DEBUGFUNC("ixgbe_init_ops_generic"); 76215911Sjfv 77215911Sjfv /* EEPROM */ 78215911Sjfv eeprom->ops.init_params = ixgbe_init_eeprom_params_generic; 79215911Sjfv /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ 80215911Sjfv if (eec & IXGBE_EEC_PRES) { 81215911Sjfv eeprom->ops.read = ixgbe_read_eerd_generic; 82215911Sjfv eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic; 83215911Sjfv } else { 84215911Sjfv eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic; 85215911Sjfv eeprom->ops.read_buffer = 86215911Sjfv ixgbe_read_eeprom_buffer_bit_bang_generic; 87215911Sjfv } 88215911Sjfv eeprom->ops.write = ixgbe_write_eeprom_generic; 89215911Sjfv eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic; 90215911Sjfv eeprom->ops.validate_checksum = 91215911Sjfv ixgbe_validate_eeprom_checksum_generic; 92215911Sjfv eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic; 93215911Sjfv eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic; 94215911Sjfv 95215911Sjfv /* MAC */ 96215911Sjfv mac->ops.init_hw = ixgbe_init_hw_generic; 97215911Sjfv mac->ops.reset_hw = NULL; 98215911Sjfv mac->ops.start_hw = ixgbe_start_hw_generic; 99215911Sjfv mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic; 100215911Sjfv mac->ops.get_media_type = NULL; 101215911Sjfv mac->ops.get_supported_physical_layer = NULL; 102215911Sjfv mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic; 103215911Sjfv mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic; 104215911Sjfv mac->ops.stop_adapter = ixgbe_stop_adapter_generic; 105215911Sjfv mac->ops.get_bus_info = ixgbe_get_bus_info_generic; 106215911Sjfv mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie; 
107215911Sjfv mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync; 108215911Sjfv mac->ops.release_swfw_sync = ixgbe_release_swfw_sync; 109215911Sjfv mac->ops.prot_autoc_read = prot_autoc_read_generic; 110215911Sjfv mac->ops.prot_autoc_write = prot_autoc_write_generic; 111215911Sjfv 112215911Sjfv /* LEDs */ 113215911Sjfv mac->ops.led_on = ixgbe_led_on_generic; 114 mac->ops.led_off = ixgbe_led_off_generic; 115 mac->ops.blink_led_start = ixgbe_blink_led_start_generic; 116 mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic; 117 mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic; 118 119 /* RAR, Multicast, VLAN */ 120 mac->ops.set_rar = ixgbe_set_rar_generic; 121 mac->ops.clear_rar = ixgbe_clear_rar_generic; 122 mac->ops.insert_mac_addr = NULL; 123 mac->ops.set_vmdq = NULL; 124 mac->ops.clear_vmdq = NULL; 125 mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic; 126 mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic; 127 mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic; 128 mac->ops.enable_mc = ixgbe_enable_mc_generic; 129 mac->ops.disable_mc = ixgbe_disable_mc_generic; 130 mac->ops.clear_vfta = NULL; 131 mac->ops.set_vfta = NULL; 132 mac->ops.set_vlvf = NULL; 133 mac->ops.init_uta_tables = NULL; 134 mac->ops.enable_rx = ixgbe_enable_rx_generic; 135 mac->ops.disable_rx = ixgbe_disable_rx_generic; 136 137 /* Flow Control */ 138 mac->ops.fc_enable = ixgbe_fc_enable_generic; 139 mac->ops.setup_fc = ixgbe_setup_fc_generic; 140 mac->ops.fc_autoneg = ixgbe_fc_autoneg; 141 142 /* Link */ 143 mac->ops.get_link_capabilities = NULL; 144 mac->ops.setup_link = NULL; 145 mac->ops.check_link = NULL; 146 mac->ops.dmac_config = NULL; 147 mac->ops.dmac_update_tcs = NULL; 148 mac->ops.dmac_config_tcs = NULL; 149 150 return IXGBE_SUCCESS; 151} 152 153/** 154 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation 155 * of flow control 156 * @hw: pointer to hardware structure 157 * 158 * This function returns TRUE if the 
device supports flow control 159 * autonegotiation, and FALSE if it does not. 160 * 161 **/ 162bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) 163{ 164 bool supported = FALSE; 165 ixgbe_link_speed speed; 166 bool link_up; 167 168 DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); 169 170 switch (hw->phy.media_type) { 171 case ixgbe_media_type_fiber_fixed: 172 case ixgbe_media_type_fiber_qsfp: 173 case ixgbe_media_type_fiber: 174 /* flow control autoneg black list */ 175 switch (hw->device_id) { 176 case IXGBE_DEV_ID_X550EM_A_SFP: 177 case IXGBE_DEV_ID_X550EM_A_SFP_N: 178 case IXGBE_DEV_ID_X550EM_A_QSFP: 179 case IXGBE_DEV_ID_X550EM_A_QSFP_N: 180 supported = FALSE; 181 break; 182 default: 183 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); 184 /* if link is down, assume supported */ 185 if (link_up) 186 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? 187 TRUE : FALSE; 188 else 189 supported = TRUE; 190 } 191 192 break; 193 case ixgbe_media_type_backplane: 194 supported = TRUE; 195 break; 196 case ixgbe_media_type_copper: 197 /* only some copper devices support flow control autoneg */ 198 switch (hw->device_id) { 199 case IXGBE_DEV_ID_82599_T3_LOM: 200 case IXGBE_DEV_ID_X540T: 201 case IXGBE_DEV_ID_X540T1: 202 case IXGBE_DEV_ID_X540_BYPASS: 203 case IXGBE_DEV_ID_X550T: 204 case IXGBE_DEV_ID_X550T1: 205 case IXGBE_DEV_ID_X550EM_X_10G_T: 206 case IXGBE_DEV_ID_X550EM_A_10G_T: 207 case IXGBE_DEV_ID_X550EM_A_1G_T: 208 case IXGBE_DEV_ID_X550EM_A_1G_T_L: 209 supported = TRUE; 210 break; 211 default: 212 supported = FALSE; 213 } 214 default: 215 break; 216 } 217 218 if (!supported) 219 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, 220 "Device %x does not support flow control autoneg", 221 hw->device_id); 222 return supported; 223} 224 225/** 226 * ixgbe_setup_fc_generic - Set up flow control 227 * @hw: pointer to hardware structure 228 * 229 * Called at init time to set up flow control. 
 **/
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 reg = 0, reg_bp = 0;	/* PCS1GANA and AUTOC shadow copies */
	u16 reg_cu = 0;			/* PHY MDIO autoneg advertisement */
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_setup_fc_generic");

	/* Validate the requested mode */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in. If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_backplane:
		/* some MAC's need RMW protection on AUTOC */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* only backplane uses autoc so fall through */
	case ixgbe_media_type_fiber_fixed:
	case ixgbe_media_type_fiber_qsfp:
	case ixgbe_media_type_fiber:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		break;
	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
		break;
	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= IXGBE_TAF_ASM_PAUSE;
			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
		}
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE, as such we fall
		 * through to the fc_full statement. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
				  IXGBE_AUTOC_ASM_PAUSE;
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			     "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;	/* NOTE(review): unreachable after goto, kept as-is */
	}

	if (hw->mac.type < ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
		if (ret_val)
			goto out;
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		   (ixgbe_device_supports_autoneg_fc(hw))) {
		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
	}

	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
out:
	return ret_val;
}

/**
 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	s32 ret_val;
	u32 ctrl_ext;
	u16 device_caps;

	DEBUGFUNC("ixgbe_start_hw_generic");

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* PHY ops initialization must be done in reset_hw() */

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control; a NOT_IMPLEMENTED result is tolerated
	 * because some MACs legitimately have no setup_fc op. */
	ret_val = ixgbe_setup_fc(hw);
	if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
		DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
		return ret_val;
	}

	/* Cache bit indicating need for crosstalk fix */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		hw->mac.ops.get_device_caps(hw, &device_caps);
		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
			hw->need_crosstalk_fix = FALSE;
		else
			hw->need_crosstalk_fix = TRUE;
		break;
	default:
		hw->need_crosstalk_fix = FALSE;
		break;
	}

	/* Clear adapter stopped flag */
	hw->adapter_stopped = FALSE;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_start_hw_gen2 - Init sequence for common device family
 * @hw: pointer to hw structure
 *
 * Performs the init sequence common to the second generation
 * of 10 GbE devices.
 * Devices in the second generation:
 *     82599
 *     X540
 **/
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;
	u32 regval;

	/* Clear the rate limiters */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* Disable relaxed ordering */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_hw_generic - Generic hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware, filling the bus info
 * structure and media type, clears all on chip counters, initializes receive
 * address registers, multicast table, VLAN filter table, calls routine to set
 * up link and flow control settings, and leaves transmit and receive units
 * disabled and uninitialized
 **/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
	s32 status;

	DEBUGFUNC("ixgbe_init_hw_generic");

	/* Reset the hardware */
	status = hw->mac.ops.reset_hw(hw);

	if (status == IXGBE_SUCCESS) {
		/* Start the HW */
		status = hw->mac.ops.start_hw(hw);
	}

	/* Initialize the LED link active for LED blink support */
	hw->mac.ops.init_led_link_act(hw);

	if (status != IXGBE_SUCCESS)
		DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);

	return status;
}

/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware
 * Statistics counters are clear on read.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	/* NOTE: 'i' doubles as the loop index and as scratch storage for
	 * the PHY register reads at the bottom of this function. */
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	/* 82599 and newer moved the link XON/XOFF Rx counters */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority XON/XOFF counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Packet-size-bucket Rx counters */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Packet-size-bucket Tx counters */
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540/X550 keep CRC and LDPC error counts in the PHY; read
	 * (and thus clear) them too. Results are discarded into 'i'. */
	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr; each entry is
		 * a raw nibble here, converted to an ASCII hex digit by the
		 * loop below (so the 0 at [7] becomes the character '0') */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word holds two string bytes, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number from the EEPROM
 *
 * Reads the part number from the EEPROM.
747 **/ 748s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 749{ 750 s32 ret_val; 751 u16 data; 752 753 DEBUGFUNC("ixgbe_read_pba_num_generic"); 754 755 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 756 if (ret_val) { 757 DEBUGOUT("NVM Read Error\n"); 758 return ret_val; 759 } else if (data == IXGBE_PBANUM_PTR_GUARD) { 760 DEBUGOUT("NVM Not supported\n"); 761 return IXGBE_NOT_IMPLEMENTED; 762 } 763 *pba_num = (u32)(data << 16); 764 765 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 766 if (ret_val) { 767 DEBUGOUT("NVM Read Error\n"); 768 return ret_val; 769 } 770 *pba_num |= data; 771 772 return IXGBE_SUCCESS; 773} 774 775/** 776 * ixgbe_read_pba_raw 777 * @hw: pointer to the HW structure 778 * @eeprom_buf: optional pointer to EEPROM image 779 * @eeprom_buf_size: size of EEPROM image in words 780 * @max_pba_block_size: PBA block size limit 781 * @pba: pointer to output PBA structure 782 * 783 * Reads PBA from EEPROM image when eeprom_buf is not NULL. 784 * Reads PBA from physical EEPROM device when eeprom_buf is NULL. 
785 * 786 **/ 787s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, 788 u32 eeprom_buf_size, u16 max_pba_block_size, 789 struct ixgbe_pba *pba) 790{ 791 s32 ret_val; 792 u16 pba_block_size; 793 794 if (pba == NULL) 795 return IXGBE_ERR_PARAM; 796 797 if (eeprom_buf == NULL) { 798 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, 799 &pba->word[0]); 800 if (ret_val) 801 return ret_val; 802 } else { 803 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { 804 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; 805 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; 806 } else { 807 return IXGBE_ERR_PARAM; 808 } 809 } 810 811 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { 812 if (pba->pba_block == NULL) 813 return IXGBE_ERR_PARAM; 814 815 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf, 816 eeprom_buf_size, 817 &pba_block_size); 818 if (ret_val) 819 return ret_val; 820 821 if (pba_block_size > max_pba_block_size) 822 return IXGBE_ERR_PARAM; 823 824 if (eeprom_buf == NULL) { 825 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1], 826 pba_block_size, 827 pba->pba_block); 828 if (ret_val) 829 return ret_val; 830 } else { 831 if (eeprom_buf_size > (u32)(pba->word[1] + 832 pba_block_size)) { 833 memcpy(pba->pba_block, 834 &eeprom_buf[pba->word[1]], 835 pba_block_size * sizeof(u16)); 836 } else { 837 return IXGBE_ERR_PARAM; 838 } 839 } 840 } 841 842 return IXGBE_SUCCESS; 843} 844 845/** 846 * ixgbe_write_pba_raw 847 * @hw: pointer to the HW structure 848 * @eeprom_buf: optional pointer to EEPROM image 849 * @eeprom_buf_size: size of EEPROM image in words 850 * @pba: pointer to PBA structure 851 * 852 * Writes PBA to EEPROM image when eeprom_buf is not NULL. 853 * Writes PBA to physical EEPROM device when eeprom_buf is NULL. 
854 * 855 **/ 856s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, 857 u32 eeprom_buf_size, struct ixgbe_pba *pba) 858{ 859 s32 ret_val; 860 861 if (pba == NULL) 862 return IXGBE_ERR_PARAM; 863 864 if (eeprom_buf == NULL) { 865 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2, 866 &pba->word[0]); 867 if (ret_val) 868 return ret_val; 869 } else { 870 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { 871 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0]; 872 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1]; 873 } else { 874 return IXGBE_ERR_PARAM; 875 } 876 } 877 878 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { 879 if (pba->pba_block == NULL) 880 return IXGBE_ERR_PARAM; 881 882 if (eeprom_buf == NULL) { 883 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1], 884 pba->pba_block[0], 885 pba->pba_block); 886 if (ret_val) 887 return ret_val; 888 } else { 889 if (eeprom_buf_size > (u32)(pba->word[1] + 890 pba->pba_block[0])) { 891 memcpy(&eeprom_buf[pba->word[1]], 892 pba->pba_block, 893 pba->pba_block[0] * sizeof(u16)); 894 } else { 895 return IXGBE_ERR_PARAM; 896 } 897 } 898 } 899 900 return IXGBE_SUCCESS; 901} 902 903/** 904 * ixgbe_get_pba_block_size 905 * @hw: pointer to the HW structure 906 * @eeprom_buf: optional pointer to EEPROM image 907 * @eeprom_buf_size: size of EEPROM image in words 908 * @pba_data_size: pointer to output variable 909 * 910 * Returns the size of the PBA block in words. Function operates on EEPROM 911 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical 912 * EEPROM device. 
 *
 **/
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
			     u32 eeprom_buf_size, u16 *pba_block_size)
{
	s32 ret_val;
	u16 pba_word[2];
	u16 length;

	DEBUGFUNC("ixgbe_get_pba_block_size");

	/* Fetch the two PBA words from the device or the supplied image */
	if (eeprom_buf == NULL) {
		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
						     &pba_word[0]);
		if (ret_val)
			return ret_val;
	} else {
		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
		} else {
			return IXGBE_ERR_PARAM;
		}
	}

	/* Guard word: pba_word[1] points at the block; its first word is
	 * the block length (in words, including the length word itself) */
	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
		if (eeprom_buf == NULL) {
			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
						      &length);
			if (ret_val)
				return ret_val;
		} else {
			if (eeprom_buf_size > pba_word[1])
				length = eeprom_buf[pba_word[1] + 0];
			else
				return IXGBE_ERR_PARAM;
		}

		if (length == 0xFFFF || length == 0)
			return IXGBE_ERR_PBA_SECTION;
	} else {
		/* PBA number in legacy format, there is no PBA Block. */
		length = 0;
	}

	if (pba_block_size != NULL)
		*pba_block_size = length;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_mac_addr_generic - Generic get MAC address
 * @hw: pointer to hardware structure
 * @mac_addr: Adapter MAC address
 *
 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
 * A reset of the adapter must be performed prior to calling this function
 * in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	DEBUGFUNC("ixgbe_get_mac_addr_generic");

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));

	/* RAL holds the low 4 MAC bytes, RAH the top 2 (little-endian) */
	for (i = 0; i < 4; i++)
		mac_addr[i] = (u8)(rar_low >> (i*8));

	for (i = 0; i < 2; i++)
		mac_addr[i+4] = (u8)(rar_high >> (i*8));

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
 * @hw: pointer to hardware structure
 * @link_status: the link status returned by the PCI config space
 *
 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
 **/
void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	if (hw->bus.type == ixgbe_bus_type_unknown)
		hw->bus.type = ixgbe_bus_type_pci_express;

	/* Decode negotiated link width from the PCIe Link Status register */
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	/* Decode negotiated link speed (2.5/5/8 GT/s = PCIe gen 1/2/3) */
	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	case IXGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ixgbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	mac->ops.set_lan_id(hw);
}

/**
 * ixgbe_get_bus_info_generic - Generic set PCI bus info
 * @hw: pointer to hardware structure
 *
 * Gets the PCI bus info (speed, width, type) then calls helper function to
 * store this data within the ixgbe_hw structure.
 **/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
	u16 link_status;

	DEBUGFUNC("ixgbe_get_bus_info_generic");

	/* Get the negotiated link width and speed from PCI config space */
	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);

	ixgbe_set_pci_config_data_generic(hw, link_status);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 * @hw: pointer to the HW structure
 *
 * Determines the LAN function id by reading memory-mapped registers and swaps
 * the port value if requested, and set MAC instance for devices that share
 * CS4227.
 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u32 reg;
	u16 ee_ctrl_4;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");

	/* LAN function id comes from the STATUS register */
	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = (u8)bus->func;

	/* check for a port swap */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;

	/* Get MAC instance from EEPROM for configuring CS4227 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
	}
}

/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	ixgbe_disable_rx(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}

/**
 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
 * @hw: pointer to hardware structure
 *
 * Store the index for the link active LED. This will be used to support
 * blinking the LED.
 **/
s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 led_reg, led_mode;
	u8 i;

	led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* Get LED link active from the LEDCTL register */
	for (i = 0; i < 4; i++) {
		led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);

		if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
		     IXGBE_LED_LINK_ACTIVE) {
			mac->led_link_act = i;
			return IXGBE_SUCCESS;
		}
	}

	/*
	 * If LEDCTL register does not have the LED link active set, then use
	 * known MAC defaults.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_a:
	case ixgbe_mac_X550EM_x:
		mac->led_link_act = 1;
		break;
	default:
		mac->led_link_act = 2;
	}
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1194 * @hw: pointer to hardware structure 1195 * @index: led number to turn on 1196 **/ 1197s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) 1198{ 1199 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1200 1201 DEBUGFUNC("ixgbe_led_on_generic"); 1202 1203 if (index > 3) 1204 return IXGBE_ERR_PARAM; 1205 1206 /* To turn on the LED, set mode to ON. */ 1207 led_reg &= ~IXGBE_LED_MODE_MASK(index); 1208 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); 1209 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 1210 IXGBE_WRITE_FLUSH(hw); 1211 1212 return IXGBE_SUCCESS; 1213} 1214 1215/** 1216 * ixgbe_led_off_generic - Turns off the software controllable LEDs. 1217 * @hw: pointer to hardware structure 1218 * @index: led number to turn off 1219 **/ 1220s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) 1221{ 1222 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1223 1224 DEBUGFUNC("ixgbe_led_off_generic"); 1225 1226 if (index > 3) 1227 return IXGBE_ERR_PARAM; 1228 1229 /* To turn off the LED, set mode to OFF. */ 1230 led_reg &= ~IXGBE_LED_MODE_MASK(index); 1231 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); 1232 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 1233 IXGBE_WRITE_FLUSH(hw); 1234 1235 return IXGBE_SUCCESS; 1236} 1237 1238/** 1239 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params 1240 * @hw: pointer to hardware structure 1241 * 1242 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 1243 * ixgbe_hw struct in order to set up EEPROM access. 
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	DEBUGFUNC("ixgbe_init_eeprom_params_generic");

	/* Only run once; later calls are no-ops */
	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* 8- vs 16-bit SPI addressing, reported by the EEC register */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to write
 * @words: number of word(s)
 * @data: 16 bit word(s) to write to EEPROM
 *
 * Writes 16 bit word(s) to EEPROM through bit-bang method
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;
	u16 i, count;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth to do that when we write large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		/* count = min(remaining words, max burst size) */
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != IXGBE_SUCCESS)
			break;
	}

out:
	return status;
}

/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI */
			do {
				word = data[i];
				/* swap to big-endian byte order for the wire */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}

/**
 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @data: 16 bit word to be written to the EEPROM
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;

	DEBUGFUNC("ixgbe_write_eeprom_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);

out:
	return status;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit words(s) from EEPROM
 * @words: number of word(s)
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status = IXGBE_SUCCESS;
	u16 i, count;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		/* count = min(remaining words, max burst size) */
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);

		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status != IXGBE_SUCCESS)
			break;
	}

out:
	return status;
}

/**
 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @words: number of word(s)
 * @data: read 16 bit word(s) from EEPROM
 *
 * Reads 16 bit word(s) from EEPROM through bit-bang method
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for reading */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* byte address = word offset * 2 */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data, swapping from wire byte order */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}

/**
 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be read
 * @data: read 16 bit value from EEPROM
 *
 * Reads 16 bit value from EEPROM through bit-bang method
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
				       u16 *data)
{
	s32 status;

	DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");

	hw->eeprom.ops.init_params(hw);

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);

out:
	return status;
}

/**
 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of word(s)
 * @data: 16 bit word(s) from the EEPROM
 *
 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_read_eerd_buffer_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* Program address and kick off the read */
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == IXGBE_SUCCESS) {
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			DEBUGOUT("Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}

/**
 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be used as a scratch pad
 *
 * Discover EEPROM page size by writing marching data at given offset.
 * This function is called only when we are writing a new large buffer
 * at given offset so the data would be overwritten anyway.
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = IXGBE_SUCCESS;
	u16 i;

	DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");

	/* marching pattern 0, 1, 2, ... */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX,
						    data);
	hw->eeprom.word_page_size = 0;
	if (status != IXGBE_SUCCESS)
		goto out;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	DEBUGOUT1("Detected EEPROM page size = %d words.",
		  hw->eeprom.word_page_size);
out:
	return status;
}

/**
 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}

/**
 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @words: number of word(s)
 * @data: word(s) write to the EEPROM
 *
 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	u32 eewr;
	s32 status = IXGBE_SUCCESS;
	u16 i;

	/* NOTE(review): trace tag predates the _buffer_ split; kept as-is
	 * since it is a runtime string */
	DEBUGFUNC("ixgbe_write_eewr_generic");

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
			(data[i] << IXGBE_EEPROM_RW_REG_DATA) |
			IXGBE_EEPROM_RW_REG_START;

		/* Wait for any in-flight write before issuing a new one */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != IXGBE_SUCCESS) {
			DEBUGOUT("Eeprom write EEWR timed out\n");
			goto out;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != IXGBE_SUCCESS) {
			DEBUGOUT("Eeprom write EEWR timed out\n");
			goto out;
		}
	}

out:
	return status;
}

/**
 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to write
 * @data: word write to the EEPROM
 *
 * Write a 16 bit word to the EEPROM using the EEWR register.
 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}

/**
 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
 * @hw: pointer to hardware structure
 * @ee_reg: EEPROM flag for polling
 *
 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
 * read or write is done respectively.
 **/
s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
	u32 i;
	u32 reg;
	s32 status = IXGBE_ERR_EEPROM;

	DEBUGFUNC("ixgbe_poll_eerd_eewr_done");

	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
		if (ee_reg == IXGBE_NVM_POLL_READ)
			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
		else
			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(5);
	}

	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "EEPROM read/write done polling timed out");

	return status;
}

/**
 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 * @hw: pointer to hardware structure
 *
 * Prepares EEPROM for access using bit-bang method. This function should
 * be called before issuing a command to the EEPROM.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 eec;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_eeprom");

	/* SW/FW arbitration first, then the HW grant via EEC.REQ/GNT */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
	    != IXGBE_SUCCESS)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == IXGBE_SUCCESS) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
			if (eec & IXGBE_EEC_GNT)
				break;
			usec_delay(5);
		}

		/* Release if grant not acquired */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			DEBUGOUT("Could not acquire EEPROM grant\n");

			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == IXGBE_SUCCESS) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
			IXGBE_WRITE_FLUSH(hw);
			usec_delay(1);
		}
	}
	return status;
}

/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			ERROR_REPORT1(IXGBE_ERROR_POLLING,
			    "SWESMBI Software EEPROM semaphore not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			     "Software semaphore SMBI between device drivers "
			     "not granted.\n");
	}

	return status;
}

/**
 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	DEBUGFUNC("ixgbe_release_eeprom_semaphore");

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ready_eeprom - Polls for EEPROM ready
 * @hw: pointer to hardware structure
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 i;
	u8 spi_stat_reg;

	DEBUGFUNC("ixgbe_ready_eeprom");

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.
If it's not cleared within 1988 * 5 milliseconds, then error out. 1989 */ 1990 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { 1991 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, 1992 IXGBE_EEPROM_OPCODE_BITS); 1993 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); 1994 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) 1995 break; 1996 1997 usec_delay(5); 1998 ixgbe_standby_eeprom(hw); 1999 }; 2000 2001 /* 2002 * On some parts, SPI write time could vary from 0-20mSec on 3.3V 2003 * devices (and only 0-5mSec on 5V devices) 2004 */ 2005 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { 2006 DEBUGOUT("SPI EEPROM Status error\n"); 2007 status = IXGBE_ERR_EEPROM; 2008 } 2009 2010 return status; 2011} 2012 2013/** 2014 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state 2015 * @hw: pointer to hardware structure 2016 **/ 2017static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) 2018{ 2019 u32 eec; 2020 2021 DEBUGFUNC("ixgbe_standby_eeprom"); 2022 2023 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 2024 2025 /* Toggle CS to flush commands */ 2026 eec |= IXGBE_EEC_CS; 2027 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 2028 IXGBE_WRITE_FLUSH(hw); 2029 usec_delay(1); 2030 eec &= ~IXGBE_EEC_CS; 2031 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 2032 IXGBE_WRITE_FLUSH(hw); 2033 usec_delay(1); 2034} 2035 2036/** 2037 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. 2038 * @hw: pointer to hardware structure 2039 * @data: data to send to the EEPROM 2040 * @count: number of bits to shift out 2041 **/ 2042static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 2043 u16 count) 2044{ 2045 u32 eec; 2046 u32 mask; 2047 u32 i; 2048 2049 DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); 2050 2051 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 2052 2053 /* 2054 * Mask is used to shift "count" bits of "data" out to the EEPROM 2055 * one bit at a time. 
Determine the starting bit based on count 2056 */ 2057 mask = 0x01 << (count - 1); 2058 2059 for (i = 0; i < count; i++) { 2060 /* 2061 * A "1" is shifted out to the EEPROM by setting bit "DI" to a 2062 * "1", and then raising and then lowering the clock (the SK 2063 * bit controls the clock input to the EEPROM). A "0" is 2064 * shifted out to the EEPROM by setting "DI" to "0" and then 2065 * raising and then lowering the clock. 2066 */ 2067 if (data & mask) 2068 eec |= IXGBE_EEC_DI; 2069 else 2070 eec &= ~IXGBE_EEC_DI; 2071 2072 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 2073 IXGBE_WRITE_FLUSH(hw); 2074 2075 usec_delay(1); 2076 2077 ixgbe_raise_eeprom_clk(hw, &eec); 2078 ixgbe_lower_eeprom_clk(hw, &eec); 2079 2080 /* 2081 * Shift mask to signify next bit of data to shift in to the 2082 * EEPROM 2083 */ 2084 mask = mask >> 1; 2085 }; 2086 2087 /* We leave the "DI" bit set to "0" when we leave this routine. */ 2088 eec &= ~IXGBE_EEC_DI; 2089 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); 2090 IXGBE_WRITE_FLUSH(hw); 2091} 2092 2093/** 2094 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM 2095 * @hw: pointer to hardware structure 2096 **/ 2097static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) 2098{ 2099 u32 eec; 2100 u32 i; 2101 u16 data = 0; 2102 2103 DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); 2104 2105 /* 2106 * In order to read a register from the EEPROM, we need to shift 2107 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising 2108 * the clock input to the EEPROM (setting the SK bit), and then reading 2109 * the value of the "DO" bit. During this "shifting in" process the 2110 * "DI" bit should always be clear. 
2111 */ 2112 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 2113 2114 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); 2115 2116 for (i = 0; i < count; i++) { 2117 data = data << 1; 2118 ixgbe_raise_eeprom_clk(hw, &eec); 2119 2120 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 2121 2122 eec &= ~(IXGBE_EEC_DI); 2123 if (eec & IXGBE_EEC_DO) 2124 data |= 1; 2125 2126 ixgbe_lower_eeprom_clk(hw, &eec); 2127 } 2128 2129 return data; 2130} 2131 2132/** 2133 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. 2134 * @hw: pointer to hardware structure 2135 * @eec: EEC register's current value 2136 **/ 2137static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 2138{ 2139 DEBUGFUNC("ixgbe_raise_eeprom_clk"); 2140 2141 /* 2142 * Raise the clock input to the EEPROM 2143 * (setting the SK bit), then delay 2144 */ 2145 *eec = *eec | IXGBE_EEC_SK; 2146 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); 2147 IXGBE_WRITE_FLUSH(hw); 2148 usec_delay(1); 2149} 2150 2151/** 2152 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
        DEBUGFUNC("ixgbe_lower_eeprom_clk");

        /*
         * Lower the clock input to the EEPROM (clearing the SK bit), then
         * delay
         */
        *eec = *eec & ~IXGBE_EEC_SK;
        IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
        IXGBE_WRITE_FLUSH(hw);
        usec_delay(1);
}

/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deselects the EEPROM (CS high, SK low), drops the hardware request bit
 * and releases the software/firmware semaphore taken when the EEPROM was
 * acquired.
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
        u32 eec;

        DEBUGFUNC("ixgbe_release_eeprom");

        eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));

        eec |= IXGBE_EEC_CS;  /* Pull CS high */
        eec &= ~IXGBE_EEC_SK; /* Lower SCK */

        IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
        IXGBE_WRITE_FLUSH(hw);

        usec_delay(1);

        /* Stop requesting EEPROM access */
        eec &= ~IXGBE_EEC_REQ;
        IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);

        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

        /* Delay before attempt to obtain semaphore again to allow FW access */
        msec_delay(hw->eeprom.semaphore_delay);
}

/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums words 0x0-0x3E plus all words reachable through the section
 * pointers (except the firmware pointer), then returns the value that
 * would make the total equal IXGBE_EEPROM_SUM.
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
        u16 i;
        u16 j;
        u16 checksum = 0;
        u16 length = 0;
        u16 pointer = 0;
        u16 word = 0;

        DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");

        /* Include 0x0-0x3F in the checksum */
        for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
                if (hw->eeprom.ops.read(hw, i, &word)) {
                        DEBUGOUT("EEPROM read failed\n");
                        return IXGBE_ERR_EEPROM;
                }
                checksum += word;
        }

        /* Include all data from pointers except for the fw pointer */
        for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
                if (hw->eeprom.ops.read(hw, i, &pointer)) {
                        DEBUGOUT("EEPROM read failed\n");
                        return IXGBE_ERR_EEPROM;
                }

                /* If the pointer seems invalid */
                if (pointer == 0xFFFF || pointer == 0)
                        continue;

                /* First word of each pointed-to section is its length */
                if (hw->eeprom.ops.read(hw, pointer, &length)) {
                        DEBUGOUT("EEPROM read failed\n");
                        return IXGBE_ERR_EEPROM;
                }

                if (length == 0xFFFF || length == 0)
                        continue;

                for (j = pointer + 1; j <= pointer + length; j++) {
                        if (hw->eeprom.ops.read(hw, j, &word)) {
                                DEBUGOUT("EEPROM read failed\n");
                                return IXGBE_ERR_EEPROM;
                        }
                        checksum += word;
                }
        }

        checksum = (u16)IXGBE_EEPROM_SUM - checksum;

        return (s32)checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
                                           u16 *checksum_val)
{
        s32 status;
        u16 checksum;
        u16 read_checksum = 0;

        DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");

        /* Read the first word from the EEPROM. If this times out or fails, do
         * not continue or we could be in for a very long wait while every
         * EEPROM read fails
         */
        status = hw->eeprom.ops.read(hw, 0, &checksum);
        if (status) {
                DEBUGOUT("EEPROM read failed\n");
                return status;
        }

        /* calc_checksum returns the checksum in the low 16 bits on success */
        status = hw->eeprom.ops.calc_checksum(hw);
        if (status < 0)
                return status;

        checksum = (u16)(status & 0xffff);

        status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
        if (status) {
                DEBUGOUT("EEPROM read failed\n");
                return status;
        }

        /* Verify read checksum from EEPROM is the same as
         * calculated checksum
         */
        if (read_checksum != checksum)
                status = IXGBE_ERR_EEPROM_CHECKSUM;

        /* If the user cares, return the calculated checksum */
        if (checksum_val)
                *checksum_val = checksum;

        return status;
}

/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Recomputes the image checksum and writes it to the checksum word.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
        s32 status;
        u16 checksum;

        DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");

        /* Read the first word from the EEPROM. If this times out or fails, do
         * not continue or we could be in for a very long wait while every
         * EEPROM read fails
         */
        status = hw->eeprom.ops.read(hw, 0, &checksum);
        if (status) {
                DEBUGOUT("EEPROM read failed\n");
                return status;
        }

        status = hw->eeprom.ops.calc_checksum(hw);
        if (status < 0)
                return status;

        checksum = (u16)(status & 0xffff);

        status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);

        return status;
}

/**
 * ixgbe_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address.
 *
 * Tests a MAC address to ensure it is a valid Individual Address.
2348 **/ 2349s32 ixgbe_validate_mac_addr(u8 *mac_addr) 2350{ 2351 s32 status = IXGBE_SUCCESS; 2352 2353 DEBUGFUNC("ixgbe_validate_mac_addr"); 2354 2355 /* Make sure it is not a multicast address */ 2356 if (IXGBE_IS_MULTICAST(mac_addr)) { 2357 status = IXGBE_ERR_INVALID_MAC_ADDR; 2358 /* Not a broadcast address */ 2359 } else if (IXGBE_IS_BROADCAST(mac_addr)) { 2360 status = IXGBE_ERR_INVALID_MAC_ADDR; 2361 /* Reject the zero address */ 2362 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 2363 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { 2364 status = IXGBE_ERR_INVALID_MAC_ADDR; 2365 } 2366 return status; 2367} 2368 2369/** 2370 * ixgbe_set_rar_generic - Set Rx address register 2371 * @hw: pointer to hardware structure 2372 * @index: Receive address register to write 2373 * @addr: Address to put into receive address register 2374 * @vmdq: VMDq "set" or "pool" index 2375 * @enable_addr: set flag that address is active 2376 * 2377 * Puts an ethernet address into a receive address register. 
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
                          u32 enable_addr)
{
        u32 rar_low, rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;

        DEBUGFUNC("ixgbe_set_rar_generic");

        /* Make sure we are using a valid rar index range */
        if (index >= rar_entries) {
                ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
                             "RAR index %d is out of range.\n", index);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        /* setup VMDq pool selection before this RAR gets enabled */
        hw->mac.ops.set_vmdq(hw, index, vmdq);

        /*
         * HW expects these in little endian so we reverse the byte
         * order from network order (big endian) to little endian
         */
        rar_low = ((u32)addr[0] |
                   ((u32)addr[1] << 8) |
                   ((u32)addr[2] << 16) |
                   ((u32)addr[3] << 24));
        /*
         * Some parts put the VMDq setting in the extra RAH bits,
         * so save everything except the lower 16 bits that hold part
         * of the address and the address valid bit.
         */
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
        rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
        rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));

        /* AV marks the entry as active; leave it clear to stage an address */
        if (enable_addr != 0)
                rar_high |= IXGBE_RAH_AV;

        IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
        u32 rar_high;
        u32 rar_entries = hw->mac.num_rar_entries;

        DEBUGFUNC("ixgbe_clear_rar_generic");

        /* Make sure we are using a valid rar index range */
        if (index >= rar_entries) {
                ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
                             "RAR index %d is out of range.\n", index);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }

        /*
         * Some parts put the VMDq setting in the extra RAH bits,
         * so save everything except the lower 16 bits that hold part
         * of the address and the address valid bit.
         */
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
        rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

        IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

        /* clear VMDq pool/queue selection for this RAR */
        hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
        u32 i;
        u32 rar_entries = hw->mac.num_rar_entries;

        DEBUGFUNC("ixgbe_init_rx_addrs_generic");

        /*
         * If the current mac address is valid, assume it is a software override
         * to the permanent address.
         * Otherwise, use the permanent address from the eeprom.
         */
        if (ixgbe_validate_mac_addr(hw->mac.addr) ==
            IXGBE_ERR_INVALID_MAC_ADDR) {
                /* Get the MAC address from the RAR0 for later reference */
                hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

                DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
                          hw->mac.addr[0], hw->mac.addr[1],
                          hw->mac.addr[2]);
                DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
                          hw->mac.addr[4], hw->mac.addr[5]);
        } else {
                /* Setup the receive address. */
                DEBUGOUT("Overriding MAC Address in RAR[0]\n");
                DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
                          hw->mac.addr[0], hw->mac.addr[1],
                          hw->mac.addr[2]);
                DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
                          hw->mac.addr[4], hw->mac.addr[5]);

                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
        }

        /* clear VMDq pool/queue selection for RAR 0 */
        hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

        hw->addr_ctrl.overflow_promisc = 0;

        /* RAR[0] now holds the MAC address; the rest are free */
        hw->addr_ctrl.rar_used_count = 1;

        /* Zero out the other receive addresses. */
        DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
        for (i = 1; i < rar_entries; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
        }

        /* Clear the MTA */
        hw->addr_ctrl.mta_in_use = 0;
        IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

        DEBUGOUT(" Clearing MTA\n");
        for (i = 0; i < hw->mac.mcft_size; i++)
                IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

        ixgbe_init_uta_tables(hw);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_add_uc_addr - Adds a secondary unicast address.
 * @hw: pointer to hardware structure
 * @addr: new address
 * @vmdq: VMDq "set" or "pool" index for this address
 *
 * Adds it to unused receive address register or goes into promiscuous mode.
2536 **/ 2537void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 2538{ 2539 u32 rar_entries = hw->mac.num_rar_entries; 2540 u32 rar; 2541 2542 DEBUGFUNC("ixgbe_add_uc_addr"); 2543 2544 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", 2545 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); 2546 2547 /* 2548 * Place this address in the RAR if there is room, 2549 * else put the controller into promiscuous mode 2550 */ 2551 if (hw->addr_ctrl.rar_used_count < rar_entries) { 2552 rar = hw->addr_ctrl.rar_used_count; 2553 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 2554 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); 2555 hw->addr_ctrl.rar_used_count++; 2556 } else { 2557 hw->addr_ctrl.overflow_promisc++; 2558 } 2559 2560 DEBUGOUT("ixgbe_add_uc_addr Complete\n"); 2561} 2562 2563/** 2564 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 2565 * @hw: pointer to hardware structure 2566 * @addr_list: the list of new addresses 2567 * @addr_count: number of addresses 2568 * @next: iterator function to walk the address list 2569 * 2570 * The given list replaces any existing list. Clears the secondary addrs from 2571 * receive address registers. Uses unused receive address registers for the 2572 * first secondary addresses, and falls back to promiscuous mode as needed. 2573 * 2574 * Drivers using secondary unicast addresses must set user_set_promisc when 2575 * manually putting the device into promiscuous mode. 
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
                                      u32 addr_count, ixgbe_mc_addr_itr next)
{
        u8 *addr;
        u32 i;
        u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
        u32 uc_addr_in_use;
        u32 fctrl;
        u32 vmdq;

        DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

        /*
         * Clear accounting of old secondary address list,
         * don't count RAR[0]
         */
        uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
        hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
        hw->addr_ctrl.overflow_promisc = 0;

        /* Zero out the other receive addresses */
        DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
        for (i = 0; i < uc_addr_in_use; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
        }

        /* Add the new addresses */
        for (i = 0; i < addr_count; i++) {
                DEBUGOUT(" Adding the secondary addresses:\n");
                addr = next(hw, &addr_list, &vmdq);
                ixgbe_add_uc_addr(hw, addr, vmdq);
        }

        /* add_uc_addr bumped overflow_promisc if the RARs were exhausted */
        if (hw->addr_ctrl.overflow_promisc) {
                /* enable promisc if not already in overflow or set by user */
                if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
                        DEBUGOUT(" Entering address overflow promisc mode\n");
                        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                        fctrl |= IXGBE_FCTRL_UPE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
                }
        } else {
                /* only disable if set by overflow, not by user */
                if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
                        DEBUGOUT(" Leaving address overflow promisc mode\n");
                        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                        fctrl &= ~IXGBE_FCTRL_UPE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
                }
        }

        DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
        return IXGBE_SUCCESS;
}

/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
        u32 vector = 0;

        DEBUGFUNC("ixgbe_mta_vector");

        switch (hw->mac.mc_filter_type) {
        case 0:   /* use bits [47:36] of the address */
                vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
                break;
        case 1:   /* use bits [46:35] of the address */
                vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
                break;
        case 2:   /* use bits [45:34] of the address */
                vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
                break;
        case 3:   /* use bits [43:32] of the address */
                vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
                break;
        default:  /* Invalid mc_filter_type */
                DEBUGOUT("MC filter type param set incorrectly\n");
                ASSERT(0);
                break;
        }

        /* vector can only be 12-bits or boundary will be exceeded */
        vector &= 0xFFF;
        return vector;
}

/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: multicast address whose hash selects the bit-vector
 *
 * Sets the bit-vector in the multicast table.
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
        u32 vector;
        u32 vector_bit;
        u32 vector_reg;

        DEBUGFUNC("ixgbe_set_mta");

        hw->addr_ctrl.mta_in_use++;

        vector = ixgbe_mta_vector(hw, mc_addr);
        DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

        /*
         * The MTA is a register array of 128 32-bit registers. It is treated
         * like an array of 4096 bits. We want to set bit
         * BitArray[vector_value]. So we figure out what register the bit is
         * in, read it, OR in the new bit, then write back the new value. The
         * register is determined by the upper 7 bits of the vector value and
         * the bit within that register are determined by the lower 5 bits of
         * the value.
         */
        vector_reg = (vector >> 5) & 0x7F;
        vector_bit = vector & 0x1F;
        /* only the shadow is updated here; the MTA registers are written
         * when the whole list is flushed in update_mc_addr_list */
        hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @mc_addr_list: the list of new multicast addresses
 * @mc_addr_count: number of addresses
 * @next: iterator function to walk the multicast address list
 * @clear: flag, when set clears the table beforehand
 *
 * When the clear flag is set, the given list replaces any existing list.
 * Hashes the given addresses into the multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
                                      u32 mc_addr_count, ixgbe_mc_addr_itr next,
                                      bool clear)
{
        u32 i;
        u32 vmdq;

        DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

        /*
         * Set the new number of MC addresses that we are being requested to
         * use.
         */
        hw->addr_ctrl.num_mc_addrs = mc_addr_count;
        hw->addr_ctrl.mta_in_use = 0;

        /* Clear mta_shadow */
        if (clear) {
                DEBUGOUT(" Clearing MTA\n");
                memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
        }

        /* Update mta_shadow */
        for (i = 0; i < mc_addr_count; i++) {
                DEBUGOUT(" Adding the multicast addresses:\n");
                ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
        }

        /* Enable mta */
        for (i = 0; i < hw->mac.mcft_size; i++)
                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
                                      hw->mac.mta_shadow[i]);

        if (hw->addr_ctrl.mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
                                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

        DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
        return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

        DEBUGFUNC("ixgbe_enable_mc_generic");

        /* only re-enable the filter when hash entries are populated */
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
                                hw->mac.mc_filter_type);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_disable_mc_generic - Disable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Disables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;

        DEBUGFUNC("ixgbe_disable_mc_generic");

        /* writing mc_filter_type without MFE clears the enable bit */
        if (a->mta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
        s32 ret_val = IXGBE_SUCCESS;
        u32 mflcn_reg, fccfg_reg;
        u32 reg;
        u32 fcrtl, fcrth;
        int i;

        DEBUGFUNC("ixgbe_fc_enable_generic");

        /* Validate the water mark configuration */
        if (!hw->fc.pause_time) {
                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                goto out;
        }

        /* Low water mark of zero causes XOFF floods */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
                    hw->fc.high_water[i]) {
                        if (!hw->fc.low_water[i] ||
                            hw->fc.low_water[i] >= hw->fc.high_water[i]) {
                                DEBUGOUT("Invalid water mark configuration\n");
                                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                                goto out;
                        }
                }
        }

        /* Negotiate the fc mode to use */
        hw->mac.ops.fc_autoneg(hw);

        /* Disable any previous flow control settings */
        mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
        mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

        fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
        fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

        /*
         * The possible values of fc.current_mode are:
         * 0: Flow control is completely disabled
         * 1: Rx flow control is enabled (we can receive pause frames,
         *    but not send pause frames).
         * 2: Tx flow control is enabled (we can send pause frames but
         *    we do not support receiving pause frames).
         * 3: Both Rx and Tx flow control (symmetric) are enabled.
         * other: Invalid.
         */
        switch (hw->fc.current_mode) {
        case ixgbe_fc_none:
                /*
                 * Flow control is disabled by software override or autoneg.
                 * The code below will actually disable it in the HW.
                 */
                break;
        case ixgbe_fc_rx_pause:
                /*
                 * Rx Flow control is enabled and Tx Flow control is
                 * disabled by software override. Since there really
                 * isn't a way to advertise that we are capable of RX
                 * Pause ONLY, we will advertise that we support both
                 * symmetric and asymmetric Rx PAUSE. Later, we will
                 * disable the adapter's ability to send PAUSE frames.
                 */
                mflcn_reg |= IXGBE_MFLCN_RFCE;
                break;
        case ixgbe_fc_tx_pause:
                /*
                 * Tx Flow control is enabled, and Rx Flow control is
                 * disabled by software override.
                 */
                fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
                break;
        case ixgbe_fc_full:
                /* Flow control (both Rx and Tx) is enabled by SW override. */
                mflcn_reg |= IXGBE_MFLCN_RFCE;
                fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
                break;
        default:
                ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
                             "Flow control param set incorrectly\n");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
                break;  /* unreachable after goto; kept for switch symmetry */
        }

        /* Set 802.3x based flow control settings. */
        mflcn_reg |= IXGBE_MFLCN_DPF;
        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


        /* Set up and enable Rx high/low water mark thresholds, enable XON. */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
                    hw->fc.high_water[i]) {
                        fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
                        fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                } else {
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                        /*
                         * In order to prevent Tx hangs when the internal Tx
                         * switch is enabled we must set the high water mark
                         * to the Rx packet buffer size - 24KB. This allows
                         * the Tx switch to function even under heavy Rx
                         * workloads.
                         */
                        fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
                }

                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
        }

        /* Configure pause time (2 TCs per register) */
        reg = hw->fc.pause_time * 0x00010001;
        for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
                IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

        /* Configure flow control refresh threshold value */
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
        return ret_val;
}

/**
 * ixgbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings
 **/
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
                       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
        if ((!(adv_reg)) || (!(lp_reg))) {
                ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
                             "Local or link partner's advertised flow control "
                             "settings are NULL. Local: %x, link partner: %x\n",
                             adv_reg, lp_reg);
                return IXGBE_ERR_FC_NOT_NEGOTIATED;
        }

        if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
                /*
                 * Now we need to check if the user selected Rx ONLY
                 * of pause frames. In this case, we had to advertise
                 * FULL flow control because we could not advertise RX
                 * ONLY. Hence, we must now check to see if we need to
                 * turn OFF the TRANSMISSION of PAUSE frames.
                 */
                if (hw->fc.requested_mode == ixgbe_fc_full) {
                        hw->fc.current_mode = ixgbe_fc_full;
                        DEBUGOUT("Flow Control = FULL.\n");
                } else {
                        hw->fc.current_mode = ixgbe_fc_rx_pause;
                        DEBUGOUT("Flow Control=RX PAUSE frames only\n");
                }
        } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
                   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
                hw->fc.current_mode = ixgbe_fc_tx_pause;
                DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
        } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
                   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
                hw->fc.current_mode = ixgbe_fc_rx_pause;
                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
        } else {
                hw->fc.current_mode = ixgbe_fc_none;
                DEBUGOUT("Flow Control = NONE.\n");
        }
        return IXGBE_SUCCESS;
}

/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control according on 1 gig fiber.
 **/
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
        u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
        s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

        /*
         * On multispeed fiber at 1g, bail out if
         * - link is up but AN did not complete, or if
         * - link is up and AN completed but timed out
         */

        linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
        if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
            (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
                DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
                goto out;
        }

        /* resolve our advertisement against the link partner's */
        pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
        pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

        ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
                                     pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
                                     IXGBE_PCS1GANA_ASM_PAUSE,
                                     IXGBE_PCS1GANA_SYM_PAUSE,
                                     IXGBE_PCS1GANA_ASM_PAUSE);

out:
        return ret_val;
}

/**
 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
        u32 links2, anlp1_reg, autoc_reg, links;
        s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

        /*
         * On backplane, bail out if
         * - backplane autoneg was not completed, or if
         * - we are 82599 and link partner is not AN enabled
         */
        links = IXGBE_READ_REG(hw, IXGBE_LINKS);
        if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
                DEBUGOUT("Auto-Negotiation did not complete\n");
                goto out;
        }

        if (hw->mac.type == ixgbe_mac_82599EB) {
                links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
                if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
                        DEBUGOUT("Link partner is not AN enabled\n");
                        goto out;
                }
        }
        /*
         * Read the 10g AN autoc and LP ability registers and resolve
         * local flow control settings accordingly
         */
        autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

        ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
                anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
                IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

out:
        return ret_val;
}

/**
 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
        u16 technology_ability_reg = 0;
        u16 lp_technology_ability_reg = 0;

        /* read our advertisement and the link partner's via MDIO */
        hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                             &technology_ability_reg);
        hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
                             &lp_technology_ability_reg);

        return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
                                  (u32)lp_technology_ability_reg,
                                  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
                                  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}

/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
        s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
        ixgbe_link_speed speed;
        bool link_up;

        DEBUGFUNC("ixgbe_fc_autoneg");

        /*
         * AN should have completed when the cable was plugged in.
         * Look for reasons to bail out. Bail out if:
         * - FC autoneg is disabled, or if
         * - link is not up.
         */
        if (hw->fc.disable_fc_autoneg) {
                ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
                             "Flow control autoneg is disabled");
                goto out;
        }

        hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
        if (!link_up) {
                ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
                goto out;
        }

        /* dispatch on media type to the matching negotiation routine */
        switch (hw->phy.media_type) {
        /* Autoneg flow control on fiber adapters */
        case ixgbe_media_type_fiber_fixed:
        case ixgbe_media_type_fiber_qsfp:
        case ixgbe_media_type_fiber:
                if (speed == IXGBE_LINK_SPEED_1GB_FULL)
                        ret_val = ixgbe_fc_autoneg_fiber(hw);
                break;

        /* Autoneg flow control on backplane adapters */
        case ixgbe_media_type_backplane:
                ret_val = ixgbe_fc_autoneg_backplane(hw);
                break;

        /* Autoneg flow control on copper adapters */
        case ixgbe_media_type_copper:
                if (ixgbe_device_supports_autoneg_fc(hw))
                        ret_val = ixgbe_fc_autoneg_copper(hw);
                break;

        default:
                break;
        }

out:
        /* on any failure, fall back to the mode the user requested */
        if (ret_val == IXGBE_SUCCESS) {
                hw->fc.fc_was_autonegged = TRUE;
        } else {
                hw->fc.fc_was_autonegged = FALSE;
                hw->fc.current_mode = hw->fc.requested_mode;
        }
}

/*
 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
 * @hw: pointer to hardware structure
 *
 * System-wide timeout range is encoded in PCIe Device Control2 register.
 *
 * Add 10% to specified maximum and return the number of times to poll for
 * completion timeout, in units of 100 microsec. Never return less than
 * 800 = 80 millisec.
3165 */ 3166static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) 3167{ 3168 s16 devctl2; 3169 u32 pollcnt; 3170 3171 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); 3172 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; 3173 3174 switch (devctl2) { 3175 case IXGBE_PCIDEVCTRL2_65_130ms: 3176 pollcnt = 1300; /* 130 millisec */ 3177 break; 3178 case IXGBE_PCIDEVCTRL2_260_520ms: 3179 pollcnt = 5200; /* 520 millisec */ 3180 break; 3181 case IXGBE_PCIDEVCTRL2_1_2s: 3182 pollcnt = 20000; /* 2 sec */ 3183 break; 3184 case IXGBE_PCIDEVCTRL2_4_8s: 3185 pollcnt = 80000; /* 8 sec */ 3186 break; 3187 case IXGBE_PCIDEVCTRL2_17_34s: 3188 pollcnt = 34000; /* 34 sec */ 3189 break; 3190 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ 3191 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ 3192 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ 3193 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ 3194 default: 3195 pollcnt = 800; /* 80 millisec minimum */ 3196 break; 3197 } 3198 3199 /* add 10% to spec maximum */ 3200 return (pollcnt * 11) / 10; 3201} 3202 3203/** 3204 * ixgbe_disable_pcie_master - Disable PCI-express master access 3205 * @hw: pointer to hardware structure 3206 * 3207 * Disables PCI-Express master access and verifies there are no pending 3208 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable 3209 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS 3210 * is returned signifying master requests disabled. 
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i, poll;
	u16 value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer skip the PCIe transaction-pending poll below */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		     "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}

/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash).  Returns IXGBE_ERR_SWFW_SYNC
 * if the resource could not be acquired within the timeout.
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 5;	/* FW-owned bits sit 5 above the SW bits */
	u32 timeout = 200;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* free: claim it, drop the guard semaphore, done */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}

/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify
 * which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr;
	u32 swmask = mask;

	DEBUGFUNC("ixgbe_release_swfw_sync");

	/* the EEPROM semaphore guards all GSSR updates (see acquire) */
	ixgbe_get_eeprom_semaphore(hw);

	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
	gssr &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
}

/**
 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
 * @hw: pointer to hardware structure
 *
 * Stops the receive data path and waits for the HW to internally empty
 * the Rx security block
 **/
s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECRX_POLL 40

	int i;
	int secrxreg;

	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");


	/* request Rx-disable, then poll SECRXSTAT until HW reports ready */
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		else
			/* Use interrupt-safe sleep just in case */
			usec_delay(1000);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECRX_POLL)
		DEBUGOUT("Rx unit being enabled before security "
			 "path fully disabled. Continuing with init.\n");

	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: set to FALSE; no SW/FW lock is taken in the generic case
 * @reg_val: Value we read from AUTOC
 *
 * The default case requires no protection so just do the register read.
 */
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	*locked = FALSE;
	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @reg_val: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	    previous read (unused in the generic case).
 *
 * The default case requires no protection so just do the register write.
 */
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
	UNREFERENCED_1PARAMETER(locked);

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
 * @hw: pointer to hardware structure
 *
 * Enables the receive data path.
 **/
s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
{
	u32 secrxreg;

	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");

	/* clear the Rx-disable bit set by disable_sec_rx_path */
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit
 **/
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
	DEBUGFUNC("ixgbe_enable_rx_dma_generic");

	/* only the RXEN bit of the requested value is honored here */
	if (regval & IXGBE_RXCTRL_RXEN)
		ixgbe_enable_rx(hw);
	else
		ixgbe_disable_rx(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink (valid range 0..3)
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	if (index > 3)
		return IXGBE_ERR_PARAM;

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);

	if (!link_up) {
		/* force link up (FLU) and restart AN so blink has a link */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;

		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		IXGBE_WRITE_FLUSH(hw);
		msec_delay(10);
	}

	/* switch this LED's mode field to blink */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}

/**
 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking (valid range 0..3)
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool locked = FALSE;

	DEBUGFUNC("ixgbe_blink_led_stop_generic");

	if (index > 3)
		return IXGBE_ERR_PARAM;


	/* undo the forced-link-up (FLU) state set by blink_led_start */
	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;

	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
	if (ret_val != IXGBE_SUCCESS)
		goto out;

	/* restore the LED to link-activity mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}

/**
 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_offset: SAN MAC address offset
 *
 * This function will read the EEPROM location for the SAN MAC address
 * pointer, and returns the value at that location.  This is used in both
 * get and set mac_addr routines.
 **/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset)
{
	s32 ret_val;

	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.
3557 */ 3558 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, 3559 san_mac_offset); 3560 if (ret_val) { 3561 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 3562 "eeprom at offset %d failed", 3563 IXGBE_SAN_MAC_ADDR_PTR); 3564 } 3565 3566 return ret_val; 3567} 3568 3569/** 3570 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM 3571 * @hw: pointer to hardware structure 3572 * @san_mac_addr: SAN MAC address 3573 * 3574 * Reads the SAN MAC address from the EEPROM, if it's available. This is 3575 * per-port, so set_lan_id() must be called before reading the addresses. 3576 * set_lan_id() is called by identify_sfp(), but this cannot be relied 3577 * upon for non-SFP connections, so we must call it here. 3578 **/ 3579s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 3580{ 3581 u16 san_mac_data, san_mac_offset; 3582 u8 i; 3583 s32 ret_val; 3584 3585 DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); 3586 3587 /* 3588 * First read the EEPROM pointer to see if the MAC addresses are 3589 * available. If they're not, no point in calling set_lan_id() here. 3590 */ 3591 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 3592 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 3593 goto san_mac_addr_out; 3594 3595 /* make sure we know which port we need to program */ 3596 hw->mac.ops.set_lan_id(hw); 3597 /* apply the port offset to the address offset */ 3598 (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 3599 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 3600 for (i = 0; i < 3; i++) { 3601 ret_val = hw->eeprom.ops.read(hw, san_mac_offset, 3602 &san_mac_data); 3603 if (ret_val) { 3604 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, 3605 "eeprom read at offset %d failed", 3606 san_mac_offset); 3607 goto san_mac_addr_out; 3608 } 3609 san_mac_addr[i * 2] = (u8)(san_mac_data); 3610 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); 3611 san_mac_offset++; 3612 } 3613 return IXGBE_SUCCESS; 3614 3615san_mac_addr_out: 3616 /* 3617 * No addresses available in this EEPROM. It's not an 3618 * error though, so just wipe the local address and return. 3619 */ 3620 for (i = 0; i < 6; i++) 3621 san_mac_addr[i] = 0xFF; 3622 return IXGBE_SUCCESS; 3623} 3624 3625/** 3626 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM 3627 * @hw: pointer to hardware structure 3628 * @san_mac_addr: SAN MAC address 3629 * 3630 * Write a SAN MAC address to the EEPROM. 3631 **/ 3632s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) 3633{ 3634 s32 ret_val; 3635 u16 san_mac_data, san_mac_offset; 3636 u8 i; 3637 3638 DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); 3639 3640 /* Look for SAN mac address pointer. If not defined, return */ 3641 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); 3642 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) 3643 return IXGBE_ERR_NO_SAN_ADDR_PTR; 3644 3645 /* Make sure we know which port we need to write */ 3646 hw->mac.ops.set_lan_id(hw); 3647 /* Apply the port offset to the address offset */ 3648 (hw->bus.func) ? 
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : 3649 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); 3650 3651 for (i = 0; i < 3; i++) { 3652 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); 3653 san_mac_data |= (u16)(san_mac_addr[i * 2]); 3654 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); 3655 san_mac_offset++; 3656 } 3657 3658 return IXGBE_SUCCESS; 3659} 3660 3661/** 3662 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count 3663 * @hw: pointer to hardware structure 3664 * 3665 * Read PCIe configuration space, and get the MSI-X vector count from 3666 * the capabilities table. 3667 **/ 3668u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 3669{ 3670 u16 msix_count = 1; 3671 u16 max_msix_count; 3672 u16 pcie_offset; 3673 3674 switch (hw->mac.type) { 3675 case ixgbe_mac_82598EB: 3676 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; 3677 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; 3678 break; 3679 case ixgbe_mac_82599EB: 3680 case ixgbe_mac_X540: 3681 case ixgbe_mac_X550: 3682 case ixgbe_mac_X550EM_x: 3683 case ixgbe_mac_X550EM_a: 3684 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; 3685 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; 3686 break; 3687 default: 3688 return msix_count; 3689 } 3690 3691 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); 3692 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); 3693 if (IXGBE_REMOVED(hw->hw_addr)) 3694 msix_count = 0; 3695 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 3696 3697 /* MSI-X count is zero-based in HW */ 3698 msix_count++; 3699 3700 if (msix_count > max_msix_count) 3701 msix_count = max_msix_count; 3702 3703 return msix_count; 3704} 3705 3706/** 3707 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address 3708 * @hw: pointer to hardware structure 3709 * @addr: Address to put into receive address register 3710 * @vmdq: VMDq pool to assign 3711 * 3712 * Puts an ethernet address into a receive address register, or 3713 * finds the rar that it is aleady in; adds to the pool 
 * list
 **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low = addr[0] | (addr[1] << 8)
		 | (addr[2] << 16)
		 | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;	/* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	/* on success the return value is the RAR index used */
	return rar;
}

/**
 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar, or IXGBE_CLEAR_VMDQ_ALL
 **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_vmdq_generic");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			     "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	/* skip the register writes if the device has been detached */
	if (IXGBE_REMOVED(hw->hw_addr))
		goto done;

	if (!mpsar_lo && !mpsar_hi)
		goto done;

	/* pools 0-31 live in MPSAR_LO, pools 32-63 in MPSAR_HI */
	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		mpsar_lo &= ~(1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		mpsar_hi &= ~(1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 &&
	    rar != 0 && rar != hw->mac.san_mac_rar_index)
		hw->mac.ops.clear_rar(hw, rar);
done:
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq pool index
 **/
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_vmdq_generic");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			     "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* pools 0-31 live in MPSAR_LO, pools 32-63 in MPSAR_HI */
	if (vmdq < 32) {
		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
		mpsar |= 1 << vmdq;
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
	} else {
		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
		mpsar |= 1 << (vmdq - 32);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
	}
	return IXGBE_SUCCESS;
}

/**
 * This function should only be involved in the IOV mode.
 * In IOV mode, Default pool is next pool after the number of
 * VFs advertised and not 0.
 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
 *
 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @vmdq: VMDq pool index
 **/
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
	u32 rar = hw->mac.san_mac_rar_index;

	DEBUGFUNC("ixgbe_set_vmdq_san_mac");

	/* the SAN MAC RAR tracks exactly one pool: overwrite both halves */
	if (vmdq < 32) {
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
	int i;

	DEBUGFUNC("ixgbe_init_uta_tables_generic");
	DEBUGOUT(" Clearing UTA\n");

	/* the UTA is 128 32-bit registers; zero them all */
	for (i = 0; i < 128; i++)
		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vlvf_bypass: TRUE to return IXGBE_ERR_NO_SPACE rather than claiming an
 *		 empty slot when the VLAN is not already present
 *
 * return the VLVF index where this VLAN id should be placed
 *
 **/
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;

	/* add VLAN enable bit for comparison */
	vlan |= IXGBE_VLVF_VIEN;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
	 */
	for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN.  Return first empty
	 * slot we found during our search, else error.
	 */
	if (!first_empty_slot)
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");

	return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
}

/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
 * @vlan_on: boolean flag to turn on/off VLAN
 * @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool vlvf_bypass)
{
	u32 regidx, vfta_delta, vfta;
	s32 ret_val;

	DEBUGFUNC("ixgbe_set_vfta_generic");

	/* 12-bit VLAN id, 64 pools max */
	if (vlan > 4095 || vind > 63)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = 1 << (vlan % 32);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));

	/*
	 * vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is an XOR mask we can just update the vfta using an XOR
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
	 */
	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
					 vfta, vlvf_bypass);
	if (ret_val != IXGBE_SUCCESS) {
		/* with vlvf_bypass a VLVF failure is tolerated and the
		 * VFTA is still updated; otherwise propagate the error
		 */
		if (vlvf_bypass)
			goto vfta_update;
		return ret_val;
	}

vfta_update:
	/* Update VFTA now that we are ready for traffic */
	if (vfta_delta)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
 * @vfta_delta: pointer to the difference between the current value of VFTA
 *		 and the desired value
 * @vfta: the desired value of the VFTA
 * @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 * Turn on/off specified bit in VLVF table.
 **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, u32 *vfta_delta, u32 vfta,
			   bool vlvf_bypass)
{
	u32 bits;
	s32 vlvf_index;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	if (vlan > 4095 || vind > 63)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
		return IXGBE_SUCCESS;

	vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0)
		return vlvf_index;

	/* each VLVF slot has a pair of VLVFB registers; vind picks one */
	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));

	/* set the pool bit */
	bits |= 1 << (vind % 32);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit */
	bits ^= 1 << (vind % 32);

	if (!bits &&
	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
		/* Clear VFTA first, then disable VLVF.  Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (*vfta_delta)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);

		/* disable VLVF and clear remaining bit from pool */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);

		return IXGBE_SUCCESS;
	}

	/* If there are still bits set in the VLVFB registers
	 * for the VLAN ID indicated we need to see if the
	 * caller is requesting that we clear the VFTA entry bit.
	 * If the caller has requested that we clear the VFTA
	 * entry bit but there are still pools/VFs using this VLAN
	 * ID entry then ignore the request.  We're not worried
	 * about the case where we're turning the VFTA VLAN ID
	 * entry bit on, only when requested to turn it off as
	 * there may be multiple pools and/or VFs using the
	 * VLAN ID entry.  In that case we cannot clear the
	 * VFTA bit until all pools/VFs using that VLAN ID have also
	 * been cleared.  This will be indicated by "bits" being
	 * zero.
	 */
	*vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
	IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vfta_generic - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
	u32 offset;

	DEBUGFUNC("ixgbe_clear_vfta_generic");

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	/* each VLVF entry owns a pair of VLVFB registers */
	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
 * @hw: pointer to hardware structure
 *
 * Contains the logic to identify if we need to verify link for the
 * crosstalk fix
 **/
static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
{

	/* Does FW say we need the fix */
	if (!hw->need_crosstalk_fix)
		return FALSE;

	/* Only consider SFP+ PHYs i.e.
	   media type fiber */
	switch (hw->mac.ops.get_media_type(hw)) {
	case ixgbe_media_type_fiber:
	case ixgbe_media_type_fiber_qsfp:
		break;
	default:
		return FALSE;
	}

	return TRUE;
}

/**
 * ixgbe_check_mac_link_generic - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: TRUE when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* If Crosstalk fix enabled do the sanity check of making sure
	 * the SFP+ cage is full.
	 */
	if (ixgbe_need_crosstalk_fix(hw)) {
		u32 sfp_cage_full;

		/* the cage-present signal is on a MAC-specific SDP pin */
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
					IXGBE_ESDP_SDP0;
			break;
		default:
			/* sanity check - No SFP+ devices here */
			sfp_cage_full = FALSE;
			break;
		}

		if (!sfp_cage_full) {
			/* empty cage: report link down, speed unknown */
			*link_up = FALSE;
			*speed = IXGBE_LINK_SPEED_UNKNOWN;
			return IXGBE_SUCCESS;
		}
	}

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* poll in 100 ms steps up to max_link_up_time iterations */
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* decode the speed field; NON_STD modifies it on X550-class MACs */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type == ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
			*speed = IXGBE_LINK_SPEED_10_FULL;
		}
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 * the EEPROM
 * @hw: pointer to hardware structure
 * @wwnn_prefix: the alternative WWNN prefix
 * @wwpn_prefix: the alternative WWPN prefix
 *
 * This function will read the EEPROM from the alternative SAN MAC address
 * block to check the support for the alternative WWNN/WWPN prefix support.
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	DEBUGFUNC("ixgbe_get_wwn_prefix_generic");

	/* clear output first */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* check if alternative SAN MAC is supported */
	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
		goto wwn_prefix_err;

	if ((alt_san_mac_blk_offset == 0) ||
	    (alt_san_mac_blk_offset == 0xFFFF))
		goto wwn_prefix_out;

	/* check capability in alternative san mac address block */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, &caps))
		goto wwn_prefix_err;
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		goto wwn_prefix_out;

	/* get the corresponding prefix for WWNN/WWPN */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	/* NOTE(review): a failed WWNN read is only logged; we still try the
	 * WWPN read below.  The function always returns IXGBE_SUCCESS with
	 * outputs left at 0xFFFF on failure — callers must treat 0xFFFF as
	 * "no prefix available".
	 */
	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
			      "eeprom read at offset %d failed", offset);
	}

	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
		goto wwn_prefix_err;

wwn_prefix_out:
	return IXGBE_SUCCESS;

wwn_prefix_err:
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
 * @hw: pointer to hardware structure
 * @bs: the fcoe boot status
 *
 * This function will read the FCOE boot status from the iSCSI FCOE block
 **/
s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
{
	u16 offset, caps, flags;
	s32 status;

	DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");

	/* clear output first */
	*bs = ixgbe_fcoe_bootstatus_unavailable;

	/* check if FCOE IBA block is present */
	offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
	status = hw->eeprom.ops.read(hw, offset, &caps);
	if (status != IXGBE_SUCCESS)
		goto out;

	if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
		goto out;

	/* check if iSCSI FCOE block is populated */
	status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
	if (status != IXGBE_SUCCESS)
		goto out;

	if ((offset == 0) || (offset == 0xFFFF))
		goto out;

	/* read fcoe flags in iSCSI FCOE block */
	offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
	status = hw->eeprom.ops.read(hw, offset, &flags);
	if (status != IXGBE_SUCCESS)
		goto out;

	if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
		*bs = ixgbe_fcoe_bootstatus_enabled;
	else
		*bs = ixgbe_fcoe_bootstatus_disabled;

out:
	return status;
}

/**
 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for MAC anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
 *
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	/* Each PFVFSPOOF register holds the MAC anti-spoof bits for
	 * 8 VF pools; pick the register and bit for this VF.
	 */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8;
	u32 pfvfspoof;

	/* 82598 has no SR-IOV anti-spoofing support */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= (1 << vf_target_shift);
	else
		pfvfspoof &= ~(1 << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 *
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	/* VLAN anti-spoof bits live in the upper half of each PFVFSPOOF
	 * register, hence the VLANAS shift on top of the per-VF offset.
	 */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
	u32 pfvfspoof;

	/* 82598 has no SR-IOV anti-spoofing support */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= (1 << vf_target_shift);
	else
		pfvfspoof &= ~(1 << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}

/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_generic");

	/* NOTE(review): the EEPROM read status is ignored here, so
	 * *device_caps is left unmodified if the read fails — confirm
	 * callers initialize it or tolerate stale contents.
	 */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
 * @hw: pointer to hardware structure
 *
 **/
void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");

	/* Enable relaxed ordering */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

}

/**
 * ixgbe_calculate_checksum - Calculate checksum for buffer
 * @buffer: pointer to EEPROM
 * @length: size of EEPROM to calculate a checksum for
 * Calculates the checksum for some buffer on a specified length.  The
 * checksum calculated is returned.  The value is the two's-complement
 * of the byte sum, so summing the buffer plus the checksum yields 0.
 **/
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
	u32 i;
	u8 sum = 0;

	DEBUGFUNC("ixgbe_calculate_checksum");

	if (!buffer)
		return 0;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return (u8) (0 - sum);
}

/**
 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
 * @hw: pointer to the HW structure
 * @buffer: command to write and where the return status will be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 *
 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
 * by the caller.
 **/
s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
		       u32 timeout)
{
	u32 hicr, i, fwsts;
	u16 dword_len;

	DEBUGFUNC("ixgbe_hic_unlocked");

	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled.
	 */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if (!(hicr & IXGBE_HICR_EN)) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if (length % sizeof(u32)) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll for firmware to clear the command bit, 1 ms per step. */
	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command completion */
	if ((timeout && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			      "Command has failed with no status valid.\n");
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else returns semaphore error when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u16 dword_len;
	u16 buf_len;
	s32 status;
	u32 bi;

	DEBUGFUNC("ixgbe_host_interface_command");

	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
	if (status)
		goto rel_out;

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (!buf_len)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}

/**
 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
 * @hw: pointer to the HW structure
 * @maj: driver version major number
 * @min: driver version minor number
 * @build: driver version build number
 * @sub: driver version sub build number
 *
 * Sends driver version number to firmware through the manageability
 * block.  On success return IXGBE_SUCCESS
 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
				 u8 build, u8 sub, u16 len,
				 const char *driver_ver)
{
	struct ixgbe_hic_drv_info fw_cmd;
	int i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
	/* len/driver_ver are unused in the generic implementation */
	UNREFERENCED_2PARAMETER(len, driver_ver);

	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = (u8)hw->bus.func;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = min;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	fw_cmd.hdr.checksum = 0;
	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
	/* NOTE(review): pad/pad2 are cleared after the checksum is computed;
	 * this matches the upstream driver, presumably because the pads lie
	 * outside the checksummed span — confirm against struct layout.
	 */
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;

	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
						       sizeof(fw_cmd),
						       IXGBE_HI_COMMAND_TIMEOUT,
						       TRUE);
		if (ret_val != IXGBE_SUCCESS)
			continue;

		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = IXGBE_SUCCESS;
		else
			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;

		break;
	}

	return ret_val;
}

/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Fall through to configure remaining packet buffers */
	case PBA_STRATEGY_EQUAL:
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy.
	 */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}

/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Bail out if the device was surprise-removed. */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}

/**
 * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW
 *
 * @hw: pointer to hardware structure
 * @cmd: Command we send to the FW
 * @status: The reply from the FW
 *
 * Bit-bangs the cmd to the by_pass FW status points to what is returned.
 **/
#define IXGBE_BYPASS_BB_WAIT 1
s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
{
	int i;
	u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo;
	u32 esdp;

	if (!status)
		return IXGBE_ERR_PARAM;

	*status = 0;

	/* SDP vary by MAC type */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		sck = IXGBE_ESDP_SDP7;
		sdi = IXGBE_ESDP_SDP0;
		sdo = IXGBE_ESDP_SDP6;
		dir_sck = IXGBE_ESDP_SDP7_DIR;
		dir_sdi = IXGBE_ESDP_SDP0_DIR;
		dir_sdo = IXGBE_ESDP_SDP6_DIR;
		break;
	case ixgbe_mac_X540:
		sck = IXGBE_ESDP_SDP2;
		sdi = IXGBE_ESDP_SDP0;
		sdo = IXGBE_ESDP_SDP1;
		dir_sck = IXGBE_ESDP_SDP2_DIR;
		dir_sdi = IXGBE_ESDP_SDP0_DIR;
		dir_sdo = IXGBE_ESDP_SDP1_DIR;
		break;
	default:
		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
	}

	/* Set SDP pins direction */
	esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
	esdp |= dir_sck;	/* SCK as output */
	esdp |= dir_sdi;	/* SDI as output */
	esdp &= ~dir_sdo;	/* SDO as input */
	esdp |= sck;
	esdp |= sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	/* Generate start condition */
	esdp &= ~sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	esdp &= ~sck;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	/* Clock out the new control word and clock in the status,
	 * MSB first; the reply bit is sampled on SDO after each
	 * falling clock edge.
	 */
	for (i = 0; i < 32; i++) {
		if ((cmd >> (31 - i)) & 0x01) {
			esdp |= sdi;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		} else {
			esdp &= ~sdi;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		}
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		esdp |= sck;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		esdp &= ~sck;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(IXGBE_BYPASS_BB_WAIT);

		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & sdo)
			*status = (*status << 1) | 0x01;
		else
			*status = (*status << 1) | 0x00;
		msec_delay(IXGBE_BYPASS_BB_WAIT);
	}

	/* stop condition */
	esdp |= sck;
	esdp &= ~sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(IXGBE_BYPASS_BB_WAIT);

	esdp |= sdi;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);

	/* set the page bits to match the cmd that the status it belongs to */
	*status = (*status & 0x3fffffff) | (cmd & 0xc0000000);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
 *
 * If we send a write we can't be sure it took until we can read back
 * that same register.  It can be a problem as some of the fields may
 * for valid reasons change between the time we wrote the register and
 * when we read it again to verify.  So this function checks everything
 * we can check and then assumes it worked.
 *
 * @u32 in_reg - The register cmd for the bit-bang read.
 * @u32 out_reg - The register returned from a bit-bang read.
 **/
bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
{
	u32 mask;

	/* Page must match for all control pages */
	if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M))
		return FALSE;

	switch (in_reg & BYPASS_PAGE_M) {
	case BYPASS_PAGE_CTL0:
		/* All the following can't change since the last write
		 *  - All the event actions
		 *  - The timeout value
		 */
		mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M |
		       BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M |
		       BYPASS_WDTIMEOUT_M |
		       BYPASS_WDT_VALUE_M;
		if ((out_reg & mask) != (in_reg & mask))
			return FALSE;

		/* 0x0 is never a valid value for bypass status */
		if (!(out_reg & BYPASS_STATUS_OFF_M))
			return FALSE;
		break;
	case BYPASS_PAGE_CTL1:
		/* All the following can't change since the last write
		 *  - time valid bit
		 *  - time we last sent
		 */
		mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M;
		if ((out_reg & mask) != (in_reg & mask))
			return FALSE;
		break;
	case BYPASS_PAGE_CTL2:
		/* All we can check in this page is control number
		 * which is already done above.
		 */
		break;
	}

	/* We are as sure as we can be return TRUE */
	return TRUE;
}

/**
 * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
 *
 * @hw: pointer to hardware structure
 * @ctrl: The control word we are setting.
 * @event: The event we are setting in the FW.  This also happens to
 *	   be the mask for the event we are setting (handy)
 * @action: The action we set the event to in the FW.  This is in a
 *	    bit field that happens to be what we want to put in
 *	    the event spot (also handy)
 **/
s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event,
			     u32 action)
{
	u32 by_ctl = 0;
	u32 cmd, verify;
	u32 count = 0;

	/* Get current values */
	cmd = ctrl;	/* just reading only need control number */
	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* Set to new action */
	cmd = (by_ctl & ~event) | BYPASS_WE | action;
	if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* Page 0 force a FW eeprom write which is slow so verify */
	if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) {
		verify = BYPASS_PAGE_CTL0;
		/* Re-read up to 6 times until the read-back matches. */
		do {
			if (count++ > 5)
				return IXGBE_BYPASS_FW_WRITE_FAILURE;

			if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl))
				return IXGBE_ERR_INVALID_ARGUMENT;
		} while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl));
	} else {
		/* We have to give the FW time for the write to stick */
		msec_delay(100);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address.
 *
 * @hw: pointer to hardware structure
 * @addr: The bypass eeprom address to read.
 * @value: The 8b of data at the address above.
 **/
s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value)
{
	u32 cmd;
	u32 status;


	/* send the request */
	cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
	cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M;
	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
		return IXGBE_ERR_INVALID_ARGUMENT;

	/* We have to give the FW time for the write to stick */
	msec_delay(100);

	/* now read the results */
	cmd &= ~BYPASS_WE;
	if (ixgbe_bypass_rw_generic(hw, cmd, &status))
		return IXGBE_ERR_INVALID_ARGUMENT;

	*value = status & BYPASS_CTL2_DATA_M;

	return IXGBE_SUCCESS;
}


/**
 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
 * @hw: pointer to hardware structure
 * @map: pointer to u8 arr for returning map
 *
 * Read the rtrup2tc HW register and resolve its content into map
 **/
void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
{
	u32 reg, i;

	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		map[i] = IXGBE_RTRUP2TC_UP_MASK &
			 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
	return;
}

/* Disable Rx; on multi-pool MACs also clear VT loopback, remembering in
 * hw->mac.set_lben whether ixgbe_enable_rx_generic() must restore it.
 */
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		if (hw->mac.type != ixgbe_mac_82598EB) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
				hw->mac.set_lben = TRUE;
			} else {
				hw->mac.set_lben = FALSE;
			}
		}
		rxctrl &= ~IXGBE_RXCTRL_RXEN;
		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	}
}

/* Re-enable Rx and restore VT loopback if the disable path turned it off. */
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));

	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (hw->mac.set_lben) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = FALSE;
		}
	}
}

/**
 * ixgbe_mng_present - returns TRUE when management capability is present
 * @hw: pointer to hardware structure
 */
bool ixgbe_mng_present(struct ixgbe_hw *hw)
{
	u32 fwsm;

	if (hw->mac.type < ixgbe_mac_82599EB)
		return FALSE;

	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
	fwsm &= IXGBE_FWSM_MODE_MASK;
	return fwsm == IXGBE_FWSM_FW_MODE_PT;
}

/**
 * ixgbe_mng_enabled - Is the manageability engine enabled?
 * @hw: pointer to hardware structure
 *
 * Returns TRUE if the manageability engine is enabled.
 **/
bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
	u32 fwsm, manc, factps;

	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
		return FALSE;

	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
		return FALSE;

	if (hw->mac.type <= ixgbe_mac_X540) {
		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
		if (factps & IXGBE_FACTPS_MNGCG)
			return FALSE;
	}

	return TRUE;
}

/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 * Set the link speed in the MAC and/or PHY register and restarts link.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						highest_link_speed,
						autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}

/**
 * ixgbe_set_soft_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via the soft rate select.
 */
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	s32 status;
	u8 rs, eeprom_data;

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		/* one bit mask same as setting on */
		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	/* Set RS0 */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
		goto out;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
		goto out;
	}

	/* Set RS1 */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
		goto out;
	}

eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; 5369 5370 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, 5371 IXGBE_I2C_EEPROM_DEV_ADDR2, 5372 eeprom_data); 5373 if (status) { 5374 DEBUGOUT("Failed to write Rx Rate Select RS1\n"); 5375 goto out; 5376 } 5377out: 5378 return; 5379} 5380