/* e1000_ich8lan.c revision 177867 */
1/****************************************************************************** 2 3 Copyright (c) 2001-2008, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/
/*$FreeBSD: head/sys/dev/em/e1000_ich8lan.c 177867 2008-04-02 22:00:36Z jfv $*/

/* e1000_ich8lan
 * e1000_ich9lan
 */

#include "e1000_api.h"
#include "e1000_ich8lan.h"

/* Forward declarations for the family-private routines defined below. */
static s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static s32  e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw);
static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
static s32  e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw);
static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
static s32  e1000_get_phy_info_ich8lan(struct e1000_hw *hw);
static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
                                            bool active);
static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
                                            bool active);
static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
                                   u16 words, u16 *data);
static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
                                    u16 words, u16 *data);
static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
                                            u16 *data);
static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
                                           u16 *speed, u16 *duplex);
static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
static s32  e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                          u8 size, u16* data);
static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
                                          u32 offset, u16 *data);
static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
                                                 u32 offset, u8 byte);
static s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
                                           u32 offset, u8 data);
static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                           u8 size, u16 data);
static s32  e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
		u16 dael       :1; /* bit 2 Direct Access error Log */
		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
		u16 reserved1  :2; /* bit 7:6 Reserved */
		u16 reserved2  :6; /* bit 13:8 Reserved */
		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo     :1; /* 0 Flash Cycle Go */
		u16 flcycle   :2; /* 2:1 Flash Cycle */
		u16 reserved  :5; /* 7:3 Reserved */
		u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
		/*
		 * NOTE(review): field is named flockdn but the original
		 * comment marks bits 15:10 as Reserved -- confirm against
		 * the ICH datasheet which name is intended.
		 */
		u16 flockdn   :6; /* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra  :8; /* 0:7 GbE region Read Access */
		u32 grwa  :8; /* 8:15 GbE region Write Access */
		u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	/*
	 * NOTE(review): the bitfields above span 32 bits while regval is
	 * only 16 bits wide -- confirm whether regval should be u32.
	 */
	u16 regval;
};

/* One cached NVM word plus a dirty flag used by the NVM read/write paths. */
struct e1000_shadow_ram {
	u16 value;
	bool modified;
};

/* Per-device private state for the ICH8/ICH9 family. */
struct e1000_dev_spec_ich8lan {
	bool kmrn_lock_loss_workaround_enabled;
	struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS];
};

/**
 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = E1000_SUCCESS;
	u16 i = 0;

	DEBUGFUNC("e1000_init_phy_params_ich8lan");

	phy->addr = 1;
	phy->reset_delay_us = 100;

	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.get_info = e1000_get_phy_info_ich8lan;
	phy->ops.read_reg = e1000_read_phy_reg_igp;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg = e1000_write_phy_reg_igp;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

	/*
	 * We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
		if (ret_val) {
			DEBUGOUT("Cannot determine PHY address. Erroring out\n");
			goto out;
		}
	}

	phy->id = 0;
	/* Poll (up to ~100 ms) until the id read back maps to a known PHY type */
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		msec_delay(1);
		ret_val = e1000_get_phy_id(hw);
		if (ret_val)
			goto out;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		/* IFE PHYs are 10/100 only */
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

/**
 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific NVM parameters and function
 * pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	s32 ret_val = E1000_SUCCESS;
	u16 i;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	/* Can't read flash registers if the register set isn't mapped. */
	if (!hw->flash_address) {
		DEBUGOUT("ERROR: Flash registers not mapped\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	nvm->type = e1000_nvm_flash_sw;

	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

	/*
	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
	 * Add 1 to sector_end_addr since this sector is included in
	 * the overall size.
	 */
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

	/*
	 * find total size of the NVM, then cut in half since the total
	 * size represents two separate NVM banks.
	 */
	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
	                       << FLASH_SECTOR_ADDR_SHIFT;
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;

	if (!dev_spec) {
		DEBUGOUT("dev_spec pointer is set to NULL.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = FALSE;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

	/* Function Pointers */
	nvm->ops.acquire = e1000_acquire_swflag_ich8lan;
	nvm->ops.read = e1000_read_nvm_ich8lan;
	nvm->ops.release = e1000_release_swflag_ich8lan;
	nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write = e1000_write_nvm_ich8lan;

out:
	return ret_val;
}

/**
 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific MAC parameters and function
 * pointers.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	/* ICH8 has one fewer usable receive-address register */
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid = TRUE;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_generic;
	/* check management mode */
	mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* setting MTA */
	mac->ops.mta_set = e1000_mta_set_generic;
	/* blink LED */
	mac->ops.blink_led = e1000_blink_led_generic;
	/* setup LED */
	mac->ops.setup_led = e1000_setup_led_generic;
	/* cleanup LED */
	mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
	/* turn on/off LED */
	mac->ops.led_on = e1000_led_on_ich8lan;
	mac->ops.led_off = e1000_led_off_ich8lan;
	/* remove device */
	mac->ops.remove_device = e1000_remove_device_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	hw->dev_spec_size = sizeof(struct e1000_dev_spec_ich8lan);

	/* Device-specific structure allocation */
	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
	if (ret_val)
		goto out;

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);


out:
	return ret_val;
}

/**
 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific function pointers for PHY, MAC, and NVM.
 **/
void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_init_function_pointers_ich8lan");

	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
	hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
}

/**
 * e1000_acquire_swflag_ich8lan - Acquire software control flag
 * @hw: pointer to the HW structure
 *
 * Acquires the software control flag for performing NVM and PHY
 * operations. This is a function pointer entry point only called by
 * read/write routines for the PHY and NVM parts.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_acquire_swflag_ich8lan");

	/* Set SWFLAG and poll until hardware confirms it stuck (FW may own it) */
	while (timeout) {
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;
		msec_delay_irq(1);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("FW or HW has locked the resource for too long.\n");
		/* Undo our pending request before giving up */
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}

/**
 * e1000_release_swflag_ich8lan - Release software control flag
 * @hw: pointer to the HW structure
 *
 * Releases the software control flag for performing NVM and PHY operations.
 * This is a function pointer entry point only called by read/write
 * routines for the PHY and NVM parts.
 **/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;

	DEBUGFUNC("e1000_release_swflag_ich8lan");

	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

	return;
}

/**
 * e1000_check_mng_mode_ich8lan - Checks management mode
 * @hw: pointer to the HW structure
 *
 * This checks if the adapter has manageability enabled.
 * This is a function pointer entry point.
 **/
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;

	DEBUGFUNC("e1000_check_mng_mode_ich8lan");

	fwsm = E1000_READ_REG(hw, E1000_FWSM);

	/* TRUE when firmware reports IAMT manageability mode in FWSM */
	return ((fwsm & E1000_FWSM_MODE_MASK) ==
	        (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}

/**
 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
 * @hw: pointer to the HW structure
 *
 * Checks if firmware is blocking the reset of the PHY.
 * This is a function pointer entry point only called by
 * reset routines.
 **/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
	u32 fwsm;

	DEBUGFUNC("e1000_check_reset_block_ich8lan");

	fwsm = E1000_READ_REG(hw, E1000_FWSM);

	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
	                                        : E1000_BLK_PHY_RESET;
}

/**
 * e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex
 * @hw: pointer to the HW structure
 *
 * Forces the speed and duplex settings of the PHY.
 * This is a function pointer entry point only called by
 * PHY setup routines.
 **/
static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	DEBUGFUNC("e1000_phy_force_speed_duplex_ich8lan");

	/* Non-IFE PHYs use the generic IGP forcing path */
	if (phy->type != e1000_phy_ife) {
		ret_val = e1000_phy_force_speed_duplex_igp(hw);
		goto out;
	}

	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
	if (ret_val)
		goto out;

	e1000_phy_force_speed_duplex_setup(hw, &data);

	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
	if (ret_val)
		goto out;

	/* Disable MDI-X support for 10/100 */
	ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
	if (ret_val)
		goto out;

	data &= ~IFE_PMC_AUTO_MDIX;
	data &= ~IFE_PMC_FORCE_MDIX;

	ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
	if (ret_val)
		goto out;

	DEBUGOUT1("IFE PMC: %X\n", data);

	usec_delay(1);

	if (phy->autoneg_wait_to_complete) {
		DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");

		ret_val = e1000_phy_has_link_generic(hw,
		                                     PHY_FORCE_LIMIT,
		                                     100000,
		                                     &link);
		if (ret_val)
			goto out;

		if (!link) {
			DEBUGOUT("Link taking longer than expected.\n");
		}

		/* Try once more */
		ret_val = e1000_phy_has_link_generic(hw,
		                                     PHY_FORCE_LIMIT,
		                                     100000,
		                                     &link);
		if (ret_val)
			goto out;
	}

out:
	return ret_val;
}

/**
 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
 * @hw: pointer to the HW structure
 *
 * Resets the PHY
 * This is a function pointer entry point called by drivers
 * or other shared routines.
 **/
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val;
	u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");

	ret_val = e1000_phy_hw_reset_generic(hw);
	if (ret_val)
		goto out;

	/*
	 * Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) {
		/* Check if SW needs configure the PHY */
		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_M))
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		else
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;

		data = E1000_READ_REG(hw, E1000_FEXTNVM);
		if (!(data & sw_cfg_mask))
			goto out;

		/* Wait for basic configuration completes before proceeding*/
		do {
			data = E1000_READ_REG(hw, E1000_STATUS);
			data &= E1000_STATUS_LAN_INIT_DONE;
			usec_delay(100);
		} while ((!data) && --loop);

		/*
		 * If basic configuration is incomplete before the above loop
		 * count reaches 0, loading the configuration from NVM will
		 * leave the PHY in a bad state possibly resulting in no link.
		 */
		if (loop == 0) {
			DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
		}

		/* Clear the Init Done bit for the next init event */
		data = E1000_READ_REG(hw, E1000_STATUS);
		data &= ~E1000_STATUS_LAN_INIT_DONE;
		E1000_WRITE_REG(hw, E1000_STATUS, data);

		/*
		 * Make sure HW does not configure LCD from PHY
		 * extended configuration before SW configuration
		 */
		data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
			goto out;

		cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
		if (!cnf_size)
			goto out;

		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

		/*
		 * Configure LCD from extended configuration
		 * region.
		 */

		/* cnf_base_addr is in DWORD */
		word_addr = (u16)(cnf_base_addr << 1);

		/* Each config entry is a (data, address) word pair in NVM */
		for (i = 0; i < cnf_size; i++) {
			ret_val = nvm->ops.read(hw,
			                        (word_addr + i * 2),
			                        1,
			                        &reg_data);
			if (ret_val)
				goto out;

			ret_val = nvm->ops.read(hw,
			                        (word_addr + i * 2 + 1),
			                        1,
			                        &reg_addr);
			if (ret_val)
				goto out;

			/* Save off the PHY page for future writes. */
			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
				phy_page = reg_data;
				continue;
			}

			reg_addr |= phy_page;

			ret_val = phy->ops.write_reg(hw,
			                             (u32)reg_addr,
			                             reg_data);
			if (ret_val)
				goto out;
		}
	}

out:
	return ret_val;
}

/**
 * e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info
 * @hw: pointer to the HW structure
 *
 * Wrapper for calling the get_phy_info routines for the appropriate phy type.
 * This is a function pointer entry point called by drivers
 * or other shared routines.
 **/
static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_PHY_TYPE;

	DEBUGFUNC("e1000_get_phy_info_ich8lan");

	switch (hw->phy.type) {
	case e1000_phy_ife:
		ret_val = e1000_get_phy_info_ife_ich8lan(hw);
		break;
	case e1000_phy_igp_3:
	case e1000_phy_bm:
		ret_val = e1000_get_phy_info_igp(hw);
		break;
	default:
		/* Unknown PHY type: fall through with -E1000_ERR_PHY_TYPE */
		break;
	}

	return ret_val;
}

/**
 * e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states
 * @hw: pointer to the HW structure
 *
 * Populates "phy" structure with various feature states.
 * This function is only called by other family-specific
 * routines.
 **/
static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	DEBUGFUNC("e1000_get_phy_info_ife_ich8lan");

	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link) {
		DEBUGOUT("Phy info is only valid if link is up\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
	if (ret_val)
		goto out;
	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
	                           ? FALSE : TRUE;

	if (phy->polarity_correction) {
		ret_val = e1000_check_polarity_ife_ich8lan(hw);
		if (ret_val)
			goto out;
	} else {
		/* Polarity is forced */
		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
		                      ? e1000_rev_polarity_reversed
		                      : e1000_rev_polarity_normal;
	}

	ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
	if (ret_val)
		goto out;

	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? TRUE : FALSE;

	/* The following parameters are undefined for 10/100 operation. */
	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
	phy->local_rx = e1000_1000t_rx_status_undefined;
	phy->remote_rx = e1000_1000t_rx_status_undefined;

out:
	return ret_val;
}

/**
 * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY
 * @hw: pointer to the HW structure
 *
 * Polarity is determined on the polarity reversal feature being enabled.
 * This function is only called by other family-specific
 * routines.
 **/
static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, offset, mask;

	DEBUGFUNC("e1000_check_polarity_ife_ich8lan");

	/*
	 * Polarity is determined based on the reversal feature
	 * being enabled.
	 */
	if (phy->polarity_correction) {
		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
		mask = IFE_PESC_POLARITY_REVERSED;
	} else {
		offset = IFE_PHY_SPECIAL_CONTROL;
		mask = IFE_PSC_FORCE_POLARITY;
	}

	ret_val = phy->ops.read_reg(hw, offset, &phy_data);

	if (!ret_val)
		phy->cable_polarity = (phy_data & mask)
		                      ? e1000_rev_polarity_reversed
		                      : e1000_rev_polarity_normal;

	return ret_val;
}

/**
 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: TRUE to enable LPLU, FALSE to disable
 *
 * Sets the LPLU D0 state according to the active flag.  When
 * activating LPLU this function also disables smart speed
 * and vice versa.  LPLU will not be activated unless the
 * device autonegotiation advertisement meets standards of
 * either 10 or 10/100 or 10/100/1000 at all duplexes.
 * This is a function pointer entry point only called by
 * PHY setup routines.
840 **/ 841static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, 842 bool active) 843{ 844 struct e1000_phy_info *phy = &hw->phy; 845 u32 phy_ctrl; 846 s32 ret_val = E1000_SUCCESS; 847 u16 data; 848 849 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan"); 850 851 if (phy->type == e1000_phy_ife) 852 goto out; 853 854 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 855 856 if (active) { 857 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; 858 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 859 860 /* 861 * Call gig speed drop workaround on LPLU before accessing 862 * any PHY registers 863 */ 864 if ((hw->mac.type == e1000_ich8lan) && 865 (hw->phy.type == e1000_phy_igp_3)) 866 e1000_gig_downshift_workaround_ich8lan(hw); 867 868 /* When LPLU is enabled, we should disable SmartSpeed */ 869 ret_val = phy->ops.read_reg(hw, 870 IGP01E1000_PHY_PORT_CONFIG, 871 &data); 872 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 873 ret_val = phy->ops.write_reg(hw, 874 IGP01E1000_PHY_PORT_CONFIG, 875 data); 876 if (ret_val) 877 goto out; 878 } else { 879 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; 880 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 881 882 /* 883 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 884 * during Dx states where the power conservation is most 885 * important. During driver activity we should enable 886 * SmartSpeed, so performance is maintained. 
887 */ 888 if (phy->smart_speed == e1000_smart_speed_on) { 889 ret_val = phy->ops.read_reg(hw, 890 IGP01E1000_PHY_PORT_CONFIG, 891 &data); 892 if (ret_val) 893 goto out; 894 895 data |= IGP01E1000_PSCFR_SMART_SPEED; 896 ret_val = phy->ops.write_reg(hw, 897 IGP01E1000_PHY_PORT_CONFIG, 898 data); 899 if (ret_val) 900 goto out; 901 } else if (phy->smart_speed == e1000_smart_speed_off) { 902 ret_val = phy->ops.read_reg(hw, 903 IGP01E1000_PHY_PORT_CONFIG, 904 &data); 905 if (ret_val) 906 goto out; 907 908 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 909 ret_val = phy->ops.write_reg(hw, 910 IGP01E1000_PHY_PORT_CONFIG, 911 data); 912 if (ret_val) 913 goto out; 914 } 915 } 916 917out: 918 return ret_val; 919} 920 921/** 922 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state 923 * @hw: pointer to the HW structure 924 * @active: TRUE to enable LPLU, FALSE to disable 925 * 926 * Sets the LPLU D3 state according to the active flag. When 927 * activating LPLU this function also disables smart speed 928 * and vice versa. LPLU will not be activated unless the 929 * device autonegotiation advertisement meets standards of 930 * either 10 or 10/100 or 10/100/1000 at all duplexes. 931 * This is a function pointer entry point only called by 932 * PHY setup routines. 933 **/ 934static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, 935 bool active) 936{ 937 struct e1000_phy_info *phy = &hw->phy; 938 u32 phy_ctrl; 939 s32 ret_val = E1000_SUCCESS; 940 u16 data; 941 942 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan"); 943 944 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 945 946 if (!active) { 947 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; 948 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 949 /* 950 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 951 * during Dx states where the power conservation is most 952 * important. During driver activity we should enable 953 * SmartSpeed, so performance is maintained. 
954 */ 955 if (phy->smart_speed == e1000_smart_speed_on) { 956 ret_val = phy->ops.read_reg(hw, 957 IGP01E1000_PHY_PORT_CONFIG, 958 &data); 959 if (ret_val) 960 goto out; 961 962 data |= IGP01E1000_PSCFR_SMART_SPEED; 963 ret_val = phy->ops.write_reg(hw, 964 IGP01E1000_PHY_PORT_CONFIG, 965 data); 966 if (ret_val) 967 goto out; 968 } else if (phy->smart_speed == e1000_smart_speed_off) { 969 ret_val = phy->ops.read_reg(hw, 970 IGP01E1000_PHY_PORT_CONFIG, 971 &data); 972 if (ret_val) 973 goto out; 974 975 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 976 ret_val = phy->ops.write_reg(hw, 977 IGP01E1000_PHY_PORT_CONFIG, 978 data); 979 if (ret_val) 980 goto out; 981 } 982 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || 983 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || 984 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { 985 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; 986 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 987 988 /* 989 * Call gig speed drop workaround on LPLU before accessing 990 * any PHY registers 991 */ 992 if ((hw->mac.type == e1000_ich8lan) && 993 (hw->phy.type == e1000_phy_igp_3)) 994 e1000_gig_downshift_workaround_ich8lan(hw); 995 996 /* When LPLU is enabled, we should disable SmartSpeed */ 997 ret_val = phy->ops.read_reg(hw, 998 IGP01E1000_PHY_PORT_CONFIG, 999 &data); 1000 if (ret_val) 1001 goto out; 1002 1003 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1004 ret_val = phy->ops.write_reg(hw, 1005 IGP01E1000_PHY_PORT_CONFIG, 1006 data); 1007 } 1008 1009out: 1010 return ret_val; 1011} 1012 1013/** 1014 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 1015 * @hw: pointer to the HW structure 1016 * @bank: pointer to the variable that returns the active bank 1017 * 1018 * Reads signature byte from the NVM using the flash access registers. 
1019 **/ 1020static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) 1021{ 1022 s32 ret_val = E1000_SUCCESS; 1023 if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_SEC1VAL) 1024 *bank = 1; 1025 else 1026 *bank = 0; 1027 1028 return ret_val; 1029} 1030 1031/** 1032 * e1000_read_nvm_ich8lan - Read word(s) from the NVM 1033 * @hw: pointer to the HW structure 1034 * @offset: The offset (in bytes) of the word(s) to read. 1035 * @words: Size of data to read in words 1036 * @data: Pointer to the word(s) to read at offset. 1037 * 1038 * Reads a word(s) from the NVM using the flash access registers. 1039 **/ 1040static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, 1041 u16 *data) 1042{ 1043 struct e1000_nvm_info *nvm = &hw->nvm; 1044 struct e1000_dev_spec_ich8lan *dev_spec; 1045 u32 act_offset; 1046 s32 ret_val = E1000_SUCCESS; 1047 u32 bank = 0; 1048 u16 i, word; 1049 1050 DEBUGFUNC("e1000_read_nvm_ich8lan"); 1051 1052 dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec; 1053 1054 if (!dev_spec) { 1055 DEBUGOUT("dev_spec pointer is set to NULL.\n"); 1056 ret_val = -E1000_ERR_CONFIG; 1057 goto out; 1058 } 1059 1060 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 1061 (words == 0)) { 1062 DEBUGOUT("nvm parameter(s) out of bounds\n"); 1063 ret_val = -E1000_ERR_NVM; 1064 goto out; 1065 } 1066 1067 ret_val = nvm->ops.acquire(hw); 1068 if (ret_val) 1069 goto out; 1070 1071 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1072 if (ret_val != E1000_SUCCESS) 1073 goto out; 1074 1075 act_offset = (bank) ? 
nvm->flash_bank_size : 0; 1076 act_offset += offset; 1077 1078 for (i = 0; i < words; i++) { 1079 if ((dev_spec->shadow_ram) && 1080 (dev_spec->shadow_ram[offset+i].modified)) { 1081 data[i] = dev_spec->shadow_ram[offset+i].value; 1082 } else { 1083 ret_val = e1000_read_flash_word_ich8lan(hw, 1084 act_offset + i, 1085 &word); 1086 if (ret_val) 1087 break; 1088 data[i] = word; 1089 } 1090 } 1091 1092 nvm->ops.release(hw); 1093 1094out: 1095 return ret_val; 1096} 1097 1098/** 1099 * e1000_flash_cycle_init_ich8lan - Initialize flash 1100 * @hw: pointer to the HW structure 1101 * 1102 * This function does initial flash setup so that a new read/write/erase cycle 1103 * can be started. 1104 **/ 1105static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) 1106{ 1107 union ich8_hws_flash_status hsfsts; 1108 s32 ret_val = -E1000_ERR_NVM; 1109 s32 i = 0; 1110 1111 DEBUGFUNC("e1000_flash_cycle_init_ich8lan"); 1112 1113 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 1114 1115 /* Check if the flash descriptor is valid */ 1116 if (hsfsts.hsf_status.fldesvalid == 0) { 1117 DEBUGOUT("Flash descriptor invalid. " 1118 "SW Sequencing must be used."); 1119 goto out; 1120 } 1121 1122 /* Clear FCERR and DAEL in hw status by writing 1 */ 1123 hsfsts.hsf_status.flcerr = 1; 1124 hsfsts.hsf_status.dael = 1; 1125 1126 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); 1127 1128 /* 1129 * Either we should have a hardware SPI cycle in progress 1130 * bit to check against, in order to start a new cycle or 1131 * FDONE bit should be changed in the hardware so that it 1132 * is 1 after hardware reset, which can then be used as an 1133 * indication whether a cycle is in progress or has been 1134 * completed. 1135 */ 1136 1137 if (hsfsts.hsf_status.flcinprog == 0) { 1138 /* 1139 * There is no cycle running at present, 1140 * so we can start a cycle. 1141 * Begin by setting Flash Cycle Done. 
1142 */ 1143 hsfsts.hsf_status.flcdone = 1; 1144 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); 1145 ret_val = E1000_SUCCESS; 1146 } else { 1147 /* 1148 * Otherwise poll for sometime so the current 1149 * cycle has a chance to end before giving up. 1150 */ 1151 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 1152 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 1153 ICH_FLASH_HSFSTS); 1154 if (hsfsts.hsf_status.flcinprog == 0) { 1155 ret_val = E1000_SUCCESS; 1156 break; 1157 } 1158 usec_delay(1); 1159 } 1160 if (ret_val == E1000_SUCCESS) { 1161 /* 1162 * Successful in waiting for previous cycle to timeout, 1163 * now set the Flash Cycle Done. 1164 */ 1165 hsfsts.hsf_status.flcdone = 1; 1166 E1000_WRITE_FLASH_REG16(hw, 1167 ICH_FLASH_HSFSTS, 1168 hsfsts.regval); 1169 } else { 1170 DEBUGOUT("Flash controller busy, cannot get access"); 1171 } 1172 } 1173 1174out: 1175 return ret_val; 1176} 1177 1178/** 1179 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) 1180 * @hw: pointer to the HW structure 1181 * @timeout: maximum time to wait for completion 1182 * 1183 * This function starts a flash cycle and waits for its completion. 
1184 **/ 1185static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) 1186{ 1187 union ich8_hws_flash_ctrl hsflctl; 1188 union ich8_hws_flash_status hsfsts; 1189 s32 ret_val = -E1000_ERR_NVM; 1190 u32 i = 0; 1191 1192 DEBUGFUNC("e1000_flash_cycle_ich8lan"); 1193 1194 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ 1195 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 1196 hsflctl.hsf_ctrl.flcgo = 1; 1197 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); 1198 1199 /* wait till FDONE bit is set to 1 */ 1200 do { 1201 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 1202 if (hsfsts.hsf_status.flcdone == 1) 1203 break; 1204 usec_delay(1); 1205 } while (i++ < timeout); 1206 1207 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) 1208 ret_val = E1000_SUCCESS; 1209 1210 return ret_val; 1211} 1212 1213/** 1214 * e1000_read_flash_word_ich8lan - Read word from flash 1215 * @hw: pointer to the HW structure 1216 * @offset: offset to data location 1217 * @data: pointer to the location for storing the data 1218 * 1219 * Reads the flash word at offset into data. Offset is converted 1220 * to bytes before read. 1221 **/ 1222static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, 1223 u16 *data) 1224{ 1225 s32 ret_val; 1226 1227 DEBUGFUNC("e1000_read_flash_word_ich8lan"); 1228 1229 if (!data) { 1230 ret_val = -E1000_ERR_NVM; 1231 goto out; 1232 } 1233 1234 /* Must convert offset into bytes. */ 1235 offset <<= 1; 1236 1237 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data); 1238 1239out: 1240 return ret_val; 1241} 1242 1243/** 1244 * e1000_read_flash_data_ich8lan - Read byte or word from NVM 1245 * @hw: pointer to the HW structure 1246 * @offset: The offset (in bytes) of the byte or word to read. 1247 * @size: Size of data to read, 1=byte 2=word 1248 * @data: Pointer to the word to store the value read. 
1249 * 1250 * Reads a byte or word from the NVM using the flash access registers. 1251 **/ 1252static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 1253 u8 size, u16* data) 1254{ 1255 union ich8_hws_flash_status hsfsts; 1256 union ich8_hws_flash_ctrl hsflctl; 1257 u32 flash_linear_addr; 1258 u32 flash_data = 0; 1259 s32 ret_val = -E1000_ERR_NVM; 1260 u8 count = 0; 1261 1262 DEBUGFUNC("e1000_read_flash_data_ich8lan"); 1263 1264 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) 1265 goto out; 1266 1267 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + 1268 hw->nvm.flash_base_addr; 1269 1270 do { 1271 usec_delay(1); 1272 /* Steps */ 1273 ret_val = e1000_flash_cycle_init_ich8lan(hw); 1274 if (ret_val != E1000_SUCCESS) 1275 break; 1276 1277 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 1278 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 1279 hsflctl.hsf_ctrl.fldbcount = size - 1; 1280 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; 1281 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); 1282 1283 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); 1284 1285 ret_val = e1000_flash_cycle_ich8lan(hw, 1286 ICH_FLASH_READ_COMMAND_TIMEOUT); 1287 1288 /* 1289 * Check if FCERR is set to 1, if set to 1, clear it 1290 * and try the whole sequence a few more times, else 1291 * read in (shift in) the Flash Data0, the order is 1292 * least significant byte first msb to lsb 1293 */ 1294 if (ret_val == E1000_SUCCESS) { 1295 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0); 1296 if (size == 1) { 1297 *data = (u8)(flash_data & 0x000000FF); 1298 } else if (size == 2) { 1299 *data = (u16)(flash_data & 0x0000FFFF); 1300 } 1301 break; 1302 } else { 1303 /* 1304 * If we've gotten here, then things are probably 1305 * completely hosed, but if the error condition is 1306 * detected, it won't hurt to give it another try... 1307 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 
1308 */ 1309 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 1310 ICH_FLASH_HSFSTS); 1311 if (hsfsts.hsf_status.flcerr == 1) { 1312 /* Repeat for some time before giving up. */ 1313 continue; 1314 } else if (hsfsts.hsf_status.flcdone == 0) { 1315 DEBUGOUT("Timeout error - flash cycle " 1316 "did not complete."); 1317 break; 1318 } 1319 } 1320 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 1321 1322out: 1323 return ret_val; 1324} 1325 1326/** 1327 * e1000_write_nvm_ich8lan - Write word(s) to the NVM 1328 * @hw: pointer to the HW structure 1329 * @offset: The offset (in bytes) of the word(s) to write. 1330 * @words: Size of data to write in words 1331 * @data: Pointer to the word(s) to write at offset. 1332 * 1333 * Writes a byte or word to the NVM using the flash access registers. 1334 **/ 1335static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, 1336 u16 *data) 1337{ 1338 struct e1000_nvm_info *nvm = &hw->nvm; 1339 struct e1000_dev_spec_ich8lan *dev_spec; 1340 s32 ret_val = E1000_SUCCESS; 1341 u16 i; 1342 1343 DEBUGFUNC("e1000_write_nvm_ich8lan"); 1344 1345 dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec; 1346 1347 if (!dev_spec) { 1348 DEBUGOUT("dev_spec pointer is set to NULL.\n"); 1349 ret_val = -E1000_ERR_CONFIG; 1350 goto out; 1351 } 1352 1353 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 1354 (words == 0)) { 1355 DEBUGOUT("nvm parameter(s) out of bounds\n"); 1356 ret_val = -E1000_ERR_NVM; 1357 goto out; 1358 } 1359 1360 ret_val = nvm->ops.acquire(hw); 1361 if (ret_val) 1362 goto out; 1363 1364 for (i = 0; i < words; i++) { 1365 dev_spec->shadow_ram[offset+i].modified = TRUE; 1366 dev_spec->shadow_ram[offset+i].value = data[i]; 1367 } 1368 1369 nvm->ops.release(hw); 1370 1371out: 1372 return ret_val; 1373} 1374 1375/** 1376 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM 1377 * @hw: pointer to the HW structure 1378 * 1379 * The NVM checksum is updated by calling the generic 
update_nvm_checksum, 1380 * which writes the checksum to the shadow ram. The changes in the shadow 1381 * ram are then committed to the EEPROM by processing each bank at a time 1382 * checking for the modified bit and writing only the pending changes. 1383 * After a successful commit, the shadow ram is cleared and is ready for 1384 * future writes. 1385 **/ 1386static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) 1387{ 1388 struct e1000_nvm_info *nvm = &hw->nvm; 1389 struct e1000_dev_spec_ich8lan *dev_spec; 1390 u32 i, act_offset, new_bank_offset, old_bank_offset, bank; 1391 s32 ret_val; 1392 u16 data; 1393 1394 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan"); 1395 1396 dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec; 1397 1398 ret_val = e1000_update_nvm_checksum_generic(hw); 1399 if (ret_val) 1400 goto out; 1401 1402 if (nvm->type != e1000_nvm_flash_sw) 1403 goto out; 1404 1405 ret_val = nvm->ops.acquire(hw); 1406 if (ret_val) 1407 goto out; 1408 1409 /* 1410 * We're writing to the opposite bank so if we're on bank 1, 1411 * write to bank 0 etc. 
We also need to erase the segment that 1412 * is going to be written 1413 */ 1414 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1415 if (ret_val != E1000_SUCCESS) 1416 goto out; 1417 1418 if (bank == 0) { 1419 new_bank_offset = nvm->flash_bank_size; 1420 old_bank_offset = 0; 1421 e1000_erase_flash_bank_ich8lan(hw, 1); 1422 } else { 1423 old_bank_offset = nvm->flash_bank_size; 1424 new_bank_offset = 0; 1425 e1000_erase_flash_bank_ich8lan(hw, 0); 1426 } 1427 1428 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 1429 /* 1430 * Determine whether to write the value stored 1431 * in the other NVM bank or a modified value stored 1432 * in the shadow RAM 1433 */ 1434 if (dev_spec->shadow_ram[i].modified) { 1435 data = dev_spec->shadow_ram[i].value; 1436 } else { 1437 e1000_read_flash_word_ich8lan(hw, 1438 i + old_bank_offset, 1439 &data); 1440 } 1441 1442 /* 1443 * If the word is 0x13, then make sure the signature bits 1444 * (15:14) are 11b until the commit has completed. 1445 * This will allow us to write 10b which indicates the 1446 * signature is valid. We want to do this after the write 1447 * has completed so that we don't mark the segment valid 1448 * while the write is still in progress 1449 */ 1450 if (i == E1000_ICH_NVM_SIG_WORD) 1451 data |= E1000_ICH_NVM_SIG_MASK; 1452 1453 /* Convert offset to bytes. */ 1454 act_offset = (i + new_bank_offset) << 1; 1455 1456 usec_delay(100); 1457 /* Write the bytes to the new bank. */ 1458 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 1459 act_offset, 1460 (u8)data); 1461 if (ret_val) 1462 break; 1463 1464 usec_delay(100); 1465 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 1466 act_offset + 1, 1467 (u8)(data >> 8)); 1468 if (ret_val) 1469 break; 1470 } 1471 1472 /* 1473 * Don't bother writing the segment valid bits if sector 1474 * programming failed. 
1475 */ 1476 if (ret_val) { 1477 DEBUGOUT("Flash commit failed.\n"); 1478 nvm->ops.release(hw); 1479 goto out; 1480 } 1481 1482 /* 1483 * Finally validate the new segment by setting bit 15:14 1484 * to 10b in word 0x13 , this can be done without an 1485 * erase as well since these bits are 11 to start with 1486 * and we need to change bit 14 to 0b 1487 */ 1488 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 1489 e1000_read_flash_word_ich8lan(hw, act_offset, &data); 1490 data &= 0xBFFF; 1491 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 1492 act_offset * 2 + 1, 1493 (u8)(data >> 8)); 1494 if (ret_val) { 1495 nvm->ops.release(hw); 1496 goto out; 1497 } 1498 1499 /* 1500 * And invalidate the previously valid segment by setting 1501 * its signature word (0x13) high_byte to 0b. This can be 1502 * done without an erase because flash erase sets all bits 1503 * to 1's. We can write 1's to 0's without an erase 1504 */ 1505 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 1506 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 1507 if (ret_val) { 1508 nvm->ops.release(hw); 1509 goto out; 1510 } 1511 1512 /* Great! Everything worked, we can now clear the cached entries. */ 1513 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 1514 dev_spec->shadow_ram[i].modified = FALSE; 1515 dev_spec->shadow_ram[i].value = 0xFFFF; 1516 } 1517 1518 nvm->ops.release(hw); 1519 1520 /* 1521 * Reload the EEPROM, or else modifications will not appear 1522 * until after the next adapter reset. 1523 */ 1524 nvm->ops.reload(hw); 1525 msec_delay(10); 1526 1527out: 1528 return ret_val; 1529} 1530 1531/** 1532 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum 1533 * @hw: pointer to the HW structure 1534 * 1535 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. 
1536 * If the bit is 0, that the EEPROM had been modified, but the checksum was 1537 * not calculated, in which case we need to calculate the checksum and set 1538 * bit 6. 1539 **/ 1540static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) 1541{ 1542 s32 ret_val = E1000_SUCCESS; 1543 u16 data; 1544 1545 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan"); 1546 1547 /* 1548 * Read 0x19 and check bit 6. If this bit is 0, the checksum 1549 * needs to be fixed. This bit is an indication that the NVM 1550 * was prepared by OEM software and did not calculate the 1551 * checksum...a likely scenario. 1552 */ 1553 ret_val = hw->nvm.ops.read(hw, 0x19, 1, &data); 1554 if (ret_val) 1555 goto out; 1556 1557 if ((data & 0x40) == 0) { 1558 data |= 0x40; 1559 ret_val = hw->nvm.ops.write(hw, 0x19, 1, &data); 1560 if (ret_val) 1561 goto out; 1562 ret_val = hw->nvm.ops.update(hw); 1563 if (ret_val) 1564 goto out; 1565 } 1566 1567 ret_val = e1000_validate_nvm_checksum_generic(hw); 1568 1569out: 1570 return ret_val; 1571} 1572 1573/** 1574 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM 1575 * @hw: pointer to the HW structure 1576 * @offset: The offset (in bytes) of the byte/word to read. 1577 * @size: Size of data to read, 1=byte 2=word 1578 * @data: The byte(s) to write to the NVM. 1579 * 1580 * Writes one/two bytes to the NVM using the flash access registers. 
1581 **/ 1582static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 1583 u8 size, u16 data) 1584{ 1585 union ich8_hws_flash_status hsfsts; 1586 union ich8_hws_flash_ctrl hsflctl; 1587 u32 flash_linear_addr; 1588 u32 flash_data = 0; 1589 s32 ret_val = -E1000_ERR_NVM; 1590 u8 count = 0; 1591 1592 DEBUGFUNC("e1000_write_ich8_data"); 1593 1594 if (size < 1 || size > 2 || data > size * 0xff || 1595 offset > ICH_FLASH_LINEAR_ADDR_MASK) 1596 goto out; 1597 1598 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + 1599 hw->nvm.flash_base_addr; 1600 1601 do { 1602 usec_delay(1); 1603 /* Steps */ 1604 ret_val = e1000_flash_cycle_init_ich8lan(hw); 1605 if (ret_val != E1000_SUCCESS) 1606 break; 1607 1608 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 1609 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 1610 hsflctl.hsf_ctrl.fldbcount = size -1; 1611 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; 1612 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); 1613 1614 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); 1615 1616 if (size == 1) 1617 flash_data = (u32)data & 0x00FF; 1618 else 1619 flash_data = (u32)data; 1620 1621 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); 1622 1623 /* 1624 * check if FCERR is set to 1 , if set to 1, clear it 1625 * and try the whole sequence a few more times else done 1626 */ 1627 ret_val = e1000_flash_cycle_ich8lan(hw, 1628 ICH_FLASH_WRITE_COMMAND_TIMEOUT); 1629 if (ret_val == E1000_SUCCESS) { 1630 break; 1631 } else { 1632 /* 1633 * If we're here, then things are most likely 1634 * completely hosed, but if the error condition 1635 * is detected, it won't hurt to give it another 1636 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. 1637 */ 1638 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 1639 ICH_FLASH_HSFSTS); 1640 if (hsfsts.hsf_status.flcerr == 1) { 1641 /* Repeat for some time before giving up. 
*/ 1642 continue; 1643 } else if (hsfsts.hsf_status.flcdone == 0) { 1644 DEBUGOUT("Timeout error - flash cycle " 1645 "did not complete."); 1646 break; 1647 } 1648 } 1649 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 1650 1651out: 1652 return ret_val; 1653} 1654 1655/** 1656 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM 1657 * @hw: pointer to the HW structure 1658 * @offset: The index of the byte to read. 1659 * @data: The byte to write to the NVM. 1660 * 1661 * Writes a single byte to the NVM using the flash access registers. 1662 **/ 1663static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, 1664 u8 data) 1665{ 1666 u16 word = (u16)data; 1667 1668 DEBUGFUNC("e1000_write_flash_byte_ich8lan"); 1669 1670 return e1000_write_flash_data_ich8lan(hw, offset, 1, word); 1671} 1672 1673/** 1674 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM 1675 * @hw: pointer to the HW structure 1676 * @offset: The offset of the byte to write. 1677 * @byte: The byte to write to the NVM. 1678 * 1679 * Writes a single byte to the NVM using the flash access registers. 1680 * Goes through a retry algorithm before giving up. 
1681 **/ 1682static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, 1683 u32 offset, u8 byte) 1684{ 1685 s32 ret_val; 1686 u16 program_retries; 1687 1688 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan"); 1689 1690 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 1691 if (ret_val == E1000_SUCCESS) 1692 goto out; 1693 1694 for (program_retries = 0; program_retries < 100; program_retries++) { 1695 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset); 1696 usec_delay(100); 1697 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 1698 if (ret_val == E1000_SUCCESS) 1699 break; 1700 } 1701 if (program_retries == 100) { 1702 ret_val = -E1000_ERR_NVM; 1703 goto out; 1704 } 1705 1706out: 1707 return ret_val; 1708} 1709 1710/** 1711 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM 1712 * @hw: pointer to the HW structure 1713 * @bank: 0 for first bank, 1 for second bank, etc. 1714 * 1715 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. 1716 * bank N is 4096 * N + flash_reg_addr. 1717 **/ 1718static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) 1719{ 1720 struct e1000_nvm_info *nvm = &hw->nvm; 1721 union ich8_hws_flash_status hsfsts; 1722 union ich8_hws_flash_ctrl hsflctl; 1723 u32 flash_linear_addr; 1724 /* bank size is in 16bit words - adjust to bytes */ 1725 u32 flash_bank_size = nvm->flash_bank_size * 2; 1726 s32 ret_val = E1000_SUCCESS; 1727 s32 count = 0; 1728 s32 j, iteration, sector_size; 1729 1730 DEBUGFUNC("e1000_erase_flash_bank_ich8lan"); 1731 1732 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 1733 1734 /* 1735 * Determine HW Sector size: Read BERASE bits of hw flash status 1736 * register 1737 * 00: The Hw sector is 256 bytes, hence we need to erase 16 1738 * consecutive sectors. The start index for the nth Hw sector 1739 * can be calculated as = bank * 4096 + n * 256 1740 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 
1741 * The start index for the nth Hw sector can be calculated 1742 * as = bank * 4096 1743 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 1744 * (ich9 only, otherwise error condition) 1745 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 1746 */ 1747 switch (hsfsts.hsf_status.berasesz) { 1748 case 0: 1749 /* Hw sector size 256 */ 1750 sector_size = ICH_FLASH_SEG_SIZE_256; 1751 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; 1752 break; 1753 case 1: 1754 sector_size = ICH_FLASH_SEG_SIZE_4K; 1755 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K; 1756 break; 1757 case 2: 1758 if (hw->mac.type == e1000_ich9lan) { 1759 sector_size = ICH_FLASH_SEG_SIZE_8K; 1760 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K; 1761 } else { 1762 ret_val = -E1000_ERR_NVM; 1763 goto out; 1764 } 1765 break; 1766 case 3: 1767 sector_size = ICH_FLASH_SEG_SIZE_64K; 1768 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K; 1769 break; 1770 default: 1771 ret_val = -E1000_ERR_NVM; 1772 goto out; 1773 } 1774 1775 /* Start with the base address, then add the sector offset. */ 1776 flash_linear_addr = hw->nvm.flash_base_addr; 1777 flash_linear_addr += (bank) ? (sector_size * iteration) : 0; 1778 1779 for (j = 0; j < iteration ; j++) { 1780 do { 1781 /* Steps */ 1782 ret_val = e1000_flash_cycle_init_ich8lan(hw); 1783 if (ret_val) 1784 goto out; 1785 1786 /* 1787 * Write a value 11 (block Erase) in Flash 1788 * Cycle field in hw flash control 1789 */ 1790 hsflctl.regval = E1000_READ_FLASH_REG16(hw, 1791 ICH_FLASH_HSFCTL); 1792 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; 1793 E1000_WRITE_FLASH_REG16(hw, 1794 ICH_FLASH_HSFCTL, 1795 hsflctl.regval); 1796 1797 /* 1798 * Write the last 24 bits of an index within the 1799 * block into Flash Linear address field in Flash 1800 * Address. 
1801 */ 1802 flash_linear_addr += (j * sector_size); 1803 E1000_WRITE_FLASH_REG(hw, 1804 ICH_FLASH_FADDR, 1805 flash_linear_addr); 1806 1807 ret_val = e1000_flash_cycle_ich8lan(hw, 1808 ICH_FLASH_ERASE_COMMAND_TIMEOUT); 1809 if (ret_val == E1000_SUCCESS) { 1810 break; 1811 } else { 1812 /* 1813 * Check if FCERR is set to 1. If 1, 1814 * clear it and try the whole sequence 1815 * a few more times else Done 1816 */ 1817 hsfsts.regval = E1000_READ_FLASH_REG16(hw, 1818 ICH_FLASH_HSFSTS); 1819 if (hsfsts.hsf_status.flcerr == 1) { 1820 /* 1821 * repeat for some time before 1822 * giving up 1823 */ 1824 continue; 1825 } else if (hsfsts.hsf_status.flcdone == 0) 1826 goto out; 1827 } 1828 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); 1829 } 1830 1831out: 1832 return ret_val; 1833} 1834 1835/** 1836 * e1000_valid_led_default_ich8lan - Set the default LED settings 1837 * @hw: pointer to the HW structure 1838 * @data: Pointer to the LED settings 1839 * 1840 * Reads the LED default settings from the NVM to data. If the NVM LED 1841 * settings is all 0's or F's, set the LED default to a valid LED default 1842 * setting. 1843 **/ 1844static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) 1845{ 1846 s32 ret_val; 1847 1848 DEBUGFUNC("e1000_valid_led_default_ich8lan"); 1849 1850 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); 1851 if (ret_val) { 1852 DEBUGOUT("NVM Read Error\n"); 1853 goto out; 1854 } 1855 1856 if (*data == ID_LED_RESERVED_0000 || 1857 *data == ID_LED_RESERVED_FFFF) 1858 *data = ID_LED_DEFAULT_ICH8LAN; 1859 1860out: 1861 return ret_val; 1862} 1863 1864/** 1865 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width 1866 * @hw: pointer to the HW structure 1867 * 1868 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability 1869 * register, so the the bus width is hard coded. 
1870 **/ 1871static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) 1872{ 1873 struct e1000_bus_info *bus = &hw->bus; 1874 s32 ret_val; 1875 1876 DEBUGFUNC("e1000_get_bus_info_ich8lan"); 1877 1878 ret_val = e1000_get_bus_info_pcie_generic(hw); 1879 1880 /* 1881 * ICH devices are "PCI Express"-ish. They have 1882 * a configuration space, but do not contain 1883 * PCI Express Capability registers, so bus width 1884 * must be hardcoded. 1885 */ 1886 if (bus->width == e1000_bus_width_unknown) 1887 bus->width = e1000_bus_width_pcie_x1; 1888 1889 return ret_val; 1890} 1891 1892/** 1893 * e1000_reset_hw_ich8lan - Reset the hardware 1894 * @hw: pointer to the HW structure 1895 * 1896 * Does a full reset of the hardware which includes a reset of the PHY and 1897 * MAC. 1898 **/ 1899static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) 1900{ 1901 u32 ctrl, icr, kab; 1902 s32 ret_val; 1903 1904 DEBUGFUNC("e1000_reset_hw_ich8lan"); 1905 1906 /* 1907 * Prevent the PCI-E bus from sticking if there is no TLP connection 1908 * on the last TLP read/write transaction when MAC is reset. 1909 */ 1910 ret_val = e1000_disable_pcie_master_generic(hw); 1911 if (ret_val) { 1912 DEBUGOUT("PCI-E Master disable polling has failed.\n"); 1913 } 1914 1915 DEBUGOUT("Masking off all interrupts\n"); 1916 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 1917 1918 /* 1919 * Disable the Transmit and Receive units. Then delay to allow 1920 * any pending transactions to complete before we hit the MAC 1921 * with the global reset. 1922 */ 1923 E1000_WRITE_REG(hw, E1000_RCTL, 0); 1924 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); 1925 E1000_WRITE_FLUSH(hw); 1926 1927 msec_delay(10); 1928 1929 /* Workaround for ICH8 bit corruption issue in FIFO memory */ 1930 if (hw->mac.type == e1000_ich8lan) { 1931 /* Set Tx and Rx buffer allocation to 8k apiece. */ 1932 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K); 1933 /* Set Packet Buffer Size to 16k. 
*/ 1934 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K); 1935 } 1936 1937 ctrl = E1000_READ_REG(hw, E1000_CTRL); 1938 1939 if (!hw->phy.ops.check_reset_block(hw) && !hw->phy.reset_disable) { 1940 /* 1941 * PHY HW reset requires MAC CORE reset at the same 1942 * time to make sure the interface between MAC and the 1943 * external PHY is reset. 1944 */ 1945 ctrl |= E1000_CTRL_PHY_RST; 1946 } 1947 ret_val = e1000_acquire_swflag_ich8lan(hw); 1948 DEBUGOUT("Issuing a global reset to ich8lan"); 1949 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST)); 1950 msec_delay(20); 1951 1952 ret_val = e1000_get_auto_rd_done_generic(hw); 1953 if (ret_val) { 1954 /* 1955 * When auto config read does not complete, do not 1956 * return with an error. This can happen in situations 1957 * where there is no eeprom and prevents getting link. 1958 */ 1959 DEBUGOUT("Auto Read Done did not complete\n"); 1960 } 1961 1962 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); 1963 icr = E1000_READ_REG(hw, E1000_ICR); 1964 1965 kab = E1000_READ_REG(hw, E1000_KABGTXD); 1966 kab |= E1000_KABGTXD_BGSQLBIAS; 1967 E1000_WRITE_REG(hw, E1000_KABGTXD, kab); 1968 1969 return ret_val; 1970} 1971 1972/** 1973 * e1000_init_hw_ich8lan - Initialize the hardware 1974 * @hw: pointer to the HW structure 1975 * 1976 * Prepares the hardware for transmit and receive by doing the following: 1977 * - initialize hardware bits 1978 * - initialize LED identification 1979 * - setup receive address registers 1980 * - setup flow control 1981 * - setup transmit descriptors 1982 * - clear statistics 1983 **/ 1984static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) 1985{ 1986 struct e1000_mac_info *mac = &hw->mac; 1987 u32 ctrl_ext, txdctl, snoop; 1988 s32 ret_val; 1989 u16 i; 1990 1991 DEBUGFUNC("e1000_init_hw_ich8lan"); 1992 1993 e1000_initialize_hw_bits_ich8lan(hw); 1994 1995 /* Initialize identification LED */ 1996 ret_val = e1000_id_led_init_generic(hw); 1997 if (ret_val) { 1998 DEBUGOUT("Error initializing identification 
LED\n"); 1999 /* This is not fatal and we should not stop init due to this */ 2000 } 2001 2002 /* Setup the receive address. */ 2003 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); 2004 2005 /* Zero out the Multicast HASH table */ 2006 DEBUGOUT("Zeroing the MTA\n"); 2007 for (i = 0; i < mac->mta_reg_count; i++) 2008 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 2009 2010 /* Setup link and flow control */ 2011 ret_val = mac->ops.setup_link(hw); 2012 2013 /* Set the transmit descriptor write-back policy for both queues */ 2014 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); 2015 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | 2016 E1000_TXDCTL_FULL_TX_DESC_WB; 2017 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | 2018 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 2019 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); 2020 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1)); 2021 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | 2022 E1000_TXDCTL_FULL_TX_DESC_WB; 2023 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | 2024 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 2025 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl); 2026 2027 /* 2028 * ICH8 has opposite polarity of no_snoop bits. 2029 * By default, we should use snoop behavior. 2030 */ 2031 if (mac->type == e1000_ich8lan) 2032 snoop = PCIE_ICH8_SNOOP_ALL; 2033 else 2034 snoop = (u32)~(PCIE_NO_SNOOP_ALL); 2035 e1000_set_pcie_no_snoop_generic(hw, snoop); 2036 2037 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2038 ctrl_ext |= E1000_CTRL_EXT_RO_DIS; 2039 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 2040 2041 /* 2042 * Clear all of the statistics registers (clear on read). It is 2043 * important that we do this after we have tried to establish link 2044 * because the symbol error count will increment wildly if there 2045 * is no link. 
2046 */ 2047 e1000_clear_hw_cntrs_ich8lan(hw); 2048 2049 return ret_val; 2050} 2051/** 2052 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits 2053 * @hw: pointer to the HW structure 2054 * 2055 * Sets/Clears required hardware bits necessary for correctly setting up the 2056 * hardware for transmit and receive. 2057 **/ 2058static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) 2059{ 2060 u32 reg; 2061 2062 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan"); 2063 2064 if (hw->mac.disable_hw_init_bits) 2065 goto out; 2066 2067 /* Extended Device Control */ 2068 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2069 reg |= (1 << 22); 2070 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2071 2072 /* Transmit Descriptor Control 0 */ 2073 reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); 2074 reg |= (1 << 22); 2075 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); 2076 2077 /* Transmit Descriptor Control 1 */ 2078 reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); 2079 reg |= (1 << 22); 2080 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); 2081 2082 /* Transmit Arbitration Control 0 */ 2083 reg = E1000_READ_REG(hw, E1000_TARC(0)); 2084 if (hw->mac.type == e1000_ich8lan) 2085 reg |= (1 << 28) | (1 << 29); 2086 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); 2087 E1000_WRITE_REG(hw, E1000_TARC(0), reg); 2088 2089 /* Transmit Arbitration Control 1 */ 2090 reg = E1000_READ_REG(hw, E1000_TARC(1)); 2091 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) 2092 reg &= ~(1 << 28); 2093 else 2094 reg |= (1 << 28); 2095 reg |= (1 << 24) | (1 << 26) | (1 << 30); 2096 E1000_WRITE_REG(hw, E1000_TARC(1), reg); 2097 2098 /* Device Status */ 2099 if (hw->mac.type == e1000_ich8lan) { 2100 reg = E1000_READ_REG(hw, E1000_STATUS); 2101 reg &= ~(1 << 31); 2102 E1000_WRITE_REG(hw, E1000_STATUS, reg); 2103 } 2104 2105out: 2106 return; 2107} 2108 2109/** 2110 * e1000_setup_link_ich8lan - Setup flow control and link settings 2111 * @hw: pointer to the HW structure 2112 * 2113 * Determines which flow 
control settings to use, then configures flow 2114 * control. Calls the appropriate media-specific link configuration 2115 * function. Assuming the adapter has a valid link partner, a valid link 2116 * should be established. Assumes the hardware has previously been reset 2117 * and the transmitter and receiver are not enabled. 2118 **/ 2119static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) 2120{ 2121 s32 ret_val = E1000_SUCCESS; 2122 2123 DEBUGFUNC("e1000_setup_link_ich8lan"); 2124 2125 if (hw->phy.ops.check_reset_block(hw)) 2126 goto out; 2127 2128 /* 2129 * ICH parts do not have a word in the NVM to determine 2130 * the default flow control setting, so we explicitly 2131 * set it to full. 2132 */ 2133 if (hw->fc.type == e1000_fc_default) 2134 hw->fc.type = e1000_fc_full; 2135 2136 hw->fc.original_type = hw->fc.type; 2137 2138 DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type); 2139 2140 /* Continue to configure the copper link. */ 2141 ret_val = hw->mac.ops.setup_physical_interface(hw); 2142 if (ret_val) 2143 goto out; 2144 2145 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); 2146 2147 ret_val = e1000_set_fc_watermarks_generic(hw); 2148 2149out: 2150 return ret_val; 2151} 2152 2153/** 2154 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface 2155 * @hw: pointer to the HW structure 2156 * 2157 * Configures the kumeran interface to the PHY to wait the appropriate time 2158 * when polling the PHY, then call the generic setup_copper_link to finish 2159 * configuring the copper link. 
2160 **/ 2161static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) 2162{ 2163 u32 ctrl; 2164 s32 ret_val; 2165 u16 reg_data; 2166 2167 DEBUGFUNC("e1000_setup_copper_link_ich8lan"); 2168 2169 ctrl = E1000_READ_REG(hw, E1000_CTRL); 2170 ctrl |= E1000_CTRL_SLU; 2171 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 2172 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 2173 2174 /* 2175 * Set the mac to wait the maximum time between each iteration 2176 * and increase the max iterations when polling the phy; 2177 * this fixes erroneous timeouts at 10Mbps. 2178 */ 2179 ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); 2180 if (ret_val) 2181 goto out; 2182 ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), ®_data); 2183 if (ret_val) 2184 goto out; 2185 reg_data |= 0x3F; 2186 ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); 2187 if (ret_val) 2188 goto out; 2189 2190 if (hw->phy.type == e1000_phy_igp_3) { 2191 ret_val = e1000_copper_link_setup_igp(hw); 2192 if (ret_val) 2193 goto out; 2194 } else if (hw->phy.type == e1000_phy_bm) { 2195 ret_val = e1000_copper_link_setup_m88(hw); 2196 if (ret_val) 2197 goto out; 2198 } 2199 2200 if (hw->phy.type == e1000_phy_ife) { 2201 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, 2202 ®_data); 2203 if (ret_val) 2204 goto out; 2205 2206 reg_data &= ~IFE_PMC_AUTO_MDIX; 2207 2208 switch (hw->phy.mdix) { 2209 case 1: 2210 reg_data &= ~IFE_PMC_FORCE_MDIX; 2211 break; 2212 case 2: 2213 reg_data |= IFE_PMC_FORCE_MDIX; 2214 break; 2215 case 0: 2216 default: 2217 reg_data |= IFE_PMC_AUTO_MDIX; 2218 break; 2219 } 2220 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, 2221 reg_data); 2222 if (ret_val) 2223 goto out; 2224 } 2225 ret_val = e1000_setup_copper_link_generic(hw); 2226 2227out: 2228 return ret_val; 2229} 2230 2231/** 2232 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex 2233 * @hw: pointer to the HW structure 2234 * @speed: pointer to store current link speed 
2235 * @duplex: pointer to store the current link duplex 2236 * 2237 * Calls the generic get_speed_and_duplex to retrieve the current link 2238 * information and then calls the Kumeran lock loss workaround for links at 2239 * gigabit speeds. 2240 **/ 2241static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, 2242 u16 *duplex) 2243{ 2244 s32 ret_val; 2245 2246 DEBUGFUNC("e1000_get_link_up_info_ich8lan"); 2247 2248 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); 2249 if (ret_val) 2250 goto out; 2251 2252 if ((hw->mac.type == e1000_ich8lan) && 2253 (hw->phy.type == e1000_phy_igp_3) && 2254 (*speed == SPEED_1000)) { 2255 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); 2256 } 2257 2258out: 2259 return ret_val; 2260} 2261 2262/** 2263 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround 2264 * @hw: pointer to the HW structure 2265 * 2266 * Work-around for 82566 Kumeran PCS lock loss: 2267 * On link status change (i.e. PCI reset, speed change) and link is up and 2268 * speed is gigabit- 2269 * 0) if workaround is optionally disabled do nothing 2270 * 1) wait 1ms for Kumeran link to come up 2271 * 2) check Kumeran Diagnostic register PCS lock loss bit 2272 * 3) if not set the link is locked (all is good), otherwise... 2273 * 4) reset the PHY 2274 * 5) repeat up to 10 times 2275 * Note: this is only called for IGP3 copper when speed is 1gb. 
 **/
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec;
	u32 phy_ctrl;
	s32 ret_val = E1000_SUCCESS;
	u16 i, data;
	bool link;

	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");

	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;

	if (!dev_spec) {
		DEBUGOUT("dev_spec pointer is set to NULL.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* The workaround can be toggled at runtime via
	 * e1000_set_kmrn_lock_loss_workaround_ich8lan(); skip if disabled. */
	if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
		goto out;

	/*
	 * Make sure link is up before proceeding. If not just return.
	 * Attempting this while link is negotiating fouled up link
	 * stability
	 */
	/* NOTE(review): the return value of e1000_phy_has_link_generic() is
	 * not checked before 'link' is read; if the call fails, 'link' may
	 * be uninitialized — confirm against the generic implementation. */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (!link) {
		ret_val = E1000_SUCCESS;
		goto out;
	}

	for (i = 0; i < 10; i++) {
		/* read once to clear */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			goto out;
		/* and again to get new status */
		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			goto out;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
			ret_val = E1000_SUCCESS;
			goto out;
		}

		/* Issue PHY reset */
		hw->phy.ops.reset(hw);
		msec_delay_irq(5);
	}
	/* All ten retries exhausted: fall back to disabling gigabit rather
	 * than keep an unstable link. */
	/* Disable GigE link negotiation */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
	             E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	ret_val = -E1000_ERR_PHY;

out:
	return ret_val;
}

/**
 *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround
 *  state
 *  @hw: pointer to the HW structure
 *  @state: boolean value used to set the current Kumeran workaround state
 *
 *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
 *  /disabled - FALSE).
 **/
void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
                                                 bool state)
{
	struct e1000_dev_spec_ich8lan *dev_spec;

	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");

	/* The lock-loss workaround is only meaningful on ICH8 silicon. */
	if (hw->mac.type != e1000_ich8lan) {
		DEBUGOUT("Workaround applies to ICH8 only.\n");
		goto out;
	}

	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;

	if (!dev_spec) {
		DEBUGOUT("dev_spec pointer is set to NULL.\n");
		goto out;
	}

	dev_spec->kmrn_lock_loss_workaround_enabled = state;

out:
	return;
}

/**
 *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 *  @hw: pointer to the HW structure
 *
 *  Workaround for 82566 power-down on D3 entry:
 *    1) disable gigabit link
 *    2) write VR power-down enable
 *    3) read it back
 *  Continue if successful, else issue LCD reset and repeat
 **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8 retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

	/* Only the IGP3 PHY needs this sequence. */
	if (hw->phy.type != e1000_phy_igp_3)
		goto out;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
		        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		/* NOTE(review): the read_reg/write_reg return values are
		 * ignored here; the read-back test below is the only
		 * verification — best-effort by design, it appears. */
		hw->phy.ops.read_reg(hw,
		                     IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw,
		                      IGP3_VR_CTRL,
		                      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);

out:
	return;
}

/**
 *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 *  @hw: pointer to the HW structure
 *
 *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
 *  LPLU, Gig disable, MDIC PHY reset):
 *    1) Set Kumeran Near-end loopback
 *    2) Clear Kumeran Near-end loopback
 *  Should only be called for ICH8[m] devices with IGP_3 Phy.
2449 **/ 2450void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) 2451{ 2452 s32 ret_val = E1000_SUCCESS; 2453 u16 reg_data; 2454 2455 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan"); 2456 2457 if ((hw->mac.type != e1000_ich8lan) || 2458 (hw->phy.type != e1000_phy_igp_3)) 2459 goto out; 2460 2461 ret_val = e1000_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, 2462 ®_data); 2463 if (ret_val) 2464 goto out; 2465 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; 2466 ret_val = e1000_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, 2467 reg_data); 2468 if (ret_val) 2469 goto out; 2470 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; 2471 ret_val = e1000_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, 2472 reg_data); 2473out: 2474 return; 2475} 2476 2477/** 2478 * e1000_disable_gig_wol_ich8lan - disable gig during WoL 2479 * @hw: pointer to the HW structure 2480 * 2481 * During S0 to Sx transition, it is possible the link remains at gig 2482 * instead of negotiating to a lower speed. Before going to Sx, set 2483 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation 2484 * to a lower speed. 2485 * 2486 * Should only be called for ICH9. 2487 **/ 2488void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw) 2489{ 2490 u32 phy_ctrl; 2491 2492 if (hw->mac.type == e1000_ich9lan) { 2493 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); 2494 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | 2495 E1000_PHY_CTRL_GBE_DISABLE; 2496 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); 2497 } 2498 2499 return; 2500} 2501 2502/** 2503 * e1000_cleanup_led_ich8lan - Restore the default LED operation 2504 * @hw: pointer to the HW structure 2505 * 2506 * Return the LED back to the default configuration. 
2507 **/ 2508static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) 2509{ 2510 s32 ret_val = E1000_SUCCESS; 2511 2512 DEBUGFUNC("e1000_cleanup_led_ich8lan"); 2513 2514 if (hw->phy.type == e1000_phy_ife) 2515 ret_val = hw->phy.ops.write_reg(hw, 2516 IFE_PHY_SPECIAL_CONTROL_LED, 2517 0); 2518 else 2519 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); 2520 2521 return ret_val; 2522} 2523 2524/** 2525 * e1000_led_on_ich8lan - Turn LEDs on 2526 * @hw: pointer to the HW structure 2527 * 2528 * Turn on the LEDs. 2529 **/ 2530static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) 2531{ 2532 s32 ret_val = E1000_SUCCESS; 2533 2534 DEBUGFUNC("e1000_led_on_ich8lan"); 2535 2536 if (hw->phy.type == e1000_phy_ife) 2537 ret_val = hw->phy.ops.write_reg(hw, 2538 IFE_PHY_SPECIAL_CONTROL_LED, 2539 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); 2540 else 2541 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); 2542 2543 return ret_val; 2544} 2545 2546/** 2547 * e1000_led_off_ich8lan - Turn LEDs off 2548 * @hw: pointer to the HW structure 2549 * 2550 * Turn off the LEDs. 2551 **/ 2552static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) 2553{ 2554 s32 ret_val = E1000_SUCCESS; 2555 2556 DEBUGFUNC("e1000_led_off_ich8lan"); 2557 2558 if (hw->phy.type == e1000_phy_ife) 2559 ret_val = hw->phy.ops.write_reg(hw, 2560 IFE_PHY_SPECIAL_CONTROL_LED, 2561 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); 2562 else 2563 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); 2564 2565 return ret_val; 2566} 2567 2568/** 2569 * e1000_get_cfg_done_ich8lan - Read config done bit 2570 * @hw: pointer to the HW structure 2571 * 2572 * Read the management control register for the config done bit for 2573 * completion status. NOTE: silicon which is EEPROM-less will fail trying 2574 * to read the config done bit, so an error is *ONLY* logged and returns 2575 * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon 2576 * would not be able to be reset or change link. 
2577 **/ 2578static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) 2579{ 2580 s32 ret_val = E1000_SUCCESS; 2581 2582 e1000_get_cfg_done_generic(hw); 2583 2584 /* If EEPROM is not marked present, init the IGP 3 PHY manually */ 2585 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) && 2586 (hw->phy.type == e1000_phy_igp_3)) { 2587 e1000_phy_init_script_igp3(hw); 2588 } 2589 2590 return ret_val; 2591} 2592 2593/** 2594 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down 2595 * @hw: pointer to the HW structure 2596 * 2597 * In the case of a PHY power down to save power, or to turn off link during a 2598 * driver unload, or wake on lan is not enabled, remove the link. 2599 **/ 2600static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) 2601{ 2602 struct e1000_phy_info *phy = &hw->phy; 2603 struct e1000_mac_info *mac = &hw->mac; 2604 2605 /* If the management interface is not enabled, then power down */ 2606 if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) 2607 e1000_power_down_phy_copper(hw); 2608 2609 return; 2610} 2611 2612/** 2613 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters 2614 * @hw: pointer to the HW structure 2615 * 2616 * Clears hardware counters specific to the silicon family and calls 2617 * clear_hw_cntrs_generic to clear all general purpose counters. 
2618 **/ 2619static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) 2620{ 2621 volatile u32 temp; 2622 2623 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan"); 2624 2625 e1000_clear_hw_cntrs_base_generic(hw); 2626 2627 temp = E1000_READ_REG(hw, E1000_ALGNERRC); 2628 temp = E1000_READ_REG(hw, E1000_RXERRC); 2629 temp = E1000_READ_REG(hw, E1000_TNCRS); 2630 temp = E1000_READ_REG(hw, E1000_CEXTERR); 2631 temp = E1000_READ_REG(hw, E1000_TSCTC); 2632 temp = E1000_READ_REG(hw, E1000_TSCTFC); 2633 2634 temp = E1000_READ_REG(hw, E1000_MGTPRC); 2635 temp = E1000_READ_REG(hw, E1000_MGTPDC); 2636 temp = E1000_READ_REG(hw, E1000_MGTPTC); 2637 2638 temp = E1000_READ_REG(hw, E1000_IAC); 2639 temp = E1000_READ_REG(hw, E1000_ICRXOC); 2640} 2641 2642