1/******************************************************************************
2  SPDX-License-Identifier: BSD-3-Clause
3
4  Copyright (c) 2001-2015, Intel Corporation
5  All rights reserved.
6
7  Redistribution and use in source and binary forms, with or without
8  modification, are permitted provided that the following conditions are met:
9
10   1. Redistributions of source code must retain the above copyright notice,
11      this list of conditions and the following disclaimer.
12
13   2. Redistributions in binary form must reproduce the above copyright
14      notice, this list of conditions and the following disclaimer in the
15      documentation and/or other materials provided with the distribution.
16
17   3. Neither the name of the Intel Corporation nor the names of its
18      contributors may be used to endorse or promote products derived from
19      this software without specific prior written permission.
20
21  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  POSSIBILITY OF SUCH DAMAGE.
32
33******************************************************************************/
34/*$FreeBSD$*/
35
36/* 82562G 10/100 Network Connection
37 * 82562G-2 10/100 Network Connection
38 * 82562GT 10/100 Network Connection
39 * 82562GT-2 10/100 Network Connection
40 * 82562V 10/100 Network Connection
41 * 82562V-2 10/100 Network Connection
42 * 82566DC-2 Gigabit Network Connection
43 * 82566DC Gigabit Network Connection
44 * 82566DM-2 Gigabit Network Connection
45 * 82566DM Gigabit Network Connection
46 * 82566MC Gigabit Network Connection
47 * 82566MM Gigabit Network Connection
48 * 82567LM Gigabit Network Connection
49 * 82567LF Gigabit Network Connection
50 * 82567V Gigabit Network Connection
51 * 82567LM-2 Gigabit Network Connection
52 * 82567LF-2 Gigabit Network Connection
53 * 82567V-2 Gigabit Network Connection
54 * 82567LF-3 Gigabit Network Connection
55 * 82567LM-3 Gigabit Network Connection
56 * 82567LM-4 Gigabit Network Connection
57 * 82577LM Gigabit Network Connection
58 * 82577LC Gigabit Network Connection
59 * 82578DM Gigabit Network Connection
60 * 82578DC Gigabit Network Connection
61 * 82579LM Gigabit Network Connection
62 * 82579V Gigabit Network Connection
63 * Ethernet Connection I217-LM
64 * Ethernet Connection I217-V
65 * Ethernet Connection I218-V
66 * Ethernet Connection I218-LM
67 * Ethernet Connection (2) I218-LM
68 * Ethernet Connection (2) I218-V
69 * Ethernet Connection (3) I218-LM
70 * Ethernet Connection (3) I218-V
71 */
72
73#include "e1000_api.h"
74
75static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
76static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
77static s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
78static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
79static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
80static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
81static int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
82static int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
83static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
84static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85					      u8 *mc_addr_list,
86					      u32 mc_addr_count);
87static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
88static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
89static s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
90static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
91					    bool active);
92static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
93					    bool active);
94static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
95				   u16 words, u16 *data);
96static s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
97			       u16 *data);
98static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
99				    u16 words, u16 *data);
100static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
101static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
102static s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
103static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
104					    u16 *data);
105static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
106static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
107static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
108static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
109static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
110static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
111static s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
112static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
113					   u16 *speed, u16 *duplex);
114static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
115static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
116static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
117static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
118static s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
119static s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
120static s32  e1000_led_on_pchlan(struct e1000_hw *hw);
121static s32  e1000_led_off_pchlan(struct e1000_hw *hw);
122static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
123static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
124static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
125static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
126static s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
127					  u32 offset, u8 *data);
128static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
129					  u8 size, u16 *data);
130static s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
131					    u32 *data);
132static s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
133					   u32 offset, u32 *data);
134static s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
135					     u32 offset, u32 data);
136static s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
137						  u32 offset, u32 dword);
138static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
139					  u32 offset, u16 *data);
140static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
141						 u32 offset, u8 byte);
142static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
143static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
144static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
145static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
146static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
147static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
148static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
149
150/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
151/* Offset 04h HSFSTS */
152union ich8_hws_flash_status {
153	struct ich8_hsfsts {
154		u16 flcdone:1; /* bit 0 Flash Cycle Done */
155		u16 flcerr:1; /* bit 1 Flash Cycle Error */
156		u16 dael:1; /* bit 2 Direct Access error Log */
157		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
158		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
159		u16 reserved1:2; /* bit 13:6 Reserved */
160		u16 reserved2:6; /* bit 13:6 Reserved */
161		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
162		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
163	} hsf_status;
164	u16 regval;
165};
166
167/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
168/* Offset 06h FLCTL */
169union ich8_hws_flash_ctrl {
170	struct ich8_hsflctl {
171		u16 flcgo:1;   /* 0 Flash Cycle Go */
172		u16 flcycle:2;   /* 2:1 Flash Cycle */
173		u16 reserved:5;   /* 7:3 Reserved  */
174		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
175		u16 flockdn:6;   /* 15:10 Reserved */
176	} hsf_ctrl;
177	u16 regval;
178};
179
180/* ICH Flash Region Access Permissions */
181union ich8_hws_flash_regacc {
182	struct ich8_flracc {
183		u32 grra:8; /* 0:7 GbE region Read Access */
184		u32 grwa:8; /* 8:15 GbE region Write Access */
185		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
186		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
187	} hsf_flregacc;
188	u16 regval;
189};
190
191/**
192 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
193 *  @hw: pointer to the HW structure
194 *
195 *  Test access to the PHY registers by reading the PHY ID registers.  If
196 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
197 *  otherwise assume the read PHY ID is correct if it is valid.
198 *
199 *  Assumes the sw/fw/hw semaphore is already acquired.
200 **/
201static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
202{
203	u16 phy_reg = 0;
204	u32 phy_id = 0;
205	s32 ret_val = 0;
206	u16 retry_count;
207	u32 mac_reg = 0;
208
209	for (retry_count = 0; retry_count < 2; retry_count++) {
210		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
211		if (ret_val || (phy_reg == 0xFFFF))
212			continue;
213		phy_id = (u32)(phy_reg << 16);
214
215		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
216		if (ret_val || (phy_reg == 0xFFFF)) {
217			phy_id = 0;
218			continue;
219		}
220		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
221		break;
222	}
223
224	if (hw->phy.id) {
225		if  (hw->phy.id == phy_id)
226			goto out;
227	} else if (phy_id) {
228		hw->phy.id = phy_id;
229		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
230		goto out;
231	}
232
233	/* In case the PHY needs to be in mdio slow mode,
234	 * set slow mode and try to get the PHY id again.
235	 */
236	if (hw->mac.type < e1000_pch_lpt) {
237		hw->phy.ops.release(hw);
238		ret_val = e1000_set_mdio_slow_mode_hv(hw);
239		if (!ret_val)
240			ret_val = e1000_get_phy_id(hw);
241		hw->phy.ops.acquire(hw);
242	}
243
244	if (ret_val)
245		return FALSE;
246out:
247	if (hw->mac.type >= e1000_pch_lpt) {
248		/* Only unforce SMBus if ME is not active */
249		if (!(E1000_READ_REG(hw, E1000_FWSM) &
250		    E1000_ICH_FWSM_FW_VALID)) {
251			/* Unforce SMBus mode in PHY */
252			hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
253			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
254			hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
255
256			/* Unforce SMBus mode in MAC */
257			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
258			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
259			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
260		}
261	}
262
263	return TRUE;
264}
265
266/**
267 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
268 *  @hw: pointer to the HW structure
269 *
270 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
271 *  used to reset the PHY to a quiescent state when necessary.
272 **/
273static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
274{
275	u32 mac_reg;
276
277	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
278
279	/* Set Phy Config Counter to 50msec */
280	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
281	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
282	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
283	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
284
285	/* Toggle LANPHYPC Value bit */
286	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
287	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
288	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
289	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
290	E1000_WRITE_FLUSH(hw);
291	msec_delay(1);
292	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
293	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
294	E1000_WRITE_FLUSH(hw);
295
296	if (hw->mac.type < e1000_pch_lpt) {
297		msec_delay(50);
298	} else {
299		u16 count = 20;
300
301		do {
302			msec_delay(5);
303		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
304			   E1000_CTRL_EXT_LPCD) && count--);
305
306		msec_delay(30);
307	}
308}
309
310/**
311 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
312 *  @hw: pointer to the HW structure
313 *
314 *  Workarounds/flow necessary for PHY initialization during driver load
315 *  and resume paths.
316 **/
317static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
318{
319	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
320	s32 ret_val;
321
322	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
323
324	/* Gate automatic PHY configuration by hardware on managed and
325	 * non-managed 82579 and newer adapters.
326	 */
327	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
328
329	/* It is not possible to be certain of the current state of ULP
330	 * so forcibly disable it.
331	 */
332	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
333	e1000_disable_ulp_lpt_lp(hw, TRUE);
334
335	ret_val = hw->phy.ops.acquire(hw);
336	if (ret_val) {
337		DEBUGOUT("Failed to initialize PHY flow\n");
338		goto out;
339	}
340
341	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
342	 * inaccessible and resetting the PHY is not blocked, toggle the
343	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
344	 */
345	switch (hw->mac.type) {
346	case e1000_pch_lpt:
347	case e1000_pch_spt:
348	case e1000_pch_cnp:
349	case e1000_pch_tgp:
350	case e1000_pch_adp:
351	case e1000_pch_mtp:
352		if (e1000_phy_is_accessible_pchlan(hw))
353			break;
354
355		/* Before toggling LANPHYPC, see if PHY is accessible by
356		 * forcing MAC to SMBus mode first.
357		 */
358		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
359		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
360		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
361
362		/* Wait 50 milliseconds for MAC to finish any retries
363		 * that it might be trying to perform from previous
364		 * attempts to acknowledge any phy read requests.
365		 */
366		 msec_delay(50);
367
368		/* fall-through */
369	case e1000_pch2lan:
370		if (e1000_phy_is_accessible_pchlan(hw))
371			break;
372
373		/* fall-through */
374	case e1000_pchlan:
375		if ((hw->mac.type == e1000_pchlan) &&
376		    (fwsm & E1000_ICH_FWSM_FW_VALID))
377			break;
378
379		if (hw->phy.ops.check_reset_block(hw)) {
380			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
381			ret_val = -E1000_ERR_PHY;
382			break;
383		}
384
385		/* Toggle LANPHYPC Value bit */
386		e1000_toggle_lanphypc_pch_lpt(hw);
387		if (hw->mac.type >= e1000_pch_lpt) {
388			if (e1000_phy_is_accessible_pchlan(hw))
389				break;
390
391			/* Toggling LANPHYPC brings the PHY out of SMBus mode
392			 * so ensure that the MAC is also out of SMBus mode
393			 */
394			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
395			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
396			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
397
398			if (e1000_phy_is_accessible_pchlan(hw))
399				break;
400
401			ret_val = -E1000_ERR_PHY;
402		}
403		break;
404	default:
405		break;
406	}
407
408	hw->phy.ops.release(hw);
409	if (!ret_val) {
410
411		/* Check to see if able to reset PHY.  Print error if not */
412		if (hw->phy.ops.check_reset_block(hw)) {
413			ERROR_REPORT("Reset blocked by ME\n");
414			goto out;
415		}
416
417		/* Reset the PHY before any access to it.  Doing so, ensures
418		 * that the PHY is in a known good state before we read/write
419		 * PHY registers.  The generic reset is sufficient here,
420		 * because we haven't determined the PHY type yet.
421		 */
422		ret_val = e1000_phy_hw_reset_generic(hw);
423		if (ret_val)
424			goto out;
425
426		/* On a successful reset, possibly need to wait for the PHY
427		 * to quiesce to an accessible state before returning control
428		 * to the calling function.  If the PHY does not quiesce, then
429		 * return E1000E_BLK_PHY_RESET, as this is the condition that
430		 *  the PHY is in.
431		 */
432		ret_val = hw->phy.ops.check_reset_block(hw);
433		if (ret_val)
434			ERROR_REPORT("ME blocked access to PHY after reset\n");
435	}
436
437out:
438	/* Ungate automatic PHY configuration on non-managed 82579 */
439	if ((hw->mac.type == e1000_pch2lan) &&
440	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
441		msec_delay(10);
442		e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
443	}
444
445	return ret_val;
446}
447
448/**
449 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
450 *  @hw: pointer to the HW structure
451 *
452 *  Initialize family-specific PHY parameters and function pointers.
453 **/
454static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
455{
456	struct e1000_phy_info *phy = &hw->phy;
457	s32 ret_val;
458
459	DEBUGFUNC("e1000_init_phy_params_pchlan");
460
461	phy->addr		= 1;
462	phy->reset_delay_us	= 100;
463
464	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
465	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
466	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
467	phy->ops.set_page	= e1000_set_page_igp;
468	phy->ops.read_reg	= e1000_read_phy_reg_hv;
469	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
470	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
471	phy->ops.release	= e1000_release_swflag_ich8lan;
472	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
473	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
474	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
475	phy->ops.write_reg	= e1000_write_phy_reg_hv;
476	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
477	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
478	phy->ops.power_up	= e1000_power_up_phy_copper;
479	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
480	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
481
482	phy->id = e1000_phy_unknown;
483
484	ret_val = e1000_init_phy_workarounds_pchlan(hw);
485	if (ret_val)
486		return ret_val;
487
488	if (phy->id == e1000_phy_unknown)
489		switch (hw->mac.type) {
490		default:
491			ret_val = e1000_get_phy_id(hw);
492			if (ret_val)
493				return ret_val;
494			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
495				break;
496			/* fall-through */
497		case e1000_pch2lan:
498		case e1000_pch_lpt:
499		case e1000_pch_spt:
500		case e1000_pch_cnp:
501		case e1000_pch_tgp:
502		case e1000_pch_adp:
503		case e1000_pch_mtp:
504			/* In case the PHY needs to be in mdio slow mode,
505			 * set slow mode and try to get the PHY id again.
506			 */
507			ret_val = e1000_set_mdio_slow_mode_hv(hw);
508			if (ret_val)
509				return ret_val;
510			ret_val = e1000_get_phy_id(hw);
511			if (ret_val)
512				return ret_val;
513			break;
514		}
515	phy->type = e1000_get_phy_type_from_id(phy->id);
516
517	switch (phy->type) {
518	case e1000_phy_82577:
519	case e1000_phy_82579:
520	case e1000_phy_i217:
521		phy->ops.check_polarity = e1000_check_polarity_82577;
522		phy->ops.force_speed_duplex =
523			e1000_phy_force_speed_duplex_82577;
524		phy->ops.get_cable_length = e1000_get_cable_length_82577;
525		phy->ops.get_info = e1000_get_phy_info_82577;
526		phy->ops.commit = e1000_phy_sw_reset_generic;
527		break;
528	case e1000_phy_82578:
529		phy->ops.check_polarity = e1000_check_polarity_m88;
530		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
531		phy->ops.get_cable_length = e1000_get_cable_length_m88;
532		phy->ops.get_info = e1000_get_phy_info_m88;
533		break;
534	default:
535		ret_val = -E1000_ERR_PHY;
536		break;
537	}
538
539	return ret_val;
540}
541
542/**
543 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
544 *  @hw: pointer to the HW structure
545 *
546 *  Initialize family-specific PHY parameters and function pointers.
547 **/
548static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
549{
550	struct e1000_phy_info *phy = &hw->phy;
551	s32 ret_val;
552	u16 i = 0;
553
554	DEBUGFUNC("e1000_init_phy_params_ich8lan");
555
556	phy->addr		= 1;
557	phy->reset_delay_us	= 100;
558
559	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
560	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
561	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
562	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
563	phy->ops.read_reg	= e1000_read_phy_reg_igp;
564	phy->ops.release	= e1000_release_swflag_ich8lan;
565	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
566	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
567	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
568	phy->ops.write_reg	= e1000_write_phy_reg_igp;
569	phy->ops.power_up	= e1000_power_up_phy_copper;
570	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
571
572	/* We may need to do this twice - once for IGP and if that fails,
573	 * we'll set BM func pointers and try again
574	 */
575	ret_val = e1000_determine_phy_address(hw);
576	if (ret_val) {
577		phy->ops.write_reg = e1000_write_phy_reg_bm;
578		phy->ops.read_reg  = e1000_read_phy_reg_bm;
579		ret_val = e1000_determine_phy_address(hw);
580		if (ret_val) {
581			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
582			return ret_val;
583		}
584	}
585
586	phy->id = 0;
587	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
588	       (i++ < 100)) {
589		msec_delay(1);
590		ret_val = e1000_get_phy_id(hw);
591		if (ret_val)
592			return ret_val;
593	}
594
595	/* Verify phy id */
596	switch (phy->id) {
597	case IGP03E1000_E_PHY_ID:
598		phy->type = e1000_phy_igp_3;
599		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
600		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
601		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
602		phy->ops.get_info = e1000_get_phy_info_igp;
603		phy->ops.check_polarity = e1000_check_polarity_igp;
604		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
605		break;
606	case IFE_E_PHY_ID:
607	case IFE_PLUS_E_PHY_ID:
608	case IFE_C_E_PHY_ID:
609		phy->type = e1000_phy_ife;
610		phy->autoneg_mask = E1000_ALL_NOT_GIG;
611		phy->ops.get_info = e1000_get_phy_info_ife;
612		phy->ops.check_polarity = e1000_check_polarity_ife;
613		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
614		break;
615	case BME1000_E_PHY_ID:
616		phy->type = e1000_phy_bm;
617		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
618		phy->ops.read_reg = e1000_read_phy_reg_bm;
619		phy->ops.write_reg = e1000_write_phy_reg_bm;
620		phy->ops.commit = e1000_phy_sw_reset_generic;
621		phy->ops.get_info = e1000_get_phy_info_m88;
622		phy->ops.check_polarity = e1000_check_polarity_m88;
623		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
624		break;
625	default:
626		return -E1000_ERR_PHY;
627		break;
628	}
629
630	return E1000_SUCCESS;
631}
632
633/**
634 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
635 *  @hw: pointer to the HW structure
636 *
637 *  Initialize family-specific NVM parameters and function
638 *  pointers.
639 **/
640static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
641{
642	struct e1000_nvm_info *nvm = &hw->nvm;
643	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
644	u32 gfpreg, sector_base_addr, sector_end_addr;
645	u16 i;
646	u32 nvm_size;
647
648	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
649
650	nvm->type = e1000_nvm_flash_sw;
651
652	if (hw->mac.type >= e1000_pch_spt) {
653		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
654		 * STRAP register. This is because in SPT the GbE Flash region
655		 * is no longer accessed through the flash registers. Instead,
656		 * the mechanism has changed, and the Flash region access
657		 * registers are now implemented in GbE memory space.
658		 */
659		nvm->flash_base_addr = 0;
660		nvm_size =
661		    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
662		    * NVM_SIZE_MULTIPLIER;
663		nvm->flash_bank_size = nvm_size / 2;
664		/* Adjust to word count */
665		nvm->flash_bank_size /= sizeof(u16);
666		/* Set the base address for flash register access */
667		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
668	} else {
669		/* Can't read flash registers if register set isn't mapped. */
670		if (!hw->flash_address) {
671			DEBUGOUT("ERROR: Flash registers not mapped\n");
672			return -E1000_ERR_CONFIG;
673		}
674
675		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
676
677		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
678		 * Add 1 to sector_end_addr since this sector is included in
679		 * the overall size.
680		 */
681		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
682		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
683
684		/* flash_base_addr is byte-aligned */
685		nvm->flash_base_addr = sector_base_addr
686				       << FLASH_SECTOR_ADDR_SHIFT;
687
688		/* find total size of the NVM, then cut in half since the total
689		 * size represents two separate NVM banks.
690		 */
691		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
692					<< FLASH_SECTOR_ADDR_SHIFT);
693		nvm->flash_bank_size /= 2;
694		/* Adjust to word count */
695		nvm->flash_bank_size /= sizeof(u16);
696	}
697
698	nvm->word_size = E1000_SHADOW_RAM_WORDS;
699
700	/* Clear shadow ram */
701	for (i = 0; i < nvm->word_size; i++) {
702		dev_spec->shadow_ram[i].modified = FALSE;
703		dev_spec->shadow_ram[i].value    = 0xFFFF;
704	}
705
706	/* Function Pointers */
707	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
708	nvm->ops.release	= e1000_release_nvm_ich8lan;
709	if (hw->mac.type >= e1000_pch_spt) {
710		nvm->ops.read	= e1000_read_nvm_spt;
711		nvm->ops.update	= e1000_update_nvm_checksum_spt;
712	} else {
713		nvm->ops.read	= e1000_read_nvm_ich8lan;
714		nvm->ops.update	= e1000_update_nvm_checksum_ich8lan;
715	}
716	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
717	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
718	nvm->ops.write		= e1000_write_nvm_ich8lan;
719
720	return E1000_SUCCESS;
721}
722
723/**
724 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
725 *  @hw: pointer to the HW structure
726 *
727 *  Initialize family-specific MAC parameters and function
728 *  pointers.
729 **/
730static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
731{
732	struct e1000_mac_info *mac = &hw->mac;
733
734	DEBUGFUNC("e1000_init_mac_params_ich8lan");
735
736	/* Set media type function pointer */
737	hw->phy.media_type = e1000_media_type_copper;
738
739	/* Set mta register count */
740	mac->mta_reg_count = 32;
741	/* Set rar entry count */
742	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
743	if (mac->type == e1000_ich8lan)
744		mac->rar_entry_count--;
745	/* Set if part includes ASF firmware */
746	mac->asf_firmware_present = TRUE;
747	/* FWSM register */
748	mac->has_fwsm = TRUE;
749	/* ARC subsystem not supported */
750	mac->arc_subsystem_valid = FALSE;
751	/* Adaptive IFS supported */
752	mac->adaptive_ifs = TRUE;
753
754	/* Function pointers */
755
756	/* bus type/speed/width */
757	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
758	/* function id */
759	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
760	/* reset */
761	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
762	/* hw initialization */
763	mac->ops.init_hw = e1000_init_hw_ich8lan;
764	/* link setup */
765	mac->ops.setup_link = e1000_setup_link_ich8lan;
766	/* physical interface setup */
767	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
768	/* check for link */
769	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
770	/* link info */
771	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
772	/* multicast address update */
773	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
774	/* clear hardware counters */
775	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
776
777	/* LED and other operations */
778	switch (mac->type) {
779	case e1000_ich8lan:
780	case e1000_ich9lan:
781	case e1000_ich10lan:
782		/* check management mode */
783		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
784		/* ID LED init */
785		mac->ops.id_led_init = e1000_id_led_init_generic;
786		/* blink LED */
787		mac->ops.blink_led = e1000_blink_led_generic;
788		/* setup LED */
789		mac->ops.setup_led = e1000_setup_led_generic;
790		/* cleanup LED */
791		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
792		/* turn on/off LED */
793		mac->ops.led_on = e1000_led_on_ich8lan;
794		mac->ops.led_off = e1000_led_off_ich8lan;
795		break;
796	case e1000_pch2lan:
797		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
798		mac->ops.rar_set = e1000_rar_set_pch2lan;
799		/* fall-through */
800	case e1000_pch_lpt:
801	case e1000_pch_spt:
802	case e1000_pch_cnp:
803	case e1000_pch_tgp:
804	case e1000_pch_adp:
805	case e1000_pch_mtp:
806		/* multicast address update for pch2 */
807		mac->ops.update_mc_addr_list =
808			e1000_update_mc_addr_list_pch2lan;
809		/* fall-through */
810	case e1000_pchlan:
811		/* check management mode */
812		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
813		/* ID LED init */
814		mac->ops.id_led_init = e1000_id_led_init_pchlan;
815		/* setup LED */
816		mac->ops.setup_led = e1000_setup_led_pchlan;
817		/* cleanup LED */
818		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
819		/* turn on/off LED */
820		mac->ops.led_on = e1000_led_on_pchlan;
821		mac->ops.led_off = e1000_led_off_pchlan;
822		break;
823	default:
824		break;
825	}
826
827	if (mac->type >= e1000_pch_lpt) {
828		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
829		mac->ops.rar_set = e1000_rar_set_pch_lpt;
830		mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
831		mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
832	}
833
834	/* Enable PCS Lock-loss workaround for ICH8 */
835	if (mac->type == e1000_ich8lan)
836		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
837
838	return E1000_SUCCESS;
839}
840
841/**
842 *  __e1000_access_emi_reg_locked - Read/write EMI register
843 *  @hw: pointer to the HW structure
844 *  @addr: EMI address to program
845 *  @data: pointer to value to read/write from/to the EMI address
846 *  @read: boolean flag to indicate read or write
847 *
848 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
849 **/
850static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
851					 u16 *data, bool read)
852{
853	s32 ret_val;
854
855	DEBUGFUNC("__e1000_access_emi_reg_locked");
856
857	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
858	if (ret_val)
859		return ret_val;
860
861	if (read)
862		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
863						      data);
864	else
865		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
866						       *data);
867
868	return ret_val;
869}
870
871/**
872 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
873 *  @hw: pointer to the HW structure
874 *  @addr: EMI address to program
875 *  @data: value to be read from the EMI address
876 *
877 *  Assumes the SW/FW/HW Semaphore is already acquired.
878 **/
879s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
880{
881	DEBUGFUNC("e1000_read_emi_reg_locked");
882
883	return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
884}
885
886/**
887 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
888 *  @hw: pointer to the HW structure
889 *  @addr: EMI address to program
890 *  @data: value to be written to the EMI address
891 *
892 *  Assumes the SW/FW/HW Semaphore is already acquired.
893 **/
894s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
895{
896	DEBUGFUNC("e1000_read_emi_reg_locked");
897
898	return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
899}
900
901/**
902 *  e1000_set_eee_pchlan - Enable/disable EEE support
903 *  @hw: pointer to the HW structure
904 *
905 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
906 *  the link and the EEE capabilities of the link partner.  The LPI Control
907 *  register bits will remain set only if/when link is up.
908 *
909 *  EEE LPI must not be asserted earlier than one second after link is up.
910 *  On 82579, EEE LPI should not be enabled until such time otherwise there
911 *  can be link issues with some switches.  Other devices can have EEE LPI
912 *  enabled immediately upon link up since they have a timer in hardware which
913 *  prevents LPI from being asserted too early.
914 **/
915s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
916{
917	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
918	s32 ret_val;
919	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
920
921	DEBUGFUNC("e1000_set_eee_pchlan");
922
923	switch (hw->phy.type) {
924	case e1000_phy_82579:
925		lpa = I82579_EEE_LP_ABILITY;
926		pcs_status = I82579_EEE_PCS_STATUS;
927		adv_addr = I82579_EEE_ADVERTISEMENT;
928		break;
929	case e1000_phy_i217:
930		lpa = I217_EEE_LP_ABILITY;
931		pcs_status = I217_EEE_PCS_STATUS;
932		adv_addr = I217_EEE_ADVERTISEMENT;
933		break;
934	default:
935		return E1000_SUCCESS;
936	}
937
938	ret_val = hw->phy.ops.acquire(hw);
939	if (ret_val)
940		return ret_val;
941
942	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
943	if (ret_val)
944		goto release;
945
946	/* Clear bits that enable EEE in various speeds */
947	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
948
949	/* Enable EEE if not disabled by user */
950	if (!dev_spec->eee_disable) {
951		/* Save off link partner's EEE ability */
952		ret_val = e1000_read_emi_reg_locked(hw, lpa,
953						    &dev_spec->eee_lp_ability);
954		if (ret_val)
955			goto release;
956
957		/* Read EEE advertisement */
958		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
959		if (ret_val)
960			goto release;
961
962		/* Enable EEE only for speeds in which the link partner is
963		 * EEE capable and for which we advertise EEE.
964		 */
965		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
966			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
967
968		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
969			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
970			if (data & NWAY_LPAR_100TX_FD_CAPS)
971				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
972			else
973				/* EEE is not supported in 100Half, so ignore
974				 * partner's EEE in 100 ability if full-duplex
975				 * is not advertised.
976				 */
977				dev_spec->eee_lp_ability &=
978				    ~I82579_EEE_100_SUPPORTED;
979		}
980	}
981
982	if (hw->phy.type == e1000_phy_82579) {
983		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
984						    &data);
985		if (ret_val)
986			goto release;
987
988		data &= ~I82579_LPI_100_PLL_SHUT;
989		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
990						     data);
991	}
992
993	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
994	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
995	if (ret_val)
996		goto release;
997
998	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
999release:
1000	hw->phy.ops.release(hw);
1001
1002	return ret_val;
1003}
1004
1005/**
1006 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
1007 *  @hw:   pointer to the HW structure
1008 *  @link: link up bool flag
1009 *
1010 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1011 *  preventing further DMA write requests.  Workaround the issue by disabling
1012 *  the de-assertion of the clock request when in 1Gpbs mode.
1013 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1014 *  speeds in order to avoid Tx hangs.
1015 **/
1016static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1017{
1018	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1019	u32 status = E1000_READ_REG(hw, E1000_STATUS);
1020	s32 ret_val = E1000_SUCCESS;
1021	u16 reg;
1022
1023	if (link && (status & E1000_STATUS_SPEED_1000)) {
1024		ret_val = hw->phy.ops.acquire(hw);
1025		if (ret_val)
1026			return ret_val;
1027
1028		ret_val =
1029		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1030					       &reg);
1031		if (ret_val)
1032			goto release;
1033
1034		ret_val =
1035		    e1000_write_kmrn_reg_locked(hw,
1036						E1000_KMRNCTRLSTA_K1_CONFIG,
1037						reg &
1038						~E1000_KMRNCTRLSTA_K1_ENABLE);
1039		if (ret_val)
1040			goto release;
1041
1042		usec_delay(10);
1043
1044		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1045				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1046
1047		ret_val =
1048		    e1000_write_kmrn_reg_locked(hw,
1049						E1000_KMRNCTRLSTA_K1_CONFIG,
1050						reg);
1051release:
1052		hw->phy.ops.release(hw);
1053	} else {
1054		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
1055		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1056
1057		if ((hw->phy.revision > 5) || !link ||
1058		    ((status & E1000_STATUS_SPEED_100) &&
1059		     (status & E1000_STATUS_FD)))
1060			goto update_fextnvm6;
1061
1062		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1063		if (ret_val)
1064			return ret_val;
1065
1066		/* Clear link status transmit timeout */
1067		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1068
1069		if (status & E1000_STATUS_SPEED_100) {
1070			/* Set inband Tx timeout to 5x10us for 100Half */
1071			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1072
1073			/* Do not extend the K1 entry latency for 100Half */
1074			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1075		} else {
1076			/* Set inband Tx timeout to 50x10us for 10Full/Half */
1077			reg |= 50 <<
1078			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1079
1080			/* Extend the K1 entry latency for 10 Mbps */
1081			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1082		}
1083
1084		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1085		if (ret_val)
1086			return ret_val;
1087
1088update_fextnvm6:
1089		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1090	}
1091
1092	return ret_val;
1093}
1094
1095static u64 e1000_ltr2ns(u16 ltr)
1096{
1097	u32 value, scale;
1098
1099	/* Determine the latency in nsec based on the LTR value & scale */
1100	value = ltr & E1000_LTRV_VALUE_MASK;
1101	scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1102
1103	return value * (1ULL << (scale * E1000_LTRV_SCALE_FACTOR));
1104}
1105
1106/**
1107 *  e1000_platform_pm_pch_lpt - Set platform power management values
1108 *  @hw: pointer to the HW structure
1109 *  @link: bool indicating link status
1110 *
1111 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1112 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1113 *  when link is up (which must not exceed the maximum latency supported
1114 *  by the platform), otherwise specify there is no LTR requirement.
1115 *  Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
1116 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1117 *  Capability register set, on this device LTR is set by writing the
1118 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1119 *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1120 *  message to the PMC.
1121 *
1122 *  Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
1123 *  high-water mark.
1124 **/
1125static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1126{
1127	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1128		  link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1129	u16 lat_enc = 0;	/* latency encoded */
1130	s32 obff_hwm = 0;
1131
1132	DEBUGFUNC("e1000_platform_pm_pch_lpt");
1133
1134	if (link) {
1135		u16 speed, duplex, scale = 0;
1136		u16 max_snoop, max_nosnoop;
1137		u16 max_ltr_enc;	/* max LTR latency encoded */
1138		s64 lat_ns;
1139		s64 value;
1140		u32 rxa;
1141
1142		if (!hw->mac.max_frame_size) {
1143			DEBUGOUT("max_frame_size not set.\n");
1144			return -E1000_ERR_CONFIG;
1145		}
1146
1147		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1148		if (!speed) {
1149			DEBUGOUT("Speed not set.\n");
1150			return -E1000_ERR_CONFIG;
1151		}
1152
1153		/* Rx Packet Buffer Allocation size (KB) */
1154		rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
1155
1156		/* Determine the maximum latency tolerated by the device.
1157		 *
1158		 * Per the PCIe spec, the tolerated latencies are encoded as
1159		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1160		 * a 10-bit value (0-1023) to provide a range from 1 ns to
1161		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1162		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1163		 */
1164		lat_ns = ((s64)rxa * 1024 -
1165			  (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
1166		if (lat_ns < 0)
1167			lat_ns = 0;
1168		else
1169			lat_ns /= speed;
1170		value = lat_ns;
1171
1172		while (value > E1000_LTRV_VALUE_MASK) {
1173			scale++;
1174			value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
1175		}
1176		if (scale > E1000_LTRV_SCALE_MAX) {
1177			DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
1178			return -E1000_ERR_CONFIG;
1179		}
1180		lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
1181
1182		/* Determine the maximum latency tolerated by the platform */
1183		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
1184		e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1185		max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
1186
1187		if (lat_enc > max_ltr_enc) {
1188			lat_enc = max_ltr_enc;
1189			lat_ns = e1000_ltr2ns(max_ltr_enc);
1190		}
1191
1192		if (lat_ns) {
1193			lat_ns *= speed * 1000;
1194			lat_ns /= 8;
1195			lat_ns /= 1000000000;
1196			obff_hwm = (s32)(rxa - lat_ns);
1197		}
1198		if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
1199			DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
1200			return -E1000_ERR_CONFIG;
1201		}
1202	}
1203
1204	/* Set Snoop and No-Snoop latencies the same */
1205	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1206	E1000_WRITE_REG(hw, E1000_LTRV, reg);
1207
1208	/* Set OBFF high water mark */
1209	reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
1210	reg |= obff_hwm;
1211	E1000_WRITE_REG(hw, E1000_SVT, reg);
1212
1213	/* Enable OBFF */
1214	reg = E1000_READ_REG(hw, E1000_SVCR);
1215	reg |= E1000_SVCR_OFF_EN;
1216	/* Always unblock interrupts to the CPU even when the system is
1217	 * in OBFF mode. This ensures that small round-robin traffic
1218	 * (like ping) does not get dropped or experience long latency.
1219	 */
1220	reg |= E1000_SVCR_OFF_MASKINT;
1221	E1000_WRITE_REG(hw, E1000_SVCR, reg);
1222
1223	return E1000_SUCCESS;
1224}
1225
1226/**
1227 *  e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1228 *  @hw: pointer to the HW structure
1229 *  @itr: interrupt throttling rate
1230 *
1231 *  Configure OBFF with the updated interrupt rate.
1232 **/
1233static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1234{
1235	u32 svcr;
1236	s32 timer;
1237
1238	DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1239
1240	/* Convert ITR value into microseconds for OBFF timer */
1241	timer = itr & E1000_ITR_MASK;
1242	timer = (timer * E1000_ITR_MULT) / 1000;
1243
1244	if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1245		DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1246		return -E1000_ERR_CONFIG;
1247	}
1248
1249	svcr = E1000_READ_REG(hw, E1000_SVCR);
1250	svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1251	svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1252	E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1253
1254	return E1000_SUCCESS;
1255}
1256
1257/**
1258 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1259 *  @hw: pointer to the HW structure
1260 *  @to_sx: boolean indicating a system power state transition to Sx
1261 *
1262 *  When link is down, configure ULP mode to significantly reduce the power
1263 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1264 *  ME firmware to start the ULP configuration.  If not on an ME enabled
1265 *  system, configure the ULP mode by software.
1266 */
1267s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1268{
1269	u32 mac_reg;
1270	s32 ret_val = E1000_SUCCESS;
1271	u16 phy_reg;
1272	u16 oem_reg = 0;
1273
1274	if ((hw->mac.type < e1000_pch_lpt) ||
1275	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1276	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1277	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1278	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1279	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1280		return 0;
1281
1282	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1283		/* Request ME configure ULP mode in the PHY */
1284		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1285		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1286		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1287
1288		goto out;
1289	}
1290
1291	if (!to_sx) {
1292		int i = 0;
1293
1294		/* Poll up to 5 seconds for Cable Disconnected indication */
1295		while (!(E1000_READ_REG(hw, E1000_FEXT) &
1296			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1297			/* Bail if link is re-acquired */
1298			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1299				return -E1000_ERR_PHY;
1300
1301			if (i++ == 100)
1302				break;
1303
1304			msec_delay(50);
1305		}
1306		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1307			 (E1000_READ_REG(hw, E1000_FEXT) &
1308			  E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1309			 i * 50);
1310	}
1311
1312	ret_val = hw->phy.ops.acquire(hw);
1313	if (ret_val)
1314		goto out;
1315
1316	/* Force SMBus mode in PHY */
1317	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1318	if (ret_val)
1319		goto release;
1320	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1321	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1322
1323	/* Force SMBus mode in MAC */
1324	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1325	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1326	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1327
1328	/* Si workaround for ULP entry flow on i127/rev6 h/w.  Enable
1329	 * LPLU and disable Gig speed when entering ULP
1330	 */
1331	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1332		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1333						       &oem_reg);
1334		if (ret_val)
1335			goto release;
1336
1337		phy_reg = oem_reg;
1338		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1339
1340		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1341							phy_reg);
1342
1343		if (ret_val)
1344			goto release;
1345	}
1346
1347	/* Set Inband ULP Exit, Reset to SMBus mode and
1348	 * Disable SMBus Release on PERST# in PHY
1349	 */
1350	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1351	if (ret_val)
1352		goto release;
1353	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1354		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1355	if (to_sx) {
1356		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1357			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1358		else
1359			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1360
1361		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1362		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1363	} else {
1364		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1365		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1366		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1367	}
1368	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1369
1370	/* Set Disable SMBus Release on PERST# in MAC */
1371	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1372	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1373	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1374
1375	/* Commit ULP changes in PHY by starting auto ULP configuration */
1376	phy_reg |= I218_ULP_CONFIG1_START;
1377	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1378
1379	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1380	    to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1381		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1382							oem_reg);
1383		if (ret_val)
1384			goto release;
1385	}
1386
1387release:
1388	hw->phy.ops.release(hw);
1389out:
1390	if (ret_val)
1391		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1392	else
1393		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1394
1395	return ret_val;
1396}
1397
1398/**
1399 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1400 *  @hw: pointer to the HW structure
1401 *  @force: boolean indicating whether or not to force disabling ULP
1402 *
1403 *  Un-configure ULP mode when link is up, the system is transitioned from
1404 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1405 *  system, poll for an indication from ME that ULP has been un-configured.
1406 *  If not on an ME enabled system, un-configure the ULP mode by software.
1407 *
1408 *  During nominal operation, this function is called when link is acquired
1409 *  to disable ULP mode (force=FALSE); otherwise, for example when unloading
1410 *  the driver or during Sx->S0 transitions, this is called with force=TRUE
1411 *  to forcibly disable ULP.
1412 */
1413s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1414{
1415	s32 ret_val = E1000_SUCCESS;
1416	u32 mac_reg;
1417	u16 phy_reg;
1418	int i = 0;
1419
1420	if ((hw->mac.type < e1000_pch_lpt) ||
1421	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1422	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1423	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1424	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1425	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1426		return 0;
1427
1428	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1429		if (force) {
1430			/* Request ME un-configure ULP mode in the PHY */
1431			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1432			mac_reg &= ~E1000_H2ME_ULP;
1433			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1434			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1435		}
1436
1437		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1438		while (E1000_READ_REG(hw, E1000_FWSM) &
1439		       E1000_FWSM_ULP_CFG_DONE) {
1440			if (i++ == 30) {
1441				ret_val = -E1000_ERR_PHY;
1442				goto out;
1443			}
1444
1445			msec_delay(10);
1446		}
1447		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1448
1449		if (force) {
1450			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1451			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1452			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1453		} else {
1454			/* Clear H2ME.ULP after ME ULP configuration */
1455			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1456			mac_reg &= ~E1000_H2ME_ULP;
1457			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1458		}
1459
1460		goto out;
1461	}
1462
1463	ret_val = hw->phy.ops.acquire(hw);
1464	if (ret_val)
1465		goto out;
1466
1467	if (force)
1468		/* Toggle LANPHYPC Value bit */
1469		e1000_toggle_lanphypc_pch_lpt(hw);
1470
1471	/* Unforce SMBus mode in PHY */
1472	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1473	if (ret_val) {
1474		/* The MAC might be in PCIe mode, so temporarily force to
1475		 * SMBus mode in order to access the PHY.
1476		 */
1477		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1478		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1479		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1480
1481		msec_delay(50);
1482
1483		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1484						       &phy_reg);
1485		if (ret_val)
1486			goto release;
1487	}
1488	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1489	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1490
1491	/* Unforce SMBus mode in MAC */
1492	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1493	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1494	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1495
1496	/* When ULP mode was previously entered, K1 was disabled by the
1497	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1498	 */
1499	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1500	if (ret_val)
1501		goto release;
1502	phy_reg |= HV_PM_CTRL_K1_ENABLE;
1503	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1504
1505	/* Clear ULP enabled configuration */
1506	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1507	if (ret_val)
1508		goto release;
1509	phy_reg &= ~(I218_ULP_CONFIG1_IND |
1510		     I218_ULP_CONFIG1_STICKY_ULP |
1511		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1512		     I218_ULP_CONFIG1_WOL_HOST |
1513		     I218_ULP_CONFIG1_INBAND_EXIT |
1514		     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1515		     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1516		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1517	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1518
1519	/* Commit ULP changes by starting auto ULP configuration */
1520	phy_reg |= I218_ULP_CONFIG1_START;
1521	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1522
1523	/* Clear Disable SMBus Release on PERST# in MAC */
1524	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1525	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1526	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1527
1528release:
1529	hw->phy.ops.release(hw);
1530	if (force) {
1531		hw->phy.ops.reset(hw);
1532		msec_delay(50);
1533	}
1534out:
1535	if (ret_val)
1536		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1537	else
1538		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1539
1540	return ret_val;
1541}
1542
1543/**
1544 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1545 *  @hw: pointer to the HW structure
1546 *
1547 *  Checks to see of the link status of the hardware has changed.  If a
1548 *  change in link status has been detected, then we read the PHY registers
1549 *  to get the current speed/duplex if link exists.
1550 **/
1551static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1552{
1553	struct e1000_mac_info *mac = &hw->mac;
1554	s32 ret_val, tipg_reg = 0;
1555	u16 emi_addr, emi_val = 0;
1556	bool link;
1557	u16 phy_reg;
1558
1559	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1560
1561	/* We only want to go out to the PHY registers to see if Auto-Neg
1562	 * has completed and/or if our link status has changed.  The
1563	 * get_link_status flag is set upon receiving a Link Status
1564	 * Change or Rx Sequence Error interrupt.
1565	 */
1566	if (!mac->get_link_status)
1567		return E1000_SUCCESS;
1568
1569	/* First we want to see if the MII Status Register reports
1570	 * link.  If so, then we want to get the current speed/duplex
1571	 * of the PHY.
1572	 */
1573	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1574	if (ret_val)
1575		return ret_val;
1576
1577	if (hw->mac.type == e1000_pchlan) {
1578		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1579		if (ret_val)
1580			return ret_val;
1581	}
1582
1583	/* When connected at 10Mbps half-duplex, some parts are excessively
1584	 * aggressive resulting in many collisions. To avoid this, increase
1585	 * the IPG and reduce Rx latency in the PHY.
1586	 */
1587	if ((hw->mac.type >= e1000_pch2lan) && link) {
1588		u16 speed, duplex;
1589
1590		e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1591		tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1592		tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1593
1594		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1595			tipg_reg |= 0xFF;
1596			/* Reduce Rx latency in analog PHY */
1597			emi_val = 0;
1598		} else if (hw->mac.type >= e1000_pch_spt &&
1599			   duplex == FULL_DUPLEX && speed != SPEED_1000) {
1600			tipg_reg |= 0xC;
1601			emi_val = 1;
1602		} else {
1603			/* Roll back the default values */
1604			tipg_reg |= 0x08;
1605			emi_val = 1;
1606		}
1607
1608		E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1609
1610		ret_val = hw->phy.ops.acquire(hw);
1611		if (ret_val)
1612			return ret_val;
1613
1614		if (hw->mac.type == e1000_pch2lan)
1615			emi_addr = I82579_RX_CONFIG;
1616		else
1617			emi_addr = I217_RX_CONFIG;
1618		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1619
1620
1621		if (hw->mac.type >= e1000_pch_lpt) {
1622			u16 phy_reg;
1623
1624			hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1625						    &phy_reg);
1626			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1627			if (speed == SPEED_100 || speed == SPEED_10)
1628				phy_reg |= 0x3E8;
1629			else
1630				phy_reg |= 0xFA;
1631			hw->phy.ops.write_reg_locked(hw,
1632						     I217_PLL_CLOCK_GATE_REG,
1633						     phy_reg);
1634
1635			if (speed == SPEED_1000) {
1636				hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1637							    &phy_reg);
1638
1639				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1640
1641				hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1642							     phy_reg);
1643				}
1644		 }
1645		hw->phy.ops.release(hw);
1646
1647		if (ret_val)
1648			return ret_val;
1649
1650		if (hw->mac.type >= e1000_pch_spt) {
1651			u16 data;
1652			u16 ptr_gap;
1653
1654			if (speed == SPEED_1000) {
1655				ret_val = hw->phy.ops.acquire(hw);
1656				if (ret_val)
1657					return ret_val;
1658
1659				ret_val = hw->phy.ops.read_reg_locked(hw,
1660							      PHY_REG(776, 20),
1661							      &data);
1662				if (ret_val) {
1663					hw->phy.ops.release(hw);
1664					return ret_val;
1665				}
1666
1667				ptr_gap = (data & (0x3FF << 2)) >> 2;
1668				if (ptr_gap < 0x18) {
1669					data &= ~(0x3FF << 2);
1670					data |= (0x18 << 2);
1671					ret_val =
1672						hw->phy.ops.write_reg_locked(hw,
1673							PHY_REG(776, 20), data);
1674				}
1675				hw->phy.ops.release(hw);
1676				if (ret_val)
1677					return ret_val;
1678			} else {
1679				ret_val = hw->phy.ops.acquire(hw);
1680				if (ret_val)
1681					return ret_val;
1682
1683				ret_val = hw->phy.ops.write_reg_locked(hw,
1684							     PHY_REG(776, 20),
1685							     0xC023);
1686				hw->phy.ops.release(hw);
1687				if (ret_val)
1688					return ret_val;
1689
1690			}
1691		}
1692	}
1693
1694	/* I217 Packet Loss issue:
1695	 * ensure that FEXTNVM4 Beacon Duration is set correctly
1696	 * on power up.
1697	 * Set the Beacon Duration for I217 to 8 usec
1698	 */
1699	if (hw->mac.type >= e1000_pch_lpt) {
1700		u32 mac_reg;
1701
1702		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1703		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1704		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1705		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1706	}
1707
1708	/* Work-around I218 hang issue */
1709	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1710	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1711	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1712	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1713		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1714		if (ret_val)
1715			return ret_val;
1716	}
1717	if (hw->mac.type >= e1000_pch_lpt) {
1718		/* Set platform power management values for
1719		 * Latency Tolerance Reporting (LTR)
1720		 * Optimized Buffer Flush/Fill (OBFF)
1721		 */
1722		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1723		if (ret_val)
1724			return ret_val;
1725	}
1726
1727	/* Clear link partner's EEE ability */
1728	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1729
1730	if (hw->mac.type >= e1000_pch_lpt) {
1731		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1732
1733		if (hw->mac.type == e1000_pch_spt) {
1734			/* FEXTNVM6 K1-off workaround - for SPT only */
1735			u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1736
1737			if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1738				fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1739			else
1740				fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1741		}
1742
1743		if (hw->dev_spec.ich8lan.disable_k1_off == TRUE)
1744			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1745
1746		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1747	}
1748
1749	if (!link)
1750		return E1000_SUCCESS; /* No link detected */
1751
1752	mac->get_link_status = FALSE;
1753
1754	switch (hw->mac.type) {
1755	case e1000_pch2lan:
1756		ret_val = e1000_k1_workaround_lv(hw);
1757		if (ret_val)
1758			return ret_val;
1759		/* fall-thru */
1760	case e1000_pchlan:
1761		if (hw->phy.type == e1000_phy_82578) {
1762			ret_val = e1000_link_stall_workaround_hv(hw);
1763			if (ret_val)
1764				return ret_val;
1765		}
1766
1767		/* Workaround for PCHx parts in half-duplex:
1768		 * Set the number of preambles removed from the packet
1769		 * when it is passed from the PHY to the MAC to prevent
1770		 * the MAC from misinterpreting the packet type.
1771		 */
1772		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1773		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1774
1775		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1776		    E1000_STATUS_FD)
1777			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1778
1779		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1780		break;
1781	default:
1782		break;
1783	}
1784
1785	/* Check if there was DownShift, must be checked
1786	 * immediately after link-up
1787	 */
1788	e1000_check_downshift_generic(hw);
1789
1790	/* Enable/Disable EEE after link up */
1791	if (hw->phy.type > e1000_phy_82579) {
1792		ret_val = e1000_set_eee_pchlan(hw);
1793		if (ret_val)
1794			return ret_val;
1795	}
1796
1797	/* If we are forcing speed/duplex, then we simply return since
1798	 * we have already determined whether we have link or not.
1799	 */
1800	if (!mac->autoneg)
1801		return -E1000_ERR_CONFIG;
1802
1803	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1804	 * of MAC speed/duplex configuration.  So we only need to
1805	 * configure Collision Distance in the MAC.
1806	 */
1807	mac->ops.config_collision_dist(hw);
1808
1809	/* Configure Flow Control now that Auto-Neg has completed.
1810	 * First, we need to restore the desired flow control
1811	 * settings because we may have had to re-autoneg with a
1812	 * different link partner.
1813	 */
1814	ret_val = e1000_config_fc_after_link_up_generic(hw);
1815	if (ret_val)
1816		DEBUGOUT("Error configuring flow control\n");
1817
1818	return ret_val;
1819}
1820
1821/**
1822 *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1823 *  @hw: pointer to the HW structure
1824 *
1825 *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1826 **/
1827void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1828{
1829	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1830
1831	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1832	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1833	switch (hw->mac.type) {
1834	case e1000_ich8lan:
1835	case e1000_ich9lan:
1836	case e1000_ich10lan:
1837		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1838		break;
1839	case e1000_pchlan:
1840	case e1000_pch2lan:
1841	case e1000_pch_lpt:
1842	case e1000_pch_spt:
1843	case e1000_pch_cnp:
1844	case e1000_pch_tgp:
1845	case e1000_pch_adp:
1846	case e1000_pch_mtp:
1847		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1848		break;
1849	default:
1850		break;
1851	}
1852}
1853
1854/**
1855 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1856 *  @hw: pointer to the HW structure
1857 *
1858 *  Acquires the mutex for performing NVM operations.
1859 **/
1860static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1861{
1862	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1863
1864	ASSERT_CTX_LOCK_HELD(hw);
1865
1866	return E1000_SUCCESS;
1867}
1868
1869/**
1870 *  e1000_release_nvm_ich8lan - Release NVM mutex
1871 *  @hw: pointer to the HW structure
1872 *
1873 *  Releases the mutex used while performing NVM operations.
1874 **/
1875static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1876{
1877	DEBUGFUNC("e1000_release_nvm_ich8lan");
1878
1879	ASSERT_CTX_LOCK_HELD(hw);
1880}
1881
1882/**
1883 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1884 *  @hw: pointer to the HW structure
1885 *
1886 *  Acquires the software control flag for performing PHY and select
1887 *  MAC CSR accesses.
1888 **/
1889static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1890{
1891	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1892	s32 ret_val = E1000_SUCCESS;
1893
1894	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1895
1896	ASSERT_CTX_LOCK_HELD(hw);
1897
1898	while (timeout) {
1899		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1900		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1901			break;
1902
1903		msec_delay_irq(1);
1904		timeout--;
1905	}
1906
1907	if (!timeout) {
1908		DEBUGOUT("SW has already locked the resource.\n");
1909		ret_val = -E1000_ERR_CONFIG;
1910		goto out;
1911	}
1912
1913	timeout = SW_FLAG_TIMEOUT;
1914
1915	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1916	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1917
1918	while (timeout) {
1919		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1920		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1921			break;
1922
1923		msec_delay_irq(1);
1924		timeout--;
1925	}
1926
1927	if (!timeout) {
1928		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1929			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1930		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1931		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1932		ret_val = -E1000_ERR_CONFIG;
1933		goto out;
1934	}
1935
1936out:
1937	return ret_val;
1938}
1939
1940/**
1941 *  e1000_release_swflag_ich8lan - Release software control flag
1942 *  @hw: pointer to the HW structure
1943 *
1944 *  Releases the software control flag for performing PHY and select
1945 *  MAC CSR accesses.
1946 **/
1947static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1948{
1949	u32 extcnf_ctrl;
1950
1951	DEBUGFUNC("e1000_release_swflag_ich8lan");
1952
1953	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1954
1955	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1956		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1957		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1958	} else {
1959		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1960	}
1961}
1962
1963/**
1964 *  e1000_check_mng_mode_ich8lan - Checks management mode
1965 *  @hw: pointer to the HW structure
1966 *
1967 *  This checks if the adapter has any manageability enabled.
1968 *  This is a function pointer entry point only called by read/write
1969 *  routines for the PHY and NVM parts.
1970 **/
1971static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1972{
1973	u32 fwsm;
1974
1975	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1976
1977	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1978
1979	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1980	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1981		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1982}
1983
1984/**
1985 *  e1000_check_mng_mode_pchlan - Checks management mode
1986 *  @hw: pointer to the HW structure
1987 *
1988 *  This checks if the adapter has iAMT enabled.
1989 *  This is a function pointer entry point only called by read/write
1990 *  routines for the PHY and NVM parts.
1991 **/
1992static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1993{
1994	u32 fwsm;
1995
1996	DEBUGFUNC("e1000_check_mng_mode_pchlan");
1997
1998	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1999
2000	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
2001	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
2002}
2003
2004/**
2005 *  e1000_rar_set_pch2lan - Set receive address register
2006 *  @hw: pointer to the HW structure
2007 *  @addr: pointer to the receive address
2008 *  @index: receive address array register
2009 *
2010 *  Sets the receive address array register at index to the address passed
2011 *  in by addr.  For 82579, RAR[0] is the base address register that is to
2012 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
2013 *  Use SHRA[0-3] in place of those reserved for ME.
2014 **/
2015static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2016{
2017	u32 rar_low, rar_high;
2018
2019	DEBUGFUNC("e1000_rar_set_pch2lan");
2020
2021	/* HW expects these in little endian so we reverse the byte order
2022	 * from network order (big endian) to little endian
2023	 */
2024	rar_low = ((u32) addr[0] |
2025		   ((u32) addr[1] << 8) |
2026		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2027
2028	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2029
2030	/* If MAC address zero, no need to set the AV bit */
2031	if (rar_low || rar_high)
2032		rar_high |= E1000_RAH_AV;
2033
2034	if (index == 0) {
2035		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2036		E1000_WRITE_FLUSH(hw);
2037		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2038		E1000_WRITE_FLUSH(hw);
2039		return E1000_SUCCESS;
2040	}
2041
2042	/* RAR[1-6] are owned by manageability.  Skip those and program the
2043	 * next address into the SHRA register array.
2044	 */
2045	if (index < (u32) (hw->mac.rar_entry_count)) {
2046		s32 ret_val;
2047
2048		ret_val = e1000_acquire_swflag_ich8lan(hw);
2049		if (ret_val)
2050			goto out;
2051
2052		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2053		E1000_WRITE_FLUSH(hw);
2054		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2055		E1000_WRITE_FLUSH(hw);
2056
2057		e1000_release_swflag_ich8lan(hw);
2058
2059		/* verify the register updates */
2060		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2061		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2062			return E1000_SUCCESS;
2063
2064		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2065			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2066	}
2067
2068out:
2069	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2070	return -E1000_ERR_CONFIG;
2071}
2072
2073/**
2074 *  e1000_rar_set_pch_lpt - Set receive address registers
2075 *  @hw: pointer to the HW structure
2076 *  @addr: pointer to the receive address
2077 *  @index: receive address array register
2078 *
2079 *  Sets the receive address register array at index to the address passed
2080 *  in by addr. For LPT, RAR[0] is the base address register that is to
2081 *  contain the MAC address. SHRA[0-10] are the shared receive address
2082 *  registers that are shared between the Host and manageability engine (ME).
2083 **/
2084static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2085{
2086	u32 rar_low, rar_high;
2087	u32 wlock_mac;
2088
2089	DEBUGFUNC("e1000_rar_set_pch_lpt");
2090
2091	/* HW expects these in little endian so we reverse the byte order
2092	 * from network order (big endian) to little endian
2093	 */
2094	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2095		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2096
2097	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2098
2099	/* If MAC address zero, no need to set the AV bit */
2100	if (rar_low || rar_high)
2101		rar_high |= E1000_RAH_AV;
2102
2103	if (index == 0) {
2104		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2105		E1000_WRITE_FLUSH(hw);
2106		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2107		E1000_WRITE_FLUSH(hw);
2108		return E1000_SUCCESS;
2109	}
2110
2111	/* The manageability engine (ME) can lock certain SHRAR registers that
2112	 * it is using - those registers are unavailable for use.
2113	 */
2114	if (index < hw->mac.rar_entry_count) {
2115		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2116			    E1000_FWSM_WLOCK_MAC_MASK;
2117		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2118
2119		/* Check if all SHRAR registers are locked */
2120		if (wlock_mac == 1)
2121			goto out;
2122
2123		if ((wlock_mac == 0) || (index <= wlock_mac)) {
2124			s32 ret_val;
2125
2126			ret_val = e1000_acquire_swflag_ich8lan(hw);
2127
2128			if (ret_val)
2129				goto out;
2130
2131			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2132					rar_low);
2133			E1000_WRITE_FLUSH(hw);
2134			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2135					rar_high);
2136			E1000_WRITE_FLUSH(hw);
2137
2138			e1000_release_swflag_ich8lan(hw);
2139
2140			/* verify the register updates */
2141			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2142			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2143				return E1000_SUCCESS;
2144		}
2145	}
2146
2147out:
2148	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2149	return -E1000_ERR_CONFIG;
2150}
2151
2152/**
2153 *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2154 *  @hw: pointer to the HW structure
2155 *  @mc_addr_list: array of multicast addresses to program
2156 *  @mc_addr_count: number of multicast addresses to program
2157 *
2158 *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2159 *  The caller must have a packed mc_addr_list of multicast addresses.
2160 **/
2161static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2162					      u8 *mc_addr_list,
2163					      u32 mc_addr_count)
2164{
2165	u16 phy_reg = 0;
2166	int i;
2167	s32 ret_val;
2168
2169	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2170
2171	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2172
2173	ret_val = hw->phy.ops.acquire(hw);
2174	if (ret_val)
2175		return;
2176
2177	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2178	if (ret_val)
2179		goto release;
2180
2181	for (i = 0; i < hw->mac.mta_reg_count; i++) {
2182		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2183					   (u16)(hw->mac.mta_shadow[i] &
2184						 0xFFFF));
2185		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2186					   (u16)((hw->mac.mta_shadow[i] >> 16) &
2187						 0xFFFF));
2188	}
2189
2190	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2191
2192release:
2193	hw->phy.ops.release(hw);
2194}
2195
2196/**
2197 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2198 *  @hw: pointer to the HW structure
2199 *
2200 *  Checks if firmware is blocking the reset of the PHY.
2201 *  This is a function pointer entry point only called by
2202 *  reset routines.
2203 **/
2204static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2205{
2206	u32 fwsm;
2207	bool blocked = FALSE;
2208	int i = 0;
2209
2210	DEBUGFUNC("e1000_check_reset_block_ich8lan");
2211
2212	do {
2213		fwsm = E1000_READ_REG(hw, E1000_FWSM);
2214		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2215			blocked = TRUE;
2216			msec_delay(10);
2217			continue;
2218		}
2219		blocked = FALSE;
2220	} while (blocked && (i++ < 30));
2221	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2222}
2223
2224/**
2225 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2226 *  @hw: pointer to the HW structure
2227 *
2228 *  Assumes semaphore already acquired.
2229 *
2230 **/
2231static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2232{
2233	u16 phy_data;
2234	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2235	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2236		E1000_STRAP_SMT_FREQ_SHIFT;
2237	s32 ret_val;
2238
2239	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2240
2241	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2242	if (ret_val)
2243		return ret_val;
2244
2245	phy_data &= ~HV_SMB_ADDR_MASK;
2246	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2247	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2248
2249	if (hw->phy.type == e1000_phy_i217) {
2250		/* Restore SMBus frequency */
2251		if (freq--) {
2252			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2253			phy_data |= (freq & (1 << 0)) <<
2254				HV_SMB_ADDR_FREQ_LOW_SHIFT;
2255			phy_data |= (freq & (1 << 1)) <<
2256				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2257		} else {
2258			DEBUGOUT("Unsupported SMB frequency in PHY\n");
2259		}
2260	}
2261
2262	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2263}
2264
2265/**
2266 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2267 *  @hw:   pointer to the HW structure
2268 *
2269 *  SW should configure the LCD from the NVM extended configuration region
2270 *  as a workaround for certain parts.
2271 **/
2272static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2273{
2274	struct e1000_phy_info *phy = &hw->phy;
2275	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2276	s32 ret_val = E1000_SUCCESS;
2277	u16 word_addr, reg_data, reg_addr, phy_page = 0;
2278
2279	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2280
2281	/* Initialize the PHY from the NVM on ICH platforms.  This
2282	 * is needed due to an issue where the NVM configuration is
2283	 * not properly autoloaded after power transitions.
2284	 * Therefore, after each PHY reset, we will load the
2285	 * configuration data out of the NVM manually.
2286	 */
2287	switch (hw->mac.type) {
2288	case e1000_ich8lan:
2289		if (phy->type != e1000_phy_igp_3)
2290			return ret_val;
2291
2292		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2293		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2294			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2295			break;
2296		}
2297		/* Fall-thru */
2298	case e1000_pchlan:
2299	case e1000_pch2lan:
2300	case e1000_pch_lpt:
2301	case e1000_pch_spt:
2302	case e1000_pch_cnp:
2303	case e1000_pch_tgp:
2304	case e1000_pch_adp:
2305	case e1000_pch_mtp:
2306		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2307		break;
2308	default:
2309		return ret_val;
2310	}
2311
2312	ret_val = hw->phy.ops.acquire(hw);
2313	if (ret_val)
2314		return ret_val;
2315
2316	data = E1000_READ_REG(hw, E1000_FEXTNVM);
2317	if (!(data & sw_cfg_mask))
2318		goto release;
2319
2320	/* Make sure HW does not configure LCD from PHY
2321	 * extended configuration before SW configuration
2322	 */
2323	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2324	if ((hw->mac.type < e1000_pch2lan) &&
2325	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2326			goto release;
2327
2328	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2329	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2330	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2331	if (!cnf_size)
2332		goto release;
2333
2334	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2335	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2336
2337	if (((hw->mac.type == e1000_pchlan) &&
2338	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2339	    (hw->mac.type > e1000_pchlan)) {
2340		/* HW configures the SMBus address and LEDs when the
2341		 * OEM and LCD Write Enable bits are set in the NVM.
2342		 * When both NVM bits are cleared, SW will configure
2343		 * them instead.
2344		 */
2345		ret_val = e1000_write_smbus_addr(hw);
2346		if (ret_val)
2347			goto release;
2348
2349		data = E1000_READ_REG(hw, E1000_LEDCTL);
2350		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2351							(u16)data);
2352		if (ret_val)
2353			goto release;
2354	}
2355
2356	/* Configure LCD from extended configuration region. */
2357
2358	/* cnf_base_addr is in DWORD */
2359	word_addr = (u16)(cnf_base_addr << 1);
2360
2361	for (i = 0; i < cnf_size; i++) {
2362		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2363					   &reg_data);
2364		if (ret_val)
2365			goto release;
2366
2367		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2368					   1, &reg_addr);
2369		if (ret_val)
2370			goto release;
2371
2372		/* Save off the PHY page for future writes. */
2373		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2374			phy_page = reg_data;
2375			continue;
2376		}
2377
2378		reg_addr &= PHY_REG_MASK;
2379		reg_addr |= phy_page;
2380
2381		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2382						    reg_data);
2383		if (ret_val)
2384			goto release;
2385	}
2386
2387release:
2388	hw->phy.ops.release(hw);
2389	return ret_val;
2390}
2391
2392/**
2393 *  e1000_k1_gig_workaround_hv - K1 Si workaround
2394 *  @hw:   pointer to the HW structure
2395 *  @link: link up bool flag
2396 *
2397 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2398 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
2399 *  If link is down, the function will restore the default K1 setting located
2400 *  in the NVM.
2401 **/
2402static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2403{
2404	s32 ret_val = E1000_SUCCESS;
2405	u16 status_reg = 0;
2406	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2407
2408	DEBUGFUNC("e1000_k1_gig_workaround_hv");
2409
2410	if (hw->mac.type != e1000_pchlan)
2411		return E1000_SUCCESS;
2412
2413	/* Wrap the whole flow with the sw flag */
2414	ret_val = hw->phy.ops.acquire(hw);
2415	if (ret_val)
2416		return ret_val;
2417
2418	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2419	if (link) {
2420		if (hw->phy.type == e1000_phy_82578) {
2421			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2422							      &status_reg);
2423			if (ret_val)
2424				goto release;
2425
2426			status_reg &= (BM_CS_STATUS_LINK_UP |
2427				       BM_CS_STATUS_RESOLVED |
2428				       BM_CS_STATUS_SPEED_MASK);
2429
2430			if (status_reg == (BM_CS_STATUS_LINK_UP |
2431					   BM_CS_STATUS_RESOLVED |
2432					   BM_CS_STATUS_SPEED_1000))
2433				k1_enable = FALSE;
2434		}
2435
2436		if (hw->phy.type == e1000_phy_82577) {
2437			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2438							      &status_reg);
2439			if (ret_val)
2440				goto release;
2441
2442			status_reg &= (HV_M_STATUS_LINK_UP |
2443				       HV_M_STATUS_AUTONEG_COMPLETE |
2444				       HV_M_STATUS_SPEED_MASK);
2445
2446			if (status_reg == (HV_M_STATUS_LINK_UP |
2447					   HV_M_STATUS_AUTONEG_COMPLETE |
2448					   HV_M_STATUS_SPEED_1000))
2449				k1_enable = FALSE;
2450		}
2451
2452		/* Link stall fix for link up */
2453		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2454						       0x0100);
2455		if (ret_val)
2456			goto release;
2457
2458	} else {
2459		/* Link stall fix for link down */
2460		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2461						       0x4100);
2462		if (ret_val)
2463			goto release;
2464	}
2465
2466	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2467
2468release:
2469	hw->phy.ops.release(hw);
2470
2471	return ret_val;
2472}
2473
2474/**
2475 *  e1000_configure_k1_ich8lan - Configure K1 power state
2476 *  @hw: pointer to the HW structure
2477 *  @enable: K1 state to configure
2478 *
2479 *  Configure the K1 power state based on the provided parameter.
2480 *  Assumes semaphore already acquired.
2481 *
2482 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2483 **/
2484s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2485{
2486	s32 ret_val;
2487	u32 ctrl_reg = 0;
2488	u32 ctrl_ext = 0;
2489	u32 reg = 0;
2490	u16 kmrn_reg = 0;
2491
2492	DEBUGFUNC("e1000_configure_k1_ich8lan");
2493
2494	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2495					     &kmrn_reg);
2496	if (ret_val)
2497		return ret_val;
2498
2499	if (k1_enable)
2500		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2501	else
2502		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2503
2504	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2505					      kmrn_reg);
2506	if (ret_val)
2507		return ret_val;
2508
2509	usec_delay(20);
2510	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2511	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2512
2513	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2514	reg |= E1000_CTRL_FRCSPD;
2515	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2516
2517	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2518	E1000_WRITE_FLUSH(hw);
2519	usec_delay(20);
2520	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2521	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2522	E1000_WRITE_FLUSH(hw);
2523	usec_delay(20);
2524
2525	return E1000_SUCCESS;
2526}
2527
2528/**
2529 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2530 *  @hw:       pointer to the HW structure
2531 *  @d0_state: boolean if entering d0 or d3 device state
2532 *
2533 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2534 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2535 *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2536 **/
2537static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2538{
2539	s32 ret_val = 0;
2540	u32 mac_reg;
2541	u16 oem_reg;
2542
2543	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2544
2545	if (hw->mac.type < e1000_pchlan)
2546		return ret_val;
2547
2548	ret_val = hw->phy.ops.acquire(hw);
2549	if (ret_val)
2550		return ret_val;
2551
2552	if (hw->mac.type == e1000_pchlan) {
2553		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2554		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2555			goto release;
2556	}
2557
2558	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2559	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2560		goto release;
2561
2562	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2563
2564	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2565	if (ret_val)
2566		goto release;
2567
2568	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2569
2570	if (d0_state) {
2571		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2572			oem_reg |= HV_OEM_BITS_GBE_DIS;
2573
2574		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2575			oem_reg |= HV_OEM_BITS_LPLU;
2576	} else {
2577		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2578		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2579			oem_reg |= HV_OEM_BITS_GBE_DIS;
2580
2581		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2582		    E1000_PHY_CTRL_NOND0A_LPLU))
2583			oem_reg |= HV_OEM_BITS_LPLU;
2584	}
2585
2586	/* Set Restart auto-neg to activate the bits */
2587	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2588	    !hw->phy.ops.check_reset_block(hw))
2589		oem_reg |= HV_OEM_BITS_RESTART_AN;
2590
2591	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2592
2593release:
2594	hw->phy.ops.release(hw);
2595
2596	return ret_val;
2597}
2598
2599
2600/**
2601 *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2602 *  @hw:   pointer to the HW structure
2603 **/
2604static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2605{
2606	s32 ret_val;
2607	u16 data;
2608
2609	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2610
2611	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2612	if (ret_val)
2613		return ret_val;
2614
2615	data |= HV_KMRN_MDIO_SLOW;
2616
2617	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2618
2619	return ret_val;
2620}
2621
2622/**
2623 *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2624 *  done after every PHY reset.
2625 **/
2626static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2627{
2628	s32 ret_val = E1000_SUCCESS;
2629	u16 phy_data;
2630
2631	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2632
2633	if (hw->mac.type != e1000_pchlan)
2634		return E1000_SUCCESS;
2635
2636	/* Set MDIO slow mode before any other MDIO access */
2637	if (hw->phy.type == e1000_phy_82577) {
2638		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2639		if (ret_val)
2640			return ret_val;
2641	}
2642
2643	if (((hw->phy.type == e1000_phy_82577) &&
2644	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2645	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2646		/* Disable generation of early preamble */
2647		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2648		if (ret_val)
2649			return ret_val;
2650
2651		/* Preamble tuning for SSC */
2652		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2653						0xA204);
2654		if (ret_val)
2655			return ret_val;
2656	}
2657
2658	if (hw->phy.type == e1000_phy_82578) {
2659		/* Return registers to default by doing a soft reset then
2660		 * writing 0x3140 to the control register.
2661		 */
2662		if (hw->phy.revision < 2) {
2663			e1000_phy_sw_reset_generic(hw);
2664			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2665							0x3140);
2666			if (ret_val)
2667				return ret_val;
2668		}
2669	}
2670
2671	/* Select page 0 */
2672	ret_val = hw->phy.ops.acquire(hw);
2673	if (ret_val)
2674		return ret_val;
2675
2676	hw->phy.addr = 1;
2677	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2678	hw->phy.ops.release(hw);
2679	if (ret_val)
2680		return ret_val;
2681
2682	/* Configure the K1 Si workaround during phy reset assuming there is
2683	 * link so that it disables K1 if link is in 1Gbps.
2684	 */
2685	ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2686	if (ret_val)
2687		return ret_val;
2688
2689	/* Workaround for link disconnects on a busy hub in half duplex */
2690	ret_val = hw->phy.ops.acquire(hw);
2691	if (ret_val)
2692		return ret_val;
2693	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2694	if (ret_val)
2695		goto release;
2696	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2697					       phy_data & 0x00FF);
2698	if (ret_val)
2699		goto release;
2700
2701	/* set MSE higher to enable link to stay up when noise is high */
2702	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2703release:
2704	hw->phy.ops.release(hw);
2705
2706	return ret_val;
2707}
2708
2709/**
2710 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2711 *  @hw:   pointer to the HW structure
2712 **/
2713void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2714{
2715	u32 mac_reg;
2716	u16 i, phy_reg = 0;
2717	s32 ret_val;
2718
2719	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2720
2721	ret_val = hw->phy.ops.acquire(hw);
2722	if (ret_val)
2723		return;
2724	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2725	if (ret_val)
2726		goto release;
2727
2728	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2729	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2730		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2731		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2732					   (u16)(mac_reg & 0xFFFF));
2733		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2734					   (u16)((mac_reg >> 16) & 0xFFFF));
2735
2736		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2737		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2738					   (u16)(mac_reg & 0xFFFF));
2739		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2740					   (u16)((mac_reg & E1000_RAH_AV)
2741						 >> 16));
2742	}
2743
2744	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2745
2746release:
2747	hw->phy.ops.release(hw);
2748}
2749
2750static u32 e1000_calc_rx_da_crc(u8 mac[])
2751{
2752	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2753	u32 i, j, mask, crc;
2754
2755	DEBUGFUNC("e1000_calc_rx_da_crc");
2756
2757	crc = 0xffffffff;
2758	for (i = 0; i < 6; i++) {
2759		crc = crc ^ mac[i];
2760		for (j = 8; j > 0; j--) {
2761			mask = (crc & 1) * (-1);
2762			crc = (crc >> 1) ^ (poly & mask);
2763		}
2764	}
2765	return ~crc;
2766}
2767
2768/**
2769 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2770 *  with 82579 PHY
2771 *  @hw: pointer to the HW structure
2772 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2773 **/
2774s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2775{
2776	s32 ret_val = E1000_SUCCESS;
2777	u16 phy_reg, data;
2778	u32 mac_reg;
2779	u16 i;
2780
2781	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2782
2783	if (hw->mac.type < e1000_pch2lan)
2784		return E1000_SUCCESS;
2785
2786	/* disable Rx path while enabling/disabling workaround */
2787	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2788	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2789					phy_reg | (1 << 14));
2790	if (ret_val)
2791		return ret_val;
2792
2793	if (enable) {
2794		/* Write Rx addresses (rar_entry_count for RAL/H, and
2795		 * SHRAL/H) and initial CRC values to the MAC
2796		 */
2797		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2798			u8 mac_addr[ETHER_ADDR_LEN] = {0};
2799			u32 addr_high, addr_low;
2800
2801			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2802			if (!(addr_high & E1000_RAH_AV))
2803				continue;
2804			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2805			mac_addr[0] = (addr_low & 0xFF);
2806			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2807			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2808			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2809			mac_addr[4] = (addr_high & 0xFF);
2810			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2811
2812			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2813					e1000_calc_rx_da_crc(mac_addr));
2814		}
2815
2816		/* Write Rx addresses to the PHY */
2817		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2818
2819		/* Enable jumbo frame workaround in the MAC */
2820		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2821		mac_reg &= ~(1 << 14);
2822		mac_reg |= (7 << 15);
2823		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2824
2825		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2826		mac_reg |= E1000_RCTL_SECRC;
2827		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2828
2829		ret_val = e1000_read_kmrn_reg_generic(hw,
2830						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2831						&data);
2832		if (ret_val)
2833			return ret_val;
2834		ret_val = e1000_write_kmrn_reg_generic(hw,
2835						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2836						data | (1 << 0));
2837		if (ret_val)
2838			return ret_val;
2839		ret_val = e1000_read_kmrn_reg_generic(hw,
2840						E1000_KMRNCTRLSTA_HD_CTRL,
2841						&data);
2842		if (ret_val)
2843			return ret_val;
2844		data &= ~(0xF << 8);
2845		data |= (0xB << 8);
2846		ret_val = e1000_write_kmrn_reg_generic(hw,
2847						E1000_KMRNCTRLSTA_HD_CTRL,
2848						data);
2849		if (ret_val)
2850			return ret_val;
2851
2852		/* Enable jumbo frame workaround in the PHY */
2853		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2854		data &= ~(0x7F << 5);
2855		data |= (0x37 << 5);
2856		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2857		if (ret_val)
2858			return ret_val;
2859		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2860		data &= ~(1 << 13);
2861		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2862		if (ret_val)
2863			return ret_val;
2864		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2865		data &= ~(0x3FF << 2);
2866		data |= (E1000_TX_PTR_GAP << 2);
2867		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2868		if (ret_val)
2869			return ret_val;
2870		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2871		if (ret_val)
2872			return ret_val;
2873		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2874		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2875						(1 << 10));
2876		if (ret_val)
2877			return ret_val;
2878	} else {
2879		/* Write MAC register values back to h/w defaults */
2880		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2881		mac_reg &= ~(0xF << 14);
2882		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2883
2884		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2885		mac_reg &= ~E1000_RCTL_SECRC;
2886		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2887
2888		ret_val = e1000_read_kmrn_reg_generic(hw,
2889						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2890						&data);
2891		if (ret_val)
2892			return ret_val;
2893		ret_val = e1000_write_kmrn_reg_generic(hw,
2894						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2895						data & ~(1 << 0));
2896		if (ret_val)
2897			return ret_val;
2898		ret_val = e1000_read_kmrn_reg_generic(hw,
2899						E1000_KMRNCTRLSTA_HD_CTRL,
2900						&data);
2901		if (ret_val)
2902			return ret_val;
2903		data &= ~(0xF << 8);
2904		data |= (0xB << 8);
2905		ret_val = e1000_write_kmrn_reg_generic(hw,
2906						E1000_KMRNCTRLSTA_HD_CTRL,
2907						data);
2908		if (ret_val)
2909			return ret_val;
2910
2911		/* Write PHY register values back to h/w defaults */
2912		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2913		data &= ~(0x7F << 5);
2914		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2915		if (ret_val)
2916			return ret_val;
2917		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2918		data |= (1 << 13);
2919		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2920		if (ret_val)
2921			return ret_val;
2922		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2923		data &= ~(0x3FF << 2);
2924		data |= (0x8 << 2);
2925		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2926		if (ret_val)
2927			return ret_val;
2928		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2929		if (ret_val)
2930			return ret_val;
2931		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2932		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2933						~(1 << 10));
2934		if (ret_val)
2935			return ret_val;
2936	}
2937
2938	/* re-enable Rx path after enabling/disabling workaround */
2939	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2940				     ~(1 << 14));
2941}
2942
2943/**
2944 *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2945 *  done after every PHY reset.
2946 **/
2947static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2948{
2949	s32 ret_val = E1000_SUCCESS;
2950
2951	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2952
2953	if (hw->mac.type != e1000_pch2lan)
2954		return E1000_SUCCESS;
2955
2956	/* Set MDIO slow mode before any other MDIO access */
2957	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2958	if (ret_val)
2959		return ret_val;
2960
2961	ret_val = hw->phy.ops.acquire(hw);
2962	if (ret_val)
2963		return ret_val;
2964	/* set MSE higher to enable link to stay up when noise is high */
2965	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2966	if (ret_val)
2967		goto release;
2968	/* drop link after 5 times MSE threshold was reached */
2969	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2970release:
2971	hw->phy.ops.release(hw);
2972
2973	return ret_val;
2974}
2975
2976/**
2977 *  e1000_k1_gig_workaround_lv - K1 Si workaround
2978 *  @hw:   pointer to the HW structure
2979 *
2980 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2981 *  Disable K1 for 1000 and 100 speeds
2982 **/
2983static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2984{
2985	s32 ret_val = E1000_SUCCESS;
2986	u16 status_reg = 0;
2987
2988	DEBUGFUNC("e1000_k1_workaround_lv");
2989
2990	if (hw->mac.type != e1000_pch2lan)
2991		return E1000_SUCCESS;
2992
2993	/* Set K1 beacon duration based on 10Mbs speed */
2994	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2995	if (ret_val)
2996		return ret_val;
2997
2998	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2999	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
3000		if (status_reg &
3001		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
3002			u16 pm_phy_reg;
3003
3004			/* LV 1G/100 Packet drop issue wa  */
3005			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
3006						       &pm_phy_reg);
3007			if (ret_val)
3008				return ret_val;
3009			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
3010			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
3011							pm_phy_reg);
3012			if (ret_val)
3013				return ret_val;
3014		} else {
3015			u32 mac_reg;
3016			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
3017			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
3018			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
3019			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
3020		}
3021	}
3022
3023	return ret_val;
3024}
3025
3026/**
3027 *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3028 *  @hw:   pointer to the HW structure
3029 *  @gate: boolean set to TRUE to gate, FALSE to ungate
3030 *
3031 *  Gate/ungate the automatic PHY configuration via hardware; perform
3032 *  the configuration via software instead.
3033 **/
3034static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3035{
3036	u32 extcnf_ctrl;
3037
3038	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3039
3040	if (hw->mac.type < e1000_pch2lan)
3041		return;
3042
3043	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3044
3045	if (gate)
3046		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3047	else
3048		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3049
3050	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3051}
3052
3053/**
3054 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
3055 *  @hw: pointer to the HW structure
3056 *
3057 *  Check the appropriate indication the MAC has finished configuring the
3058 *  PHY after a software reset.
3059 **/
3060static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3061{
3062	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3063
3064	DEBUGFUNC("e1000_lan_init_done_ich8lan");
3065
3066	/* Wait for basic configuration completes before proceeding */
3067	do {
3068		data = E1000_READ_REG(hw, E1000_STATUS);
3069		data &= E1000_STATUS_LAN_INIT_DONE;
3070		usec_delay(100);
3071	} while ((!data) && --loop);
3072
3073	/* If basic configuration is incomplete before the above loop
3074	 * count reaches 0, loading the configuration from NVM will
3075	 * leave the PHY in a bad state possibly resulting in no link.
3076	 */
3077	if (loop == 0)
3078		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3079
3080	/* Clear the Init Done bit for the next init event */
3081	data = E1000_READ_REG(hw, E1000_STATUS);
3082	data &= ~E1000_STATUS_LAN_INIT_DONE;
3083	E1000_WRITE_REG(hw, E1000_STATUS, data);
3084}
3085
3086/**
3087 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3088 *  @hw: pointer to the HW structure
3089 **/
3090static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3091{
3092	s32 ret_val = E1000_SUCCESS;
3093	u16 reg;
3094
3095	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3096
3097	if (hw->phy.ops.check_reset_block(hw))
3098		return E1000_SUCCESS;
3099
3100	/* Allow time for h/w to get to quiescent state after reset */
3101	msec_delay(10);
3102
3103	/* Perform any necessary post-reset workarounds */
3104	switch (hw->mac.type) {
3105	case e1000_pchlan:
3106		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3107		if (ret_val)
3108			return ret_val;
3109		break;
3110	case e1000_pch2lan:
3111		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3112		if (ret_val)
3113			return ret_val;
3114		break;
3115	default:
3116		break;
3117	}
3118
3119	/* Clear the host wakeup bit after lcd reset */
3120	if (hw->mac.type >= e1000_pchlan) {
3121		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3122		reg &= ~BM_WUC_HOST_WU_BIT;
3123		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3124	}
3125
3126	/* Configure the LCD with the extended configuration region in NVM */
3127	ret_val = e1000_sw_lcd_config_ich8lan(hw);
3128	if (ret_val)
3129		return ret_val;
3130
3131	/* Configure the LCD with the OEM bits in NVM */
3132	ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3133
3134	if (hw->mac.type == e1000_pch2lan) {
3135		/* Ungate automatic PHY configuration on non-managed 82579 */
3136		if (!(E1000_READ_REG(hw, E1000_FWSM) &
3137		    E1000_ICH_FWSM_FW_VALID)) {
3138			msec_delay(10);
3139			e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3140		}
3141
3142		/* Set EEE LPI Update Timer to 200usec */
3143		ret_val = hw->phy.ops.acquire(hw);
3144		if (ret_val)
3145			return ret_val;
3146		ret_val = e1000_write_emi_reg_locked(hw,
3147						     I82579_LPI_UPDATE_TIMER,
3148						     0x1387);
3149		hw->phy.ops.release(hw);
3150	}
3151
3152	return ret_val;
3153}
3154
3155/**
3156 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3157 *  @hw: pointer to the HW structure
3158 *
3159 *  Resets the PHY
3160 *  This is a function pointer entry point called by drivers
3161 *  or other shared routines.
3162 **/
3163static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3164{
3165	s32 ret_val = E1000_SUCCESS;
3166
3167	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3168
3169	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
3170	if ((hw->mac.type == e1000_pch2lan) &&
3171	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3172		e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3173
3174	ret_val = e1000_phy_hw_reset_generic(hw);
3175	if (ret_val)
3176		return ret_val;
3177
3178	return e1000_post_phy_reset_ich8lan(hw);
3179}
3180
3181/**
3182 *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3183 *  @hw: pointer to the HW structure
3184 *  @active: TRUE to enable LPLU, FALSE to disable
3185 *
3186 *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
3187 *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
3188 *  the phy speed. This function will manually set the LPLU bit and restart
3189 *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
3190 *  since it configures the same bit.
3191 **/
3192static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3193{
3194	s32 ret_val;
3195	u16 oem_reg;
3196
3197	DEBUGFUNC("e1000_set_lplu_state_pchlan");
3198	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3199	if (ret_val)
3200		return ret_val;
3201
3202	if (active)
3203		oem_reg |= HV_OEM_BITS_LPLU;
3204	else
3205		oem_reg &= ~HV_OEM_BITS_LPLU;
3206
3207	if (!hw->phy.ops.check_reset_block(hw))
3208		oem_reg |= HV_OEM_BITS_RESTART_AN;
3209
3210	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3211}
3212
3213/**
3214 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3215 *  @hw: pointer to the HW structure
3216 *  @active: TRUE to enable LPLU, FALSE to disable
3217 *
3218 *  Sets the LPLU D0 state according to the active flag.  When
3219 *  activating LPLU this function also disables smart speed
3220 *  and vice versa.  LPLU will not be activated unless the
3221 *  device autonegotiation advertisement meets standards of
3222 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3223 *  This is a function pointer entry point only called by
3224 *  PHY setup routines.
3225 **/
3226static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3227{
3228	struct e1000_phy_info *phy = &hw->phy;
3229	u32 phy_ctrl;
3230	s32 ret_val = E1000_SUCCESS;
3231	u16 data;
3232
3233	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3234
3235	if (phy->type == e1000_phy_ife)
3236		return E1000_SUCCESS;
3237
3238	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3239
3240	if (active) {
3241		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3242		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3243
3244		if (phy->type != e1000_phy_igp_3)
3245			return E1000_SUCCESS;
3246
3247		/* Call gig speed drop workaround on LPLU before accessing
3248		 * any PHY registers
3249		 */
3250		if (hw->mac.type == e1000_ich8lan)
3251			e1000_gig_downshift_workaround_ich8lan(hw);
3252
3253		/* When LPLU is enabled, we should disable SmartSpeed */
3254		ret_val = phy->ops.read_reg(hw,
3255					    IGP01E1000_PHY_PORT_CONFIG,
3256					    &data);
3257		if (ret_val)
3258			return ret_val;
3259		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3260		ret_val = phy->ops.write_reg(hw,
3261					     IGP01E1000_PHY_PORT_CONFIG,
3262					     data);
3263		if (ret_val)
3264			return ret_val;
3265	} else {
3266		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3267		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3268
3269		if (phy->type != e1000_phy_igp_3)
3270			return E1000_SUCCESS;
3271
3272		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3273		 * during Dx states where the power conservation is most
3274		 * important.  During driver activity we should enable
3275		 * SmartSpeed, so performance is maintained.
3276		 */
3277		if (phy->smart_speed == e1000_smart_speed_on) {
3278			ret_val = phy->ops.read_reg(hw,
3279						    IGP01E1000_PHY_PORT_CONFIG,
3280						    &data);
3281			if (ret_val)
3282				return ret_val;
3283
3284			data |= IGP01E1000_PSCFR_SMART_SPEED;
3285			ret_val = phy->ops.write_reg(hw,
3286						     IGP01E1000_PHY_PORT_CONFIG,
3287						     data);
3288			if (ret_val)
3289				return ret_val;
3290		} else if (phy->smart_speed == e1000_smart_speed_off) {
3291			ret_val = phy->ops.read_reg(hw,
3292						    IGP01E1000_PHY_PORT_CONFIG,
3293						    &data);
3294			if (ret_val)
3295				return ret_val;
3296
3297			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3298			ret_val = phy->ops.write_reg(hw,
3299						     IGP01E1000_PHY_PORT_CONFIG,
3300						     data);
3301			if (ret_val)
3302				return ret_val;
3303		}
3304	}
3305
3306	return E1000_SUCCESS;
3307}
3308
3309/**
3310 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3311 *  @hw: pointer to the HW structure
3312 *  @active: TRUE to enable LPLU, FALSE to disable
3313 *
3314 *  Sets the LPLU D3 state according to the active flag.  When
3315 *  activating LPLU this function also disables smart speed
3316 *  and vice versa.  LPLU will not be activated unless the
3317 *  device autonegotiation advertisement meets standards of
3318 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3319 *  This is a function pointer entry point only called by
3320 *  PHY setup routines.
3321 **/
3322static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3323{
3324	struct e1000_phy_info *phy = &hw->phy;
3325	u32 phy_ctrl;
3326	s32 ret_val = E1000_SUCCESS;
3327	u16 data;
3328
3329	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3330
3331	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3332
3333	if (!active) {
3334		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3335		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3336
3337		if (phy->type != e1000_phy_igp_3)
3338			return E1000_SUCCESS;
3339
3340		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3341		 * during Dx states where the power conservation is most
3342		 * important.  During driver activity we should enable
3343		 * SmartSpeed, so performance is maintained.
3344		 */
3345		if (phy->smart_speed == e1000_smart_speed_on) {
3346			ret_val = phy->ops.read_reg(hw,
3347						    IGP01E1000_PHY_PORT_CONFIG,
3348						    &data);
3349			if (ret_val)
3350				return ret_val;
3351
3352			data |= IGP01E1000_PSCFR_SMART_SPEED;
3353			ret_val = phy->ops.write_reg(hw,
3354						     IGP01E1000_PHY_PORT_CONFIG,
3355						     data);
3356			if (ret_val)
3357				return ret_val;
3358		} else if (phy->smart_speed == e1000_smart_speed_off) {
3359			ret_val = phy->ops.read_reg(hw,
3360						    IGP01E1000_PHY_PORT_CONFIG,
3361						    &data);
3362			if (ret_val)
3363				return ret_val;
3364
3365			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3366			ret_val = phy->ops.write_reg(hw,
3367						     IGP01E1000_PHY_PORT_CONFIG,
3368						     data);
3369			if (ret_val)
3370				return ret_val;
3371		}
3372	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3373		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3374		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3375		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3376		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3377
3378		if (phy->type != e1000_phy_igp_3)
3379			return E1000_SUCCESS;
3380
3381		/* Call gig speed drop workaround on LPLU before accessing
3382		 * any PHY registers
3383		 */
3384		if (hw->mac.type == e1000_ich8lan)
3385			e1000_gig_downshift_workaround_ich8lan(hw);
3386
3387		/* When LPLU is enabled, we should disable SmartSpeed */
3388		ret_val = phy->ops.read_reg(hw,
3389					    IGP01E1000_PHY_PORT_CONFIG,
3390					    &data);
3391		if (ret_val)
3392			return ret_val;
3393
3394		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3395		ret_val = phy->ops.write_reg(hw,
3396					     IGP01E1000_PHY_PORT_CONFIG,
3397					     data);
3398	}
3399
3400	return ret_val;
3401}
3402
3403/**
3404 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3405 *  @hw: pointer to the HW structure
3406 *  @bank:  pointer to the variable that returns the active bank
3407 *
3408 *  Reads signature byte from the NVM using the flash access registers.
3409 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3410 **/
3411static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3412{
3413	u32 eecd;
3414	struct e1000_nvm_info *nvm = &hw->nvm;
3415	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3416	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3417	u32 nvm_dword = 0;
3418	u8 sig_byte = 0;
3419	s32 ret_val;
3420
3421	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3422
3423	switch (hw->mac.type) {
3424	case e1000_pch_spt:
3425	case e1000_pch_cnp:
3426	case e1000_pch_tgp:
3427	case e1000_pch_adp:
3428	case e1000_pch_mtp:
3429		bank1_offset = nvm->flash_bank_size;
3430		act_offset = E1000_ICH_NVM_SIG_WORD;
3431
3432		/* set bank to 0 in case flash read fails */
3433		*bank = 0;
3434
3435		/* Check bank 0 */
3436		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3437							 &nvm_dword);
3438		if (ret_val)
3439			return ret_val;
3440		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3441		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3442		    E1000_ICH_NVM_SIG_VALUE) {
3443			*bank = 0;
3444			return E1000_SUCCESS;
3445		}
3446
3447		/* Check bank 1 */
3448		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3449							 bank1_offset,
3450							 &nvm_dword);
3451		if (ret_val)
3452			return ret_val;
3453		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3454		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3455		    E1000_ICH_NVM_SIG_VALUE) {
3456			*bank = 1;
3457			return E1000_SUCCESS;
3458		}
3459
3460		DEBUGOUT("ERROR: No valid NVM bank present\n");
3461		return -E1000_ERR_NVM;
3462	case e1000_ich8lan:
3463	case e1000_ich9lan:
3464		eecd = E1000_READ_REG(hw, E1000_EECD);
3465		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3466		    E1000_EECD_SEC1VAL_VALID_MASK) {
3467			if (eecd & E1000_EECD_SEC1VAL)
3468				*bank = 1;
3469			else
3470				*bank = 0;
3471
3472			return E1000_SUCCESS;
3473		}
3474		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3475		/* fall-thru */
3476	default:
3477		/* set bank to 0 in case flash read fails */
3478		*bank = 0;
3479
3480		/* Check bank 0 */
3481		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3482							&sig_byte);
3483		if (ret_val)
3484			return ret_val;
3485		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3486		    E1000_ICH_NVM_SIG_VALUE) {
3487			*bank = 0;
3488			return E1000_SUCCESS;
3489		}
3490
3491		/* Check bank 1 */
3492		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3493							bank1_offset,
3494							&sig_byte);
3495		if (ret_val)
3496			return ret_val;
3497		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3498		    E1000_ICH_NVM_SIG_VALUE) {
3499			*bank = 1;
3500			return E1000_SUCCESS;
3501		}
3502
3503		DEBUGOUT("ERROR: No valid NVM bank present\n");
3504		return -E1000_ERR_NVM;
3505	}
3506}
3507
3508/**
3509 *  e1000_read_nvm_spt - NVM access for SPT
3510 *  @hw: pointer to the HW structure
3511 *  @offset: The offset (in bytes) of the word(s) to read.
3512 *  @words: Size of data to read in words.
3513 *  @data: pointer to the word(s) to read at offset.
3514 *
3515 *  Reads a word(s) from the NVM
3516 **/
3517static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3518			      u16 *data)
3519{
3520	struct e1000_nvm_info *nvm = &hw->nvm;
3521	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3522	u32 act_offset;
3523	s32 ret_val = E1000_SUCCESS;
3524	u32 bank = 0;
3525	u32 dword = 0;
3526	u16 offset_to_read;
3527	u16 i;
3528
3529	DEBUGFUNC("e1000_read_nvm_spt");
3530
3531	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3532	    (words == 0)) {
3533		DEBUGOUT("nvm parameter(s) out of bounds\n");
3534		ret_val = -E1000_ERR_NVM;
3535		goto out;
3536	}
3537
3538	nvm->ops.acquire(hw);
3539
3540	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3541	if (ret_val != E1000_SUCCESS) {
3542		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3543		bank = 0;
3544	}
3545
3546	act_offset = (bank) ? nvm->flash_bank_size : 0;
3547	act_offset += offset;
3548
3549	ret_val = E1000_SUCCESS;
3550
3551	for (i = 0; i < words; i += 2) {
3552		if (words - i == 1) {
3553			if (dev_spec->shadow_ram[offset+i].modified) {
3554				data[i] = dev_spec->shadow_ram[offset+i].value;
3555			} else {
3556				offset_to_read = act_offset + i -
3557						 ((act_offset + i) % 2);
3558				ret_val =
3559				   e1000_read_flash_dword_ich8lan(hw,
3560								 offset_to_read,
3561								 &dword);
3562				if (ret_val)
3563					break;
3564				if ((act_offset + i) % 2 == 0)
3565					data[i] = (u16)(dword & 0xFFFF);
3566				else
3567					data[i] = (u16)((dword >> 16) & 0xFFFF);
3568			}
3569		} else {
3570			offset_to_read = act_offset + i;
3571			if (!(dev_spec->shadow_ram[offset+i].modified) ||
3572			    !(dev_spec->shadow_ram[offset+i+1].modified)) {
3573				ret_val =
3574				   e1000_read_flash_dword_ich8lan(hw,
3575								 offset_to_read,
3576								 &dword);
3577				if (ret_val)
3578					break;
3579			}
3580			if (dev_spec->shadow_ram[offset+i].modified)
3581				data[i] = dev_spec->shadow_ram[offset+i].value;
3582			else
3583				data[i] = (u16) (dword & 0xFFFF);
3584			if (dev_spec->shadow_ram[offset+i].modified)
3585				data[i+1] =
3586				   dev_spec->shadow_ram[offset+i+1].value;
3587			else
3588				data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3589		}
3590	}
3591
3592	nvm->ops.release(hw);
3593
3594out:
3595	if (ret_val)
3596		DEBUGOUT1("NVM read error: %d\n", ret_val);
3597
3598	return ret_val;
3599}
3600
3601/**
3602 *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3603 *  @hw: pointer to the HW structure
3604 *  @offset: The offset (in bytes) of the word(s) to read.
3605 *  @words: Size of data to read in words
3606 *  @data: Pointer to the word(s) to read at offset.
3607 *
3608 *  Reads a word(s) from the NVM using the flash access registers.
3609 **/
3610static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3611				  u16 *data)
3612{
3613	struct e1000_nvm_info *nvm = &hw->nvm;
3614	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3615	u32 act_offset;
3616	s32 ret_val = E1000_SUCCESS;
3617	u32 bank = 0;
3618	u16 i, word;
3619
3620	DEBUGFUNC("e1000_read_nvm_ich8lan");
3621
3622	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3623	    (words == 0)) {
3624		DEBUGOUT("nvm parameter(s) out of bounds\n");
3625		ret_val = -E1000_ERR_NVM;
3626		goto out;
3627	}
3628
3629	nvm->ops.acquire(hw);
3630
3631	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3632	if (ret_val != E1000_SUCCESS) {
3633		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3634		bank = 0;
3635	}
3636
3637	act_offset = (bank) ? nvm->flash_bank_size : 0;
3638	act_offset += offset;
3639
3640	ret_val = E1000_SUCCESS;
3641	for (i = 0; i < words; i++) {
3642		if (dev_spec->shadow_ram[offset+i].modified) {
3643			data[i] = dev_spec->shadow_ram[offset+i].value;
3644		} else {
3645			ret_val = e1000_read_flash_word_ich8lan(hw,
3646								act_offset + i,
3647								&word);
3648			if (ret_val)
3649				break;
3650			data[i] = word;
3651		}
3652	}
3653
3654	nvm->ops.release(hw);
3655
3656out:
3657	if (ret_val)
3658		DEBUGOUT1("NVM read error: %d\n", ret_val);
3659
3660	return ret_val;
3661}
3662
3663/**
3664 *  e1000_flash_cycle_init_ich8lan - Initialize flash
3665 *  @hw: pointer to the HW structure
3666 *
3667 *  This function does initial flash setup so that a new read/write/erase cycle
3668 *  can be started.
3669 **/
3670static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3671{
3672	union ich8_hws_flash_status hsfsts;
3673	s32 ret_val = -E1000_ERR_NVM;
3674
3675	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3676
3677	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3678
3679	/* Check if the flash descriptor is valid */
3680	if (!hsfsts.hsf_status.fldesvalid) {
3681		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3682		return -E1000_ERR_NVM;
3683	}
3684
3685	/* Clear FCERR and DAEL in hw status by writing 1 */
3686	hsfsts.hsf_status.flcerr = 1;
3687	hsfsts.hsf_status.dael = 1;
3688	if (hw->mac.type >= e1000_pch_spt)
3689		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3690				      hsfsts.regval & 0xFFFF);
3691	else
3692		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3693
3694	/* Either we should have a hardware SPI cycle in progress
3695	 * bit to check against, in order to start a new cycle or
3696	 * FDONE bit should be changed in the hardware so that it
3697	 * is 1 after hardware reset, which can then be used as an
3698	 * indication whether a cycle is in progress or has been
3699	 * completed.
3700	 */
3701
3702	if (!hsfsts.hsf_status.flcinprog) {
3703		/* There is no cycle running at present,
3704		 * so we can start a cycle.
3705		 * Begin by setting Flash Cycle Done.
3706		 */
3707		hsfsts.hsf_status.flcdone = 1;
3708		if (hw->mac.type >= e1000_pch_spt)
3709			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3710					      hsfsts.regval & 0xFFFF);
3711		else
3712			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3713						hsfsts.regval);
3714		ret_val = E1000_SUCCESS;
3715	} else {
3716		s32 i;
3717
3718		/* Otherwise poll for sometime so the current
3719		 * cycle has a chance to end before giving up.
3720		 */
3721		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3722			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3723							      ICH_FLASH_HSFSTS);
3724			if (!hsfsts.hsf_status.flcinprog) {
3725				ret_val = E1000_SUCCESS;
3726				break;
3727			}
3728			usec_delay(1);
3729		}
3730		if (ret_val == E1000_SUCCESS) {
3731			/* Successful in waiting for previous cycle to timeout,
3732			 * now set the Flash Cycle Done.
3733			 */
3734			hsfsts.hsf_status.flcdone = 1;
3735			if (hw->mac.type >= e1000_pch_spt)
3736				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3737						      hsfsts.regval & 0xFFFF);
3738			else
3739				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3740							hsfsts.regval);
3741		} else {
3742			DEBUGOUT("Flash controller busy, cannot get access\n");
3743		}
3744	}
3745
3746	return ret_val;
3747}
3748
3749/**
3750 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3751 *  @hw: pointer to the HW structure
3752 *  @timeout: maximum time to wait for completion
3753 *
3754 *  This function starts a flash cycle and waits for its completion.
3755 **/
3756static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3757{
3758	union ich8_hws_flash_ctrl hsflctl;
3759	union ich8_hws_flash_status hsfsts;
3760	u32 i = 0;
3761
3762	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3763
3764	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3765	if (hw->mac.type >= e1000_pch_spt)
3766		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3767	else
3768		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3769	hsflctl.hsf_ctrl.flcgo = 1;
3770
3771	if (hw->mac.type >= e1000_pch_spt)
3772		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3773				      hsflctl.regval << 16);
3774	else
3775		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3776
3777	/* wait till FDONE bit is set to 1 */
3778	do {
3779		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3780		if (hsfsts.hsf_status.flcdone)
3781			break;
3782		usec_delay(1);
3783	} while (i++ < timeout);
3784
3785	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3786		return E1000_SUCCESS;
3787
3788	return -E1000_ERR_NVM;
3789}
3790
3791/**
3792 *  e1000_read_flash_dword_ich8lan - Read dword from flash
3793 *  @hw: pointer to the HW structure
3794 *  @offset: offset to data location
3795 *  @data: pointer to the location for storing the data
3796 *
3797 *  Reads the flash dword at offset into data.  Offset is converted
3798 *  to bytes before read.
3799 **/
3800static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3801					  u32 *data)
3802{
3803	DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3804
3805	if (!data)
3806		return -E1000_ERR_NVM;
3807
3808	/* Must convert word offset into bytes. */
3809	offset <<= 1;
3810
3811	return e1000_read_flash_data32_ich8lan(hw, offset, data);
3812}
3813
3814/**
3815 *  e1000_read_flash_word_ich8lan - Read word from flash
3816 *  @hw: pointer to the HW structure
3817 *  @offset: offset to data location
3818 *  @data: pointer to the location for storing the data
3819 *
3820 *  Reads the flash word at offset into data.  Offset is converted
3821 *  to bytes before read.
3822 **/
3823static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3824					 u16 *data)
3825{
3826	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3827
3828	if (!data)
3829		return -E1000_ERR_NVM;
3830
3831	/* Must convert offset into bytes. */
3832	offset <<= 1;
3833
3834	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3835}
3836
3837/**
3838 *  e1000_read_flash_byte_ich8lan - Read byte from flash
3839 *  @hw: pointer to the HW structure
3840 *  @offset: The offset of the byte to read.
3841 *  @data: Pointer to a byte to store the value read.
3842 *
3843 *  Reads a single byte from the NVM using the flash access registers.
3844 **/
3845static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3846					 u8 *data)
3847{
3848	s32 ret_val;
3849	u16 word = 0;
3850
3851	/* In SPT, only 32 bits access is supported,
3852	 * so this function should not be called.
3853	 */
3854	if (hw->mac.type >= e1000_pch_spt)
3855		return -E1000_ERR_NVM;
3856	else
3857		ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3858
3859	if (ret_val)
3860		return ret_val;
3861
3862	*data = (u8)word;
3863
3864	return E1000_SUCCESS;
3865}
3866
3867/**
3868 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3869 *  @hw: pointer to the HW structure
3870 *  @offset: The offset (in bytes) of the byte or word to read.
3871 *  @size: Size of data to read, 1=byte 2=word
3872 *  @data: Pointer to the word to store the value read.
3873 *
3874 *  Reads a byte or word from the NVM using the flash access registers.
3875 **/
3876static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3877					 u8 size, u16 *data)
3878{
3879	union ich8_hws_flash_status hsfsts;
3880	union ich8_hws_flash_ctrl hsflctl;
3881	u32 flash_linear_addr;
3882	u32 flash_data = 0;
3883	s32 ret_val = -E1000_ERR_NVM;
3884	u8 count = 0;
3885
3886	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3887
3888	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3889		return -E1000_ERR_NVM;
3890	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3891			     hw->nvm.flash_base_addr);
3892
3893	do {
3894		usec_delay(1);
3895		/* Steps */
3896		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3897		if (ret_val != E1000_SUCCESS)
3898			break;
3899		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3900
3901		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3902		hsflctl.hsf_ctrl.fldbcount = size - 1;
3903		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3904		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3905		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3906
3907		ret_val = e1000_flash_cycle_ich8lan(hw,
3908						ICH_FLASH_READ_COMMAND_TIMEOUT);
3909
3910		/* Check if FCERR is set to 1, if set to 1, clear it
3911		 * and try the whole sequence a few more times, else
3912		 * read in (shift in) the Flash Data0, the order is
3913		 * least significant byte first msb to lsb
3914		 */
3915		if (ret_val == E1000_SUCCESS) {
3916			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3917			if (size == 1)
3918				*data = (u8)(flash_data & 0x000000FF);
3919			else if (size == 2)
3920				*data = (u16)(flash_data & 0x0000FFFF);
3921			break;
3922		} else {
3923			/* If we've gotten here, then things are probably
3924			 * completely hosed, but if the error condition is
3925			 * detected, it won't hurt to give it another try...
3926			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3927			 */
3928			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3929							      ICH_FLASH_HSFSTS);
3930			if (hsfsts.hsf_status.flcerr) {
3931				/* Repeat for some time before giving up. */
3932				continue;
3933			} else if (!hsfsts.hsf_status.flcdone) {
3934				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3935				break;
3936			}
3937		}
3938	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3939
3940	return ret_val;
3941}
3942
3943/**
3944 *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3945 *  @hw: pointer to the HW structure
3946 *  @offset: The offset (in bytes) of the dword to read.
3947 *  @data: Pointer to the dword to store the value read.
3948 *
3949 *  Reads a byte or word from the NVM using the flash access registers.
3950 **/
3951static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3952					   u32 *data)
3953{
3954	union ich8_hws_flash_status hsfsts;
3955	union ich8_hws_flash_ctrl hsflctl;
3956	u32 flash_linear_addr;
3957	s32 ret_val = -E1000_ERR_NVM;
3958	u8 count = 0;
3959
3960	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3961
3962		if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3963		    hw->mac.type < e1000_pch_spt)
3964			return -E1000_ERR_NVM;
3965	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3966			     hw->nvm.flash_base_addr);
3967
3968	do {
3969		usec_delay(1);
3970		/* Steps */
3971		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3972		if (ret_val != E1000_SUCCESS)
3973			break;
3974		/* In SPT, This register is in Lan memory space, not flash.
3975		 * Therefore, only 32 bit access is supported
3976		 */
3977		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3978
3979		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3980		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3981		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3982		/* In SPT, This register is in Lan memory space, not flash.
3983		 * Therefore, only 32 bit access is supported
3984		 */
3985		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3986				      (u32)hsflctl.regval << 16);
3987		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3988
3989		ret_val = e1000_flash_cycle_ich8lan(hw,
3990						ICH_FLASH_READ_COMMAND_TIMEOUT);
3991
3992		/* Check if FCERR is set to 1, if set to 1, clear it
3993		 * and try the whole sequence a few more times, else
3994		 * read in (shift in) the Flash Data0, the order is
3995		 * least significant byte first msb to lsb
3996		 */
3997		if (ret_val == E1000_SUCCESS) {
3998			*data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3999			break;
4000		} else {
4001			/* If we've gotten here, then things are probably
4002			 * completely hosed, but if the error condition is
4003			 * detected, it won't hurt to give it another try...
4004			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
4005			 */
4006			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4007							      ICH_FLASH_HSFSTS);
4008			if (hsfsts.hsf_status.flcerr) {
4009				/* Repeat for some time before giving up. */
4010				continue;
4011			} else if (!hsfsts.hsf_status.flcdone) {
4012				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4013				break;
4014			}
4015		}
4016	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4017
4018	return ret_val;
4019}
4020
4021/**
4022 *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
4023 *  @hw: pointer to the HW structure
4024 *  @offset: The offset (in bytes) of the word(s) to write.
4025 *  @words: Size of data to write in words
4026 *  @data: Pointer to the word(s) to write at offset.
4027 *
4028 *  Writes a byte or word to the NVM using the flash access registers.
4029 **/
4030static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4031				   u16 *data)
4032{
4033	struct e1000_nvm_info *nvm = &hw->nvm;
4034	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4035	u16 i;
4036
4037	DEBUGFUNC("e1000_write_nvm_ich8lan");
4038
4039	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4040	    (words == 0)) {
4041		DEBUGOUT("nvm parameter(s) out of bounds\n");
4042		return -E1000_ERR_NVM;
4043	}
4044
4045	nvm->ops.acquire(hw);
4046
4047	for (i = 0; i < words; i++) {
4048		dev_spec->shadow_ram[offset+i].modified = TRUE;
4049		dev_spec->shadow_ram[offset+i].value = data[i];
4050	}
4051
4052	nvm->ops.release(hw);
4053
4054	return E1000_SUCCESS;
4055}
4056
4057/**
4058 *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
4059 *  @hw: pointer to the HW structure
4060 *
4061 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4062 *  which writes the checksum to the shadow ram.  The changes in the shadow
4063 *  ram are then committed to the EEPROM by processing each bank at a time
4064 *  checking for the modified bit and writing only the pending changes.
4065 *  After a successful commit, the shadow ram is cleared and is ready for
4066 *  future writes.
4067 **/
4068static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4069{
4070	struct e1000_nvm_info *nvm = &hw->nvm;
4071	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4072	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4073	s32 ret_val;
4074	u32 dword = 0;
4075
4076	DEBUGFUNC("e1000_update_nvm_checksum_spt");
4077
4078	ret_val = e1000_update_nvm_checksum_generic(hw);
4079	if (ret_val)
4080		goto out;
4081
4082	if (nvm->type != e1000_nvm_flash_sw)
4083		goto out;
4084
4085	nvm->ops.acquire(hw);
4086
4087	/* We're writing to the opposite bank so if we're on bank 1,
4088	 * write to bank 0 etc.  We also need to erase the segment that
4089	 * is going to be written
4090	 */
4091	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4092	if (ret_val != E1000_SUCCESS) {
4093		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4094		bank = 0;
4095	}
4096
4097	if (bank == 0) {
4098		new_bank_offset = nvm->flash_bank_size;
4099		old_bank_offset = 0;
4100		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4101		if (ret_val)
4102			goto release;
4103	} else {
4104		old_bank_offset = nvm->flash_bank_size;
4105		new_bank_offset = 0;
4106		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4107		if (ret_val)
4108			goto release;
4109	}
4110	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4111		/* Determine whether to write the value stored
4112		 * in the other NVM bank or a modified value stored
4113		 * in the shadow RAM
4114		 */
4115		ret_val = e1000_read_flash_dword_ich8lan(hw,
4116							 i + old_bank_offset,
4117							 &dword);
4118
4119		if (dev_spec->shadow_ram[i].modified) {
4120			dword &= 0xffff0000;
4121			dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4122		}
4123		if (dev_spec->shadow_ram[i + 1].modified) {
4124			dword &= 0x0000ffff;
4125			dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4126				  << 16);
4127		}
4128		if (ret_val)
4129			break;
4130
4131		/* If the word is 0x13, then make sure the signature bits
4132		 * (15:14) are 11b until the commit has completed.
4133		 * This will allow us to write 10b which indicates the
4134		 * signature is valid.  We want to do this after the write
4135		 * has completed so that we don't mark the segment valid
4136		 * while the write is still in progress
4137		 */
4138		if (i == E1000_ICH_NVM_SIG_WORD - 1)
4139			dword |= E1000_ICH_NVM_SIG_MASK << 16;
4140
4141		/* Convert offset to bytes. */
4142		act_offset = (i + new_bank_offset) << 1;
4143
4144		usec_delay(100);
4145
4146		/* Write the data to the new bank. Offset in words*/
4147		act_offset = i + new_bank_offset;
4148		ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4149								dword);
4150		if (ret_val)
4151			break;
4152	 }
4153
4154	/* Don't bother writing the segment valid bits if sector
4155	 * programming failed.
4156	 */
4157	if (ret_val) {
4158		DEBUGOUT("Flash commit failed.\n");
4159		goto release;
4160	}
4161
4162	/* Finally validate the new segment by setting bit 15:14
4163	 * to 10b in word 0x13 , this can be done without an
4164	 * erase as well since these bits are 11 to start with
4165	 * and we need to change bit 14 to 0b
4166	 */
4167	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4168
4169	/*offset in words but we read dword*/
4170	--act_offset;
4171	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4172
4173	if (ret_val)
4174		goto release;
4175
4176	dword &= 0xBFFFFFFF;
4177	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4178
4179	if (ret_val)
4180		goto release;
4181
4182	/* offset in words but we read dword*/
4183	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4184	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4185
4186	if (ret_val)
4187		goto release;
4188
4189	dword &= 0x00FFFFFF;
4190	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4191
4192	if (ret_val)
4193		goto release;
4194
4195	/* Great!  Everything worked, we can now clear the cached entries. */
4196	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4197		dev_spec->shadow_ram[i].modified = FALSE;
4198		dev_spec->shadow_ram[i].value = 0xFFFF;
4199	}
4200
4201release:
4202	nvm->ops.release(hw);
4203
4204	/* Reload the EEPROM, or else modifications will not appear
4205	 * until after the next adapter reset.
4206	 */
4207	if (!ret_val) {
4208		nvm->ops.reload(hw);
4209		msec_delay(10);
4210	}
4211
4212out:
4213	if (ret_val)
4214		DEBUGOUT1("NVM update error: %d\n", ret_val);
4215
4216	return ret_val;
4217}
4218
4219/**
4220 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4221 *  @hw: pointer to the HW structure
4222 *
4223 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4224 *  which writes the checksum to the shadow ram.  The changes in the shadow
4225 *  ram are then committed to the EEPROM by processing each bank at a time
4226 *  checking for the modified bit and writing only the pending changes.
4227 *  After a successful commit, the shadow ram is cleared and is ready for
4228 *  future writes.
4229 **/
4230static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4231{
4232	struct e1000_nvm_info *nvm = &hw->nvm;
4233	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4234	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4235	s32 ret_val;
4236	u16 data = 0;
4237
4238	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4239
4240	ret_val = e1000_update_nvm_checksum_generic(hw);
4241	if (ret_val)
4242		goto out;
4243
4244	if (nvm->type != e1000_nvm_flash_sw)
4245		goto out;
4246
4247	nvm->ops.acquire(hw);
4248
4249	/* We're writing to the opposite bank so if we're on bank 1,
4250	 * write to bank 0 etc.  We also need to erase the segment that
4251	 * is going to be written
4252	 */
4253	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4254	if (ret_val != E1000_SUCCESS) {
4255		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4256		bank = 0;
4257	}
4258
4259	if (bank == 0) {
4260		new_bank_offset = nvm->flash_bank_size;
4261		old_bank_offset = 0;
4262		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4263		if (ret_val)
4264			goto release;
4265	} else {
4266		old_bank_offset = nvm->flash_bank_size;
4267		new_bank_offset = 0;
4268		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4269		if (ret_val)
4270			goto release;
4271	}
4272	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4273		if (dev_spec->shadow_ram[i].modified) {
4274			data = dev_spec->shadow_ram[i].value;
4275		} else {
4276			ret_val = e1000_read_flash_word_ich8lan(hw, i +
4277								old_bank_offset,
4278								&data);
4279			if (ret_val)
4280				break;
4281		}
4282		/* If the word is 0x13, then make sure the signature bits
4283		 * (15:14) are 11b until the commit has completed.
4284		 * This will allow us to write 10b which indicates the
4285		 * signature is valid.  We want to do this after the write
4286		 * has completed so that we don't mark the segment valid
4287		 * while the write is still in progress
4288		 */
4289		if (i == E1000_ICH_NVM_SIG_WORD)
4290			data |= E1000_ICH_NVM_SIG_MASK;
4291
4292		/* Convert offset to bytes. */
4293		act_offset = (i + new_bank_offset) << 1;
4294
4295		usec_delay(100);
4296
4297		/* Write the bytes to the new bank. */
4298		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4299							       act_offset,
4300							       (u8)data);
4301		if (ret_val)
4302			break;
4303
4304		usec_delay(100);
4305		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4306							  act_offset + 1,
4307							  (u8)(data >> 8));
4308		if (ret_val)
4309			break;
4310	 }
4311
4312	/* Don't bother writing the segment valid bits if sector
4313	 * programming failed.
4314	 */
4315	if (ret_val) {
4316		DEBUGOUT("Flash commit failed.\n");
4317		goto release;
4318	}
4319
4320	/* Finally validate the new segment by setting bit 15:14
4321	 * to 10b in word 0x13 , this can be done without an
4322	 * erase as well since these bits are 11 to start with
4323	 * and we need to change bit 14 to 0b
4324	 */
4325	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4326	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4327	if (ret_val)
4328		goto release;
4329
4330	data &= 0xBFFF;
4331	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4332						       (u8)(data >> 8));
4333	if (ret_val)
4334		goto release;
4335
4336	/* And invalidate the previously valid segment by setting
4337	 * its signature word (0x13) high_byte to 0b. This can be
4338	 * done without an erase because flash erase sets all bits
4339	 * to 1's. We can write 1's to 0's without an erase
4340	 */
4341	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4342
4343	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4344
4345	if (ret_val)
4346		goto release;
4347
4348	/* Great!  Everything worked, we can now clear the cached entries. */
4349	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4350		dev_spec->shadow_ram[i].modified = FALSE;
4351		dev_spec->shadow_ram[i].value = 0xFFFF;
4352	}
4353
4354release:
4355	nvm->ops.release(hw);
4356
4357	/* Reload the EEPROM, or else modifications will not appear
4358	 * until after the next adapter reset.
4359	 */
4360	if (!ret_val) {
4361		nvm->ops.reload(hw);
4362		msec_delay(10);
4363	}
4364
4365out:
4366	if (ret_val)
4367		DEBUGOUT1("NVM update error: %d\n", ret_val);
4368
4369	return ret_val;
4370}
4371
4372/**
4373 *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4374 *  @hw: pointer to the HW structure
4375 *
4376 *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4377 *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
4378 *  calculated, in which case we need to calculate the checksum and set bit 6.
4379 **/
4380static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4381{
4382	s32 ret_val;
4383	u16 data;
4384	u16 word;
4385	u16 valid_csum_mask;
4386
4387	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4388
4389	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4390	 * the checksum needs to be fixed.  This bit is an indication that
4391	 * the NVM was prepared by OEM software and did not calculate
4392	 * the checksum...a likely scenario.
4393	 */
4394	switch (hw->mac.type) {
4395	case e1000_pch_lpt:
4396	case e1000_pch_spt:
4397	case e1000_pch_cnp:
4398	case e1000_pch_tgp:
4399	case e1000_pch_adp:
4400	case e1000_pch_mtp:
4401		word = NVM_COMPAT;
4402		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4403		break;
4404	default:
4405		word = NVM_FUTURE_INIT_WORD1;
4406		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4407		break;
4408	}
4409
4410	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4411	if (ret_val)
4412		return ret_val;
4413
4414	if (!(data & valid_csum_mask)) {
4415		data |= valid_csum_mask;
4416		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4417		if (ret_val)
4418			return ret_val;
4419		ret_val = hw->nvm.ops.update(hw);
4420		if (ret_val)
4421			return ret_val;
4422	}
4423
4424	return e1000_validate_nvm_checksum_generic(hw);
4425}
4426
4427/**
4428 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4429 *  @hw: pointer to the HW structure
4430 *  @offset: The offset (in bytes) of the byte/word to read.
4431 *  @size: Size of data to read, 1=byte 2=word
4432 *  @data: The byte(s) to write to the NVM.
4433 *
4434 *  Writes one/two bytes to the NVM using the flash access registers.
4435 **/
4436static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4437					  u8 size, u16 data)
4438{
4439	union ich8_hws_flash_status hsfsts;
4440	union ich8_hws_flash_ctrl hsflctl;
4441	u32 flash_linear_addr;
4442	u32 flash_data = 0;
4443	s32 ret_val;
4444	u8 count = 0;
4445
4446	DEBUGFUNC("e1000_write_ich8_data");
4447
4448	if (hw->mac.type >= e1000_pch_spt) {
4449		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4450			return -E1000_ERR_NVM;
4451	} else {
4452		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4453			return -E1000_ERR_NVM;
4454	}
4455
4456	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4457			     hw->nvm.flash_base_addr);
4458
4459	do {
4460		usec_delay(1);
4461		/* Steps */
4462		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4463		if (ret_val != E1000_SUCCESS)
4464			break;
4465		/* In SPT, This register is in Lan memory space, not
4466		 * flash.  Therefore, only 32 bit access is supported
4467		 */
4468		if (hw->mac.type >= e1000_pch_spt)
4469			hsflctl.regval =
4470			    E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4471		else
4472			hsflctl.regval =
4473			    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4474
4475		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4476		hsflctl.hsf_ctrl.fldbcount = size - 1;
4477		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4478		/* In SPT, This register is in Lan memory space,
4479		 * not flash.  Therefore, only 32 bit access is
4480		 * supported
4481		 */
4482		if (hw->mac.type >= e1000_pch_spt)
4483			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4484					      hsflctl.regval << 16);
4485		else
4486			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4487						hsflctl.regval);
4488
4489		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4490
4491		if (size == 1)
4492			flash_data = (u32)data & 0x00FF;
4493		else
4494			flash_data = (u32)data;
4495
4496		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4497
4498		/* check if FCERR is set to 1 , if set to 1, clear it
4499		 * and try the whole sequence a few more times else done
4500		 */
4501		ret_val =
4502		    e1000_flash_cycle_ich8lan(hw,
4503					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4504		if (ret_val == E1000_SUCCESS)
4505			break;
4506
4507		/* If we're here, then things are most likely
4508		 * completely hosed, but if the error condition
4509		 * is detected, it won't hurt to give it another
4510		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4511		 */
4512		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4513		if (hsfsts.hsf_status.flcerr)
4514			/* Repeat for some time before giving up. */
4515			continue;
4516		if (!hsfsts.hsf_status.flcdone) {
4517			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4518			break;
4519		}
4520	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4521
4522	return ret_val;
4523}
4524
4525/**
4526*  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4527*  @hw: pointer to the HW structure
4528*  @offset: The offset (in bytes) of the dwords to read.
4529*  @data: The 4 bytes to write to the NVM.
4530*
4531*  Writes one/two/four bytes to the NVM using the flash access registers.
4532**/
4533static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4534					    u32 data)
4535{
4536	union ich8_hws_flash_status hsfsts;
4537	union ich8_hws_flash_ctrl hsflctl;
4538	u32 flash_linear_addr;
4539	s32 ret_val;
4540	u8 count = 0;
4541
4542	DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4543
4544	if (hw->mac.type >= e1000_pch_spt) {
4545		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4546			return -E1000_ERR_NVM;
4547	}
4548	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4549			     hw->nvm.flash_base_addr);
4550	do {
4551		usec_delay(1);
4552		/* Steps */
4553		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4554		if (ret_val != E1000_SUCCESS)
4555			break;
4556
4557		/* In SPT, This register is in Lan memory space, not
4558		 * flash.  Therefore, only 32 bit access is supported
4559		 */
4560		if (hw->mac.type >= e1000_pch_spt)
4561			hsflctl.regval = E1000_READ_FLASH_REG(hw,
4562							      ICH_FLASH_HSFSTS)
4563					 >> 16;
4564		else
4565			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4566							      ICH_FLASH_HSFCTL);
4567
4568		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4569		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4570
4571		/* In SPT, This register is in Lan memory space,
4572		 * not flash.  Therefore, only 32 bit access is
4573		 * supported
4574		 */
4575		if (hw->mac.type >= e1000_pch_spt)
4576			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4577					      hsflctl.regval << 16);
4578		else
4579			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4580						hsflctl.regval);
4581
4582		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4583
4584		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4585
4586		/* check if FCERR is set to 1 , if set to 1, clear it
4587		 * and try the whole sequence a few more times else done
4588		 */
4589		ret_val = e1000_flash_cycle_ich8lan(hw,
4590					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4591
4592		if (ret_val == E1000_SUCCESS)
4593			break;
4594
4595		/* If we're here, then things are most likely
4596		 * completely hosed, but if the error condition
4597		 * is detected, it won't hurt to give it another
4598		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4599		 */
4600		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4601
4602		if (hsfsts.hsf_status.flcerr)
4603			/* Repeat for some time before giving up. */
4604			continue;
4605		if (!hsfsts.hsf_status.flcdone) {
4606			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4607			break;
4608		}
4609	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4610
4611	return ret_val;
4612}
4613
4614/**
4615 *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4616 *  @hw: pointer to the HW structure
4617 *  @offset: The index of the byte to read.
4618 *  @data: The byte to write to the NVM.
4619 *
4620 *  Writes a single byte to the NVM using the flash access registers.
4621 **/
4622static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4623					  u8 data)
4624{
4625	u16 word = (u16)data;
4626
4627	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4628
4629	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4630}
4631
4632/**
4633*  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4634*  @hw: pointer to the HW structure
4635*  @offset: The offset of the word to write.
4636*  @dword: The dword to write to the NVM.
4637*
4638*  Writes a single dword to the NVM using the flash access registers.
4639*  Goes through a retry algorithm before giving up.
4640**/
4641static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4642						 u32 offset, u32 dword)
4643{
4644	s32 ret_val;
4645	u16 program_retries;
4646
4647	DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4648
4649	/* Must convert word offset into bytes. */
4650	offset <<= 1;
4651
4652	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4653
4654	if (!ret_val)
4655		return ret_val;
4656	for (program_retries = 0; program_retries < 100; program_retries++) {
4657		DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset);
4658		usec_delay(100);
4659		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4660		if (ret_val == E1000_SUCCESS)
4661			break;
4662	}
4663	if (program_retries == 100)
4664		return -E1000_ERR_NVM;
4665
4666	return E1000_SUCCESS;
4667}
4668
4669/**
4670 *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4671 *  @hw: pointer to the HW structure
4672 *  @offset: The offset of the byte to write.
4673 *  @byte: The byte to write to the NVM.
4674 *
4675 *  Writes a single byte to the NVM using the flash access registers.
4676 *  Goes through a retry algorithm before giving up.
4677 **/
4678static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4679						u32 offset, u8 byte)
4680{
4681	s32 ret_val;
4682	u16 program_retries;
4683
4684	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4685
4686	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4687	if (!ret_val)
4688		return ret_val;
4689
4690	for (program_retries = 0; program_retries < 100; program_retries++) {
4691		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4692		usec_delay(100);
4693		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4694		if (ret_val == E1000_SUCCESS)
4695			break;
4696	}
4697	if (program_retries == 100)
4698		return -E1000_ERR_NVM;
4699
4700	return E1000_SUCCESS;
4701}
4702
4703/**
4704 *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4705 *  @hw: pointer to the HW structure
4706 *  @bank: 0 for first bank, 1 for second bank, etc.
4707 *
4708 *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4709 *  bank N is 4096 * N + flash_reg_addr.
4710 **/
4711static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4712{
4713	struct e1000_nvm_info *nvm = &hw->nvm;
4714	union ich8_hws_flash_status hsfsts;
4715	union ich8_hws_flash_ctrl hsflctl;
4716	u32 flash_linear_addr;
4717	/* bank size is in 16bit words - adjust to bytes */
4718	u32 flash_bank_size = nvm->flash_bank_size * 2;
4719	s32 ret_val;
4720	s32 count = 0;
4721	s32 j, iteration, sector_size;
4722
4723	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4724
4725	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4726
4727	/* Determine HW Sector size: Read BERASE bits of hw flash status
4728	 * register
4729	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4730	 *     consecutive sectors.  The start index for the nth Hw sector
4731	 *     can be calculated as = bank * 4096 + n * 256
4732	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4733	 *     The start index for the nth Hw sector can be calculated
4734	 *     as = bank * 4096
4735	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4736	 *     (ich9 only, otherwise error condition)
4737	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4738	 */
4739	switch (hsfsts.hsf_status.berasesz) {
4740	case 0:
4741		/* Hw sector size 256 */
4742		sector_size = ICH_FLASH_SEG_SIZE_256;
4743		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4744		break;
4745	case 1:
4746		sector_size = ICH_FLASH_SEG_SIZE_4K;
4747		iteration = 1;
4748		break;
4749	case 2:
4750		sector_size = ICH_FLASH_SEG_SIZE_8K;
4751		iteration = 1;
4752		break;
4753	case 3:
4754		sector_size = ICH_FLASH_SEG_SIZE_64K;
4755		iteration = 1;
4756		break;
4757	default:
4758		return -E1000_ERR_NVM;
4759	}
4760
4761	/* Start with the base address, then add the sector offset. */
4762	flash_linear_addr = hw->nvm.flash_base_addr;
4763	flash_linear_addr += (bank) ? flash_bank_size : 0;
4764
4765	for (j = 0; j < iteration; j++) {
4766		do {
4767			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4768
4769			/* Steps */
4770			ret_val = e1000_flash_cycle_init_ich8lan(hw);
4771			if (ret_val)
4772				return ret_val;
4773
4774			/* Write a value 11 (block Erase) in Flash
4775			 * Cycle field in hw flash control
4776			 */
4777			if (hw->mac.type >= e1000_pch_spt)
4778				hsflctl.regval =
4779				    E1000_READ_FLASH_REG(hw,
4780							 ICH_FLASH_HSFSTS)>>16;
4781			else
4782				hsflctl.regval =
4783				    E1000_READ_FLASH_REG16(hw,
4784							   ICH_FLASH_HSFCTL);
4785
4786			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4787			if (hw->mac.type >= e1000_pch_spt)
4788				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4789						      hsflctl.regval << 16);
4790			else
4791				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4792							hsflctl.regval);
4793
4794			/* Write the last 24 bits of an index within the
4795			 * block into Flash Linear address field in Flash
4796			 * Address.
4797			 */
4798			flash_linear_addr += (j * sector_size);
4799			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4800					      flash_linear_addr);
4801
4802			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4803			if (ret_val == E1000_SUCCESS)
4804				break;
4805
4806			/* Check if FCERR is set to 1.  If 1,
4807			 * clear it and try the whole sequence
4808			 * a few more times else Done
4809			 */
4810			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4811						      ICH_FLASH_HSFSTS);
4812			if (hsfsts.hsf_status.flcerr)
4813				/* repeat for some time before giving up */
4814				continue;
4815			else if (!hsfsts.hsf_status.flcdone)
4816				return ret_val;
4817		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4818	}
4819
4820	return E1000_SUCCESS;
4821}
4822
4823/**
4824 *  e1000_valid_led_default_ich8lan - Set the default LED settings
4825 *  @hw: pointer to the HW structure
4826 *  @data: Pointer to the LED settings
4827 *
4828 *  Reads the LED default settings from the NVM to data.  If the NVM LED
4829 *  settings is all 0's or F's, set the LED default to a valid LED default
4830 *  setting.
4831 **/
4832static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4833{
4834	s32 ret_val;
4835
4836	DEBUGFUNC("e1000_valid_led_default_ich8lan");
4837
4838	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4839	if (ret_val) {
4840		DEBUGOUT("NVM Read Error\n");
4841		return ret_val;
4842	}
4843
4844	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4845		*data = ID_LED_DEFAULT_ICH8LAN;
4846
4847	return E1000_SUCCESS;
4848}
4849
4850/**
4851 *  e1000_id_led_init_pchlan - store LED configurations
4852 *  @hw: pointer to the HW structure
4853 *
4854 *  PCH does not control LEDs via the LEDCTL register, rather it uses
4855 *  the PHY LED configuration register.
4856 *
4857 *  PCH also does not have an "always on" or "always off" mode which
4858 *  complicates the ID feature.  Instead of using the "on" mode to indicate
4859 *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4860 *  use "link_up" mode.  The LEDs will still ID on request if there is no
4861 *  link based on logic in e1000_led_[on|off]_pchlan().
4862 **/
4863static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4864{
4865	struct e1000_mac_info *mac = &hw->mac;
4866	s32 ret_val;
4867	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4868	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4869	u16 data, i, temp, shift;
4870
4871	DEBUGFUNC("e1000_id_led_init_pchlan");
4872
4873	/* Get default ID LED modes */
4874	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4875	if (ret_val)
4876		return ret_val;
4877
4878	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4879	mac->ledctl_mode1 = mac->ledctl_default;
4880	mac->ledctl_mode2 = mac->ledctl_default;
4881
4882	for (i = 0; i < 4; i++) {
4883		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4884		shift = (i * 5);
4885		switch (temp) {
4886		case ID_LED_ON1_DEF2:
4887		case ID_LED_ON1_ON2:
4888		case ID_LED_ON1_OFF2:
4889			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4890			mac->ledctl_mode1 |= (ledctl_on << shift);
4891			break;
4892		case ID_LED_OFF1_DEF2:
4893		case ID_LED_OFF1_ON2:
4894		case ID_LED_OFF1_OFF2:
4895			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4896			mac->ledctl_mode1 |= (ledctl_off << shift);
4897			break;
4898		default:
4899			/* Do nothing */
4900			break;
4901		}
4902		switch (temp) {
4903		case ID_LED_DEF1_ON2:
4904		case ID_LED_ON1_ON2:
4905		case ID_LED_OFF1_ON2:
4906			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4907			mac->ledctl_mode2 |= (ledctl_on << shift);
4908			break;
4909		case ID_LED_DEF1_OFF2:
4910		case ID_LED_ON1_OFF2:
4911		case ID_LED_OFF1_OFF2:
4912			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4913			mac->ledctl_mode2 |= (ledctl_off << shift);
4914			break;
4915		default:
4916			/* Do nothing */
4917			break;
4918		}
4919	}
4920
4921	return E1000_SUCCESS;
4922}
4923
4924/**
4925 *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4926 *  @hw: pointer to the HW structure
4927 *
4928 *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4929 *  register, so the bus width is hard coded.
4930 **/
4931static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4932{
4933	struct e1000_bus_info *bus = &hw->bus;
4934	s32 ret_val;
4935
4936	DEBUGFUNC("e1000_get_bus_info_ich8lan");
4937
4938	ret_val = e1000_get_bus_info_pcie_generic(hw);
4939
4940	/* ICH devices are "PCI Express"-ish.  They have
4941	 * a configuration space, but do not contain
4942	 * PCI Express Capability registers, so bus width
4943	 * must be hardcoded.
4944	 */
4945	if (bus->width == e1000_bus_width_unknown)
4946		bus->width = e1000_bus_width_pcie_x1;
4947
4948	return ret_val;
4949}
4950
4951/**
4952 *  e1000_reset_hw_ich8lan - Reset the hardware
4953 *  @hw: pointer to the HW structure
4954 *
4955 *  Does a full reset of the hardware which includes a reset of the PHY and
4956 *  MAC.
4957 **/
4958static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4959{
4960	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4961	u16 kum_cfg;
4962	u32 ctrl, reg;
4963	s32 ret_val;
4964
4965	DEBUGFUNC("e1000_reset_hw_ich8lan");
4966
4967	/* Prevent the PCI-E bus from sticking if there is no TLP connection
4968	 * on the last TLP read/write transaction when MAC is reset.
4969	 */
4970	ret_val = e1000_disable_pcie_master_generic(hw);
4971	if (ret_val)
4972		DEBUGOUT("PCI-E Master disable polling has failed.\n");
4973
4974	DEBUGOUT("Masking off all interrupts\n");
4975	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4976
4977	/* Disable the Transmit and Receive units.  Then delay to allow
4978	 * any pending transactions to complete before we hit the MAC
4979	 * with the global reset.
4980	 */
4981	E1000_WRITE_REG(hw, E1000_RCTL, 0);
4982	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4983	E1000_WRITE_FLUSH(hw);
4984
4985	msec_delay(10);
4986
4987	/* Workaround for ICH8 bit corruption issue in FIFO memory */
4988	if (hw->mac.type == e1000_ich8lan) {
4989		/* Set Tx and Rx buffer allocation to 8k apiece. */
4990		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4991		/* Set Packet Buffer Size to 16k. */
4992		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4993	}
4994
4995	if (hw->mac.type == e1000_pchlan) {
4996		/* Save the NVM K1 bit setting*/
4997		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4998		if (ret_val)
4999			return ret_val;
5000
5001		if (kum_cfg & E1000_NVM_K1_ENABLE)
5002			dev_spec->nvm_k1_enabled = TRUE;
5003		else
5004			dev_spec->nvm_k1_enabled = FALSE;
5005	}
5006
5007	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5008
5009	if (!hw->phy.ops.check_reset_block(hw)) {
5010		/* Full-chip reset requires MAC and PHY reset at the same
5011		 * time to make sure the interface between MAC and the
5012		 * external PHY is reset.
5013		 */
5014		ctrl |= E1000_CTRL_PHY_RST;
5015
5016		/* Gate automatic PHY configuration by hardware on
5017		 * non-managed 82579
5018		 */
5019		if ((hw->mac.type == e1000_pch2lan) &&
5020		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
5021			e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
5022	}
5023	ret_val = e1000_acquire_swflag_ich8lan(hw);
5024	DEBUGOUT("Issuing a global reset to ich8lan\n");
5025	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
5026	/* cannot issue a flush here because it hangs the hardware */
5027	msec_delay(20);
5028
5029	/* Set Phy Config Counter to 50msec */
5030	if (hw->mac.type == e1000_pch2lan) {
5031		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
5032		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
5033		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
5034		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
5035	}
5036
5037
5038	if (ctrl & E1000_CTRL_PHY_RST) {
5039		ret_val = hw->phy.ops.get_cfg_done(hw);
5040		if (ret_val)
5041			return ret_val;
5042
5043		ret_val = e1000_post_phy_reset_ich8lan(hw);
5044		if (ret_val)
5045			return ret_val;
5046	}
5047
5048	/* For PCH, this write will make sure that any noise
5049	 * will be detected as a CRC error and be dropped rather than show up
5050	 * as a bad packet to the DMA engine.
5051	 */
5052	if (hw->mac.type == e1000_pchlan)
5053		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5054
5055	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5056	E1000_READ_REG(hw, E1000_ICR);
5057
5058	reg = E1000_READ_REG(hw, E1000_KABGTXD);
5059	reg |= E1000_KABGTXD_BGSQLBIAS;
5060	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5061
5062	return E1000_SUCCESS;
5063}
5064
5065/**
5066 *  e1000_init_hw_ich8lan - Initialize the hardware
5067 *  @hw: pointer to the HW structure
5068 *
5069 *  Prepares the hardware for transmit and receive by doing the following:
5070 *   - initialize hardware bits
5071 *   - initialize LED identification
5072 *   - setup receive address registers
5073 *   - setup flow control
5074 *   - setup transmit descriptors
5075 *   - clear statistics
5076 **/
5077static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5078{
5079	struct e1000_mac_info *mac = &hw->mac;
5080	u32 ctrl_ext, txdctl, snoop;
5081	s32 ret_val;
5082	u16 i;
5083
5084	DEBUGFUNC("e1000_init_hw_ich8lan");
5085
5086	e1000_initialize_hw_bits_ich8lan(hw);
5087
5088	/* Initialize identification LED */
5089	ret_val = mac->ops.id_led_init(hw);
5090	/* An error is not fatal and we should not stop init due to this */
5091	if (ret_val)
5092		DEBUGOUT("Error initializing identification LED\n");
5093
5094	/* Setup the receive address. */
5095	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5096
5097	/* Zero out the Multicast HASH table */
5098	DEBUGOUT("Zeroing the MTA\n");
5099	for (i = 0; i < mac->mta_reg_count; i++)
5100		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5101
5102	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
5103	 * the ME.  Disable wakeup by clearing the host wakeup bit.
5104	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
5105	 */
5106	if (hw->phy.type == e1000_phy_82578) {
5107		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5108		i &= ~BM_WUC_HOST_WU_BIT;
5109		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5110		ret_val = e1000_phy_hw_reset_ich8lan(hw);
5111		if (ret_val)
5112			return ret_val;
5113	}
5114
5115	/* Setup link and flow control */
5116	ret_val = mac->ops.setup_link(hw);
5117
5118	/* Set the transmit descriptor write-back policy for both queues */
5119	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5120	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5121		  E1000_TXDCTL_FULL_TX_DESC_WB);
5122	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5123		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5124	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5125	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5126	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5127		  E1000_TXDCTL_FULL_TX_DESC_WB);
5128	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5129		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5130	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5131
5132	/* ICH8 has opposite polarity of no_snoop bits.
5133	 * By default, we should use snoop behavior.
5134	 */
5135	if (mac->type == e1000_ich8lan)
5136		snoop = PCIE_ICH8_SNOOP_ALL;
5137	else
5138		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5139	e1000_set_pcie_no_snoop_generic(hw, snoop);
5140
5141	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5142	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5143	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5144
5145	/* Clear all of the statistics registers (clear on read).  It is
5146	 * important that we do this after we have tried to establish link
5147	 * because the symbol error count will increment wildly if there
5148	 * is no link.
5149	 */
5150	e1000_clear_hw_cntrs_ich8lan(hw);
5151
5152	return ret_val;
5153}
5154
5155/**
5156 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5157 *  @hw: pointer to the HW structure
5158 *
5159 *  Sets/Clears required hardware bits necessary for correctly setting up the
5160 *  hardware for transmit and receive.
5161 **/
5162static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5163{
5164	u32 reg;
5165
5166	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5167
5168	/* Extended Device Control */
5169	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5170	reg |= (1 << 22);
5171	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
5172	if (hw->mac.type >= e1000_pchlan)
5173		reg |= E1000_CTRL_EXT_PHYPDEN;
5174	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5175
5176	/* Transmit Descriptor Control 0 */
5177	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5178	reg |= (1 << 22);
5179	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5180
5181	/* Transmit Descriptor Control 1 */
5182	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5183	reg |= (1 << 22);
5184	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5185
5186	/* Transmit Arbitration Control 0 */
5187	reg = E1000_READ_REG(hw, E1000_TARC(0));
5188	if (hw->mac.type == e1000_ich8lan)
5189		reg |= (1 << 28) | (1 << 29);
5190	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5191	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5192
5193	/* Transmit Arbitration Control 1 */
5194	reg = E1000_READ_REG(hw, E1000_TARC(1));
5195	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5196		reg &= ~(1 << 28);
5197	else
5198		reg |= (1 << 28);
5199	reg |= (1 << 24) | (1 << 26) | (1 << 30);
5200	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5201
5202	/* Device Status */
5203	if (hw->mac.type == e1000_ich8lan) {
5204		reg = E1000_READ_REG(hw, E1000_STATUS);
5205		reg &= ~(1U << 31);
5206		E1000_WRITE_REG(hw, E1000_STATUS, reg);
5207	}
5208
5209	/* work-around descriptor data corruption issue during nfs v2 udp
5210	 * traffic, just disable the nfs filtering capability
5211	 */
5212	reg = E1000_READ_REG(hw, E1000_RFCTL);
5213	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5214
5215	/* Disable IPv6 extension header parsing because some malformed
5216	 * IPv6 headers can hang the Rx.
5217	 */
5218	if (hw->mac.type == e1000_ich8lan)
5219		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5220	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5221
5222	/* Enable ECC on Lynxpoint */
5223	if (hw->mac.type >= e1000_pch_lpt) {
5224		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5225		reg |= E1000_PBECCSTS_ECC_ENABLE;
5226		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5227
5228		reg = E1000_READ_REG(hw, E1000_CTRL);
5229		reg |= E1000_CTRL_MEHE;
5230		E1000_WRITE_REG(hw, E1000_CTRL, reg);
5231	}
5232
5233	return;
5234}
5235
5236/**
5237 *  e1000_setup_link_ich8lan - Setup flow control and link settings
5238 *  @hw: pointer to the HW structure
5239 *
5240 *  Determines which flow control settings to use, then configures flow
5241 *  control.  Calls the appropriate media-specific link configuration
5242 *  function.  Assuming the adapter has a valid link partner, a valid link
5243 *  should be established.  Assumes the hardware has previously been reset
5244 *  and the transmitter and receiver are not enabled.
5245 **/
5246static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5247{
5248	s32 ret_val;
5249
5250	DEBUGFUNC("e1000_setup_link_ich8lan");
5251
5252	/* ICH parts do not have a word in the NVM to determine
5253	 * the default flow control setting, so we explicitly
5254	 * set it to full.
5255	 */
5256	if (hw->fc.requested_mode == e1000_fc_default)
5257		hw->fc.requested_mode = e1000_fc_full;
5258
5259	/* Save off the requested flow control mode for use later.  Depending
5260	 * on the link partner's capabilities, we may or may not use this mode.
5261	 */
5262	hw->fc.current_mode = hw->fc.requested_mode;
5263
5264	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5265		hw->fc.current_mode);
5266
5267	if (!hw->phy.ops.check_reset_block(hw)) {
5268		/* Continue to configure the copper link. */
5269		ret_val = hw->mac.ops.setup_physical_interface(hw);
5270		if (ret_val)
5271			return ret_val;
5272	}
5273
5274	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5275	if ((hw->phy.type == e1000_phy_82578) ||
5276	    (hw->phy.type == e1000_phy_82579) ||
5277	    (hw->phy.type == e1000_phy_i217) ||
5278	    (hw->phy.type == e1000_phy_82577)) {
5279		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5280
5281		ret_val = hw->phy.ops.write_reg(hw,
5282					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
5283					     hw->fc.pause_time);
5284		if (ret_val)
5285			return ret_val;
5286	}
5287
5288	return e1000_set_fc_watermarks_generic(hw);
5289}
5290
5291/**
5292 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5293 *  @hw: pointer to the HW structure
5294 *
5295 *  Configures the kumeran interface to the PHY to wait the appropriate time
5296 *  when polling the PHY, then call the generic setup_copper_link to finish
5297 *  configuring the copper link.
5298 **/
5299static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5300{
5301	u32 ctrl;
5302	s32 ret_val;
5303	u16 reg_data;
5304
5305	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5306
5307	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5308	ctrl |= E1000_CTRL_SLU;
5309	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5310	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5311
5312	/* Set the mac to wait the maximum time between each iteration
5313	 * and increase the max iterations when polling the phy;
5314	 * this fixes erroneous timeouts at 10Mbps.
5315	 */
5316	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5317					       0xFFFF);
5318	if (ret_val)
5319		return ret_val;
5320	ret_val = e1000_read_kmrn_reg_generic(hw,
5321					      E1000_KMRNCTRLSTA_INBAND_PARAM,
5322					      &reg_data);
5323	if (ret_val)
5324		return ret_val;
5325	reg_data |= 0x3F;
5326	ret_val = e1000_write_kmrn_reg_generic(hw,
5327					       E1000_KMRNCTRLSTA_INBAND_PARAM,
5328					       reg_data);
5329	if (ret_val)
5330		return ret_val;
5331
5332	switch (hw->phy.type) {
5333	case e1000_phy_igp_3:
5334		ret_val = e1000_copper_link_setup_igp(hw);
5335		if (ret_val)
5336			return ret_val;
5337		break;
5338	case e1000_phy_bm:
5339	case e1000_phy_82578:
5340		ret_val = e1000_copper_link_setup_m88(hw);
5341		if (ret_val)
5342			return ret_val;
5343		break;
5344	case e1000_phy_82577:
5345	case e1000_phy_82579:
5346		ret_val = e1000_copper_link_setup_82577(hw);
5347		if (ret_val)
5348			return ret_val;
5349		break;
5350	case e1000_phy_ife:
5351		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5352					       &reg_data);
5353		if (ret_val)
5354			return ret_val;
5355
5356		reg_data &= ~IFE_PMC_AUTO_MDIX;
5357
5358		switch (hw->phy.mdix) {
5359		case 1:
5360			reg_data &= ~IFE_PMC_FORCE_MDIX;
5361			break;
5362		case 2:
5363			reg_data |= IFE_PMC_FORCE_MDIX;
5364			break;
5365		case 0:
5366		default:
5367			reg_data |= IFE_PMC_AUTO_MDIX;
5368			break;
5369		}
5370		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5371						reg_data);
5372		if (ret_val)
5373			return ret_val;
5374		break;
5375	default:
5376		break;
5377	}
5378
5379	return e1000_setup_copper_link_generic(hw);
5380}
5381
5382/**
5383 *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5384 *  @hw: pointer to the HW structure
5385 *
5386 *  Calls the PHY specific link setup function and then calls the
5387 *  generic setup_copper_link to finish configuring the link for
5388 *  Lynxpoint PCH devices
5389 **/
5390static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5391{
5392	u32 ctrl;
5393	s32 ret_val;
5394
5395	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5396
5397	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5398	ctrl |= E1000_CTRL_SLU;
5399	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5400	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5401
5402	ret_val = e1000_copper_link_setup_82577(hw);
5403	if (ret_val)
5404		return ret_val;
5405
5406	return e1000_setup_copper_link_generic(hw);
5407}
5408
5409/**
5410 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5411 *  @hw: pointer to the HW structure
5412 *  @speed: pointer to store current link speed
5413 *  @duplex: pointer to store the current link duplex
5414 *
5415 *  Calls the generic get_speed_and_duplex to retrieve the current link
5416 *  information and then calls the Kumeran lock loss workaround for links at
5417 *  gigabit speeds.
5418 **/
5419static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5420					  u16 *duplex)
5421{
5422	s32 ret_val;
5423
5424	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5425
5426	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5427	if (ret_val)
5428		return ret_val;
5429
5430	if ((hw->mac.type == e1000_ich8lan) &&
5431	    (hw->phy.type == e1000_phy_igp_3) &&
5432	    (*speed == SPEED_1000)) {
5433		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5434	}
5435
5436	return ret_val;
5437}
5438
5439/**
5440 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5441 *  @hw: pointer to the HW structure
5442 *
5443 *  Work-around for 82566 Kumeran PCS lock loss:
5444 *  On link status change (i.e. PCI reset, speed change) and link is up and
5445 *  speed is gigabit-
5446 *    0) if workaround is optionally disabled do nothing
5447 *    1) wait 1ms for Kumeran link to come up
5448 *    2) check Kumeran Diagnostic register PCS lock loss bit
5449 *    3) if not set the link is locked (all is good), otherwise...
5450 *    4) reset the PHY
5451 *    5) repeat up to 10 times
5452 *  Note: this is only called for IGP3 copper when speed is 1gb.
5453 **/
5454static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5455{
5456	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5457	u32 phy_ctrl;
5458	s32 ret_val;
5459	u16 i, data;
5460	bool link;
5461
5462	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5463
5464	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5465		return E1000_SUCCESS;
5466
5467	/* Make sure link is up before proceeding.  If not just return.
5468	 * Attempting this while link is negotiating fouled up link
5469	 * stability
5470	 */
5471	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5472	if (!link)
5473		return E1000_SUCCESS;
5474
5475	for (i = 0; i < 10; i++) {
5476		/* read once to clear */
5477		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5478		if (ret_val)
5479			return ret_val;
5480		/* and again to get new status */
5481		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5482		if (ret_val)
5483			return ret_val;
5484
5485		/* check for PCS lock */
5486		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5487			return E1000_SUCCESS;
5488
5489		/* Issue PHY reset */
5490		hw->phy.ops.reset(hw);
5491		msec_delay_irq(5);
5492	}
5493	/* Disable GigE link negotiation */
5494	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5495	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5496		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5497	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5498
5499	/* Call gig speed drop workaround on Gig disable before accessing
5500	 * any PHY registers
5501	 */
5502	e1000_gig_downshift_workaround_ich8lan(hw);
5503
5504	/* unable to acquire PCS lock */
5505	return -E1000_ERR_PHY;
5506}
5507
5508/**
5509 *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5510 *  @hw: pointer to the HW structure
5511 *  @state: boolean value used to set the current Kumeran workaround state
5512 *
5513 *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
5514 *  /disabled - FALSE).
5515 **/
5516void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5517						 bool state)
5518{
5519	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5520
5521	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5522
5523	if (hw->mac.type != e1000_ich8lan) {
5524		DEBUGOUT("Workaround applies to ICH8 only.\n");
5525		return;
5526	}
5527
5528	dev_spec->kmrn_lock_loss_workaround_enabled = state;
5529
5530	return;
5531}
5532
5533/**
5534 *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5535 *  @hw: pointer to the HW structure
5536 *
5537 *  Workaround for 82566 power-down on D3 entry:
5538 *    1) disable gigabit link
5539 *    2) write VR power-down enable
5540 *    3) read it back
5541 *  Continue if successful, else issue LCD reset and repeat
5542 **/
5543void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5544{
5545	u32 reg;
5546	u16 data;
5547	u8  retry = 0;
5548
5549	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5550
5551	if (hw->phy.type != e1000_phy_igp_3)
5552		return;
5553
5554	/* Try the workaround twice (if needed) */
5555	do {
5556		/* Disable link */
5557		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5558		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5559			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5560		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5561
5562		/* Call gig speed drop workaround on Gig disable before
5563		 * accessing any PHY registers
5564		 */
5565		if (hw->mac.type == e1000_ich8lan)
5566			e1000_gig_downshift_workaround_ich8lan(hw);
5567
5568		/* Write VR power-down enable */
5569		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5570		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5571		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5572				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5573
5574		/* Read it back and test */
5575		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5576		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5577		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5578			break;
5579
5580		/* Issue PHY reset and repeat at most one more time */
5581		reg = E1000_READ_REG(hw, E1000_CTRL);
5582		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5583		retry++;
5584	} while (retry);
5585}
5586
5587/**
5588 *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5589 *  @hw: pointer to the HW structure
5590 *
5591 *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
5592 *  LPLU, Gig disable, MDIC PHY reset):
5593 *    1) Set Kumeran Near-end loopback
5594 *    2) Clear Kumeran Near-end loopback
5595 *  Should only be called for ICH8[m] devices with any 1G Phy.
5596 **/
5597void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5598{
5599	s32 ret_val;
5600	u16 reg_data;
5601
5602	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5603
5604	if ((hw->mac.type != e1000_ich8lan) ||
5605	    (hw->phy.type == e1000_phy_ife))
5606		return;
5607
5608	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5609					      &reg_data);
5610	if (ret_val)
5611		return;
5612	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5613	ret_val = e1000_write_kmrn_reg_generic(hw,
5614					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
5615					       reg_data);
5616	if (ret_val)
5617		return;
5618	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5619	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5620				     reg_data);
5621}
5622
5623/**
5624 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5625 *  @hw: pointer to the HW structure
5626 *
5627 *  During S0 to Sx transition, it is possible the link remains at gig
5628 *  instead of negotiating to a lower speed.  Before going to Sx, set
5629 *  'Gig Disable' to force link speed negotiation to a lower speed based on
5630 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5631 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5632 *  needs to be written.
5633 *  Parts that support (and are linked to a partner which support) EEE in
5634 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5635 *  than 10Mbps w/o EEE.
5636 **/
5637void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5638{
5639	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5640	u32 phy_ctrl;
5641	s32 ret_val;
5642
5643	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5644
5645	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5646	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5647
5648	if (hw->phy.type == e1000_phy_i217) {
5649		u16 phy_reg, device_id = hw->device_id;
5650
5651		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5652		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5653		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5654		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5655		    (hw->mac.type >= e1000_pch_spt)) {
5656			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5657
5658			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5659					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5660		}
5661
5662		ret_val = hw->phy.ops.acquire(hw);
5663		if (ret_val)
5664			goto out;
5665
5666		if (!dev_spec->eee_disable) {
5667			u16 eee_advert;
5668
5669			ret_val =
5670			    e1000_read_emi_reg_locked(hw,
5671						      I217_EEE_ADVERTISEMENT,
5672						      &eee_advert);
5673			if (ret_val)
5674				goto release;
5675
5676			/* Disable LPLU if both link partners support 100BaseT
5677			 * EEE and 100Full is advertised on both ends of the
5678			 * link, and enable Auto Enable LPI since there will
5679			 * be no driver to enable LPI while in Sx.
5680			 */
5681			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5682			    (dev_spec->eee_lp_ability &
5683			     I82579_EEE_100_SUPPORTED) &&
5684			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5685				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5686					      E1000_PHY_CTRL_NOND0A_LPLU);
5687
5688				/* Set Auto Enable LPI after link up */
5689				hw->phy.ops.read_reg_locked(hw,
5690							    I217_LPI_GPIO_CTRL,
5691							    &phy_reg);
5692				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5693				hw->phy.ops.write_reg_locked(hw,
5694							     I217_LPI_GPIO_CTRL,
5695							     phy_reg);
5696			}
5697		}
5698
5699		/* For i217 Intel Rapid Start Technology support,
5700		 * when the system is going into Sx and no manageability engine
5701		 * is present, the driver must configure proxy to reset only on
5702		 * power good.  LPI (Low Power Idle) state must also reset only
5703		 * on power good, as well as the MTA (Multicast table array).
5704		 * The SMBus release must also be disabled on LCD reset.
5705		 */
5706		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5707		      E1000_ICH_FWSM_FW_VALID)) {
5708			/* Enable proxy to reset only on power good. */
5709			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5710						    &phy_reg);
5711			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5712			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5713						     phy_reg);
5714
5715			/* Set bit enable LPI (EEE) to reset only on
5716			 * power good.
5717			*/
5718			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5719			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5720			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5721
5722			/* Disable the SMB release on LCD reset. */
5723			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5724			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5725			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5726		}
5727
5728		/* Enable MTA to reset for Intel Rapid Start Technology
5729		 * Support
5730		 */
5731		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5732		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5733		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5734
5735release:
5736		hw->phy.ops.release(hw);
5737	}
5738out:
5739	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5740
5741	if (hw->mac.type == e1000_ich8lan)
5742		e1000_gig_downshift_workaround_ich8lan(hw);
5743
5744	if (hw->mac.type >= e1000_pchlan) {
5745		e1000_oem_bits_config_ich8lan(hw, FALSE);
5746
5747		/* Reset PHY to activate OEM bits on 82577/8 */
5748		if (hw->mac.type == e1000_pchlan)
5749			e1000_phy_hw_reset_generic(hw);
5750
5751		ret_val = hw->phy.ops.acquire(hw);
5752		if (ret_val)
5753			return;
5754		e1000_write_smbus_addr(hw);
5755		hw->phy.ops.release(hw);
5756	}
5757
5758	return;
5759}
5760
5761/**
5762 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5763 *  @hw: pointer to the HW structure
5764 *
5765 *  During Sx to S0 transitions on non-managed devices or managed devices
5766 *  on which PHY resets are not blocked, if the PHY registers cannot be
5767 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
5768 *  the PHY.
5769 *  On i217, setup Intel Rapid Start Technology.
5770 **/
5771u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5772{
5773	s32 ret_val;
5774
5775	DEBUGFUNC("e1000_resume_workarounds_pchlan");
5776	if (hw->mac.type < e1000_pch2lan)
5777		return E1000_SUCCESS;
5778
5779	ret_val = e1000_init_phy_workarounds_pchlan(hw);
5780	if (ret_val) {
5781		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5782		return ret_val;
5783	}
5784
5785	/* For i217 Intel Rapid Start Technology support when the system
5786	 * is transitioning from Sx and no manageability engine is present
5787	 * configure SMBus to restore on reset, disable proxy, and enable
5788	 * the reset on MTA (Multicast table array).
5789	 */
5790	if (hw->phy.type == e1000_phy_i217) {
5791		u16 phy_reg;
5792
5793		ret_val = hw->phy.ops.acquire(hw);
5794		if (ret_val) {
5795			DEBUGOUT("Failed to setup iRST\n");
5796			return ret_val;
5797		}
5798
5799		/* Clear Auto Enable LPI after link up */
5800		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5801		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5802		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5803
5804		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5805		    E1000_ICH_FWSM_FW_VALID)) {
5806			/* Restore clear on SMB if no manageability engine
5807			 * is present
5808			 */
5809			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5810							      &phy_reg);
5811			if (ret_val)
5812				goto release;
5813			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5814			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5815
5816			/* Disable Proxy */
5817			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5818		}
5819		/* Enable reset on MTA */
5820		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5821						      &phy_reg);
5822		if (ret_val)
5823			goto release;
5824		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5825		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5826release:
5827		if (ret_val)
5828			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5829		hw->phy.ops.release(hw);
5830		return ret_val;
5831	}
5832	return E1000_SUCCESS;
5833}
5834
5835/**
5836 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5837 *  @hw: pointer to the HW structure
5838 *
5839 *  Return the LED back to the default configuration.
5840 **/
5841static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5842{
5843	DEBUGFUNC("e1000_cleanup_led_ich8lan");
5844
5845	if (hw->phy.type == e1000_phy_ife)
5846		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5847					     0);
5848
5849	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5850	return E1000_SUCCESS;
5851}
5852
5853/**
5854 *  e1000_led_on_ich8lan - Turn LEDs on
5855 *  @hw: pointer to the HW structure
5856 *
5857 *  Turn on the LEDs.
5858 **/
5859static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5860{
5861	DEBUGFUNC("e1000_led_on_ich8lan");
5862
5863	if (hw->phy.type == e1000_phy_ife)
5864		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5865				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5866
5867	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5868	return E1000_SUCCESS;
5869}
5870
5871/**
5872 *  e1000_led_off_ich8lan - Turn LEDs off
5873 *  @hw: pointer to the HW structure
5874 *
5875 *  Turn off the LEDs.
5876 **/
5877static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5878{
5879	DEBUGFUNC("e1000_led_off_ich8lan");
5880
5881	if (hw->phy.type == e1000_phy_ife)
5882		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5883			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5884
5885	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5886	return E1000_SUCCESS;
5887}
5888
5889/**
5890 *  e1000_setup_led_pchlan - Configures SW controllable LED
5891 *  @hw: pointer to the HW structure
5892 *
5893 *  This prepares the SW controllable LED for use.
5894 **/
5895static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5896{
5897	DEBUGFUNC("e1000_setup_led_pchlan");
5898
5899	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5900				     (u16)hw->mac.ledctl_mode1);
5901}
5902
5903/**
5904 *  e1000_cleanup_led_pchlan - Restore the default LED operation
5905 *  @hw: pointer to the HW structure
5906 *
5907 *  Return the LED back to the default configuration.
5908 **/
5909static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5910{
5911	DEBUGFUNC("e1000_cleanup_led_pchlan");
5912
5913	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5914				     (u16)hw->mac.ledctl_default);
5915}
5916
5917/**
5918 *  e1000_led_on_pchlan - Turn LEDs on
5919 *  @hw: pointer to the HW structure
5920 *
5921 *  Turn on the LEDs.
5922 **/
5923static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5924{
5925	u16 data = (u16)hw->mac.ledctl_mode2;
5926	u32 i, led;
5927
5928	DEBUGFUNC("e1000_led_on_pchlan");
5929
5930	/* If no link, then turn LED on by setting the invert bit
5931	 * for each LED that's mode is "link_up" in ledctl_mode2.
5932	 */
5933	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5934		for (i = 0; i < 3; i++) {
5935			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5936			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5937			    E1000_LEDCTL_MODE_LINK_UP)
5938				continue;
5939			if (led & E1000_PHY_LED0_IVRT)
5940				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5941			else
5942				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5943		}
5944	}
5945
5946	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5947}
5948
5949/**
5950 *  e1000_led_off_pchlan - Turn LEDs off
5951 *  @hw: pointer to the HW structure
5952 *
5953 *  Turn off the LEDs.
5954 **/
5955static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5956{
5957	u16 data = (u16)hw->mac.ledctl_mode1;
5958	u32 i, led;
5959
5960	DEBUGFUNC("e1000_led_off_pchlan");
5961
5962	/* If no link, then turn LED off by clearing the invert bit
5963	 * for each LED that's mode is "link_up" in ledctl_mode1.
5964	 */
5965	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5966		for (i = 0; i < 3; i++) {
5967			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5968			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5969			    E1000_LEDCTL_MODE_LINK_UP)
5970				continue;
5971			if (led & E1000_PHY_LED0_IVRT)
5972				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5973			else
5974				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5975		}
5976	}
5977
5978	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5979}
5980
5981/**
5982 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5983 *  @hw: pointer to the HW structure
5984 *
5985 *  Read appropriate register for the config done bit for completion status
5986 *  and configure the PHY through s/w for EEPROM-less parts.
5987 *
5988 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5989 *  config done bit, so only an error is logged and continues.  If we were
5990 *  to return with error, EEPROM-less silicon would not be able to be reset
5991 *  or change link.
5992 **/
5993static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5994{
5995	s32 ret_val = E1000_SUCCESS;
5996	u32 bank = 0;
5997	u32 status;
5998
5999	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
6000
6001	e1000_get_cfg_done_generic(hw);
6002
6003	/* Wait for indication from h/w that it has completed basic config */
6004	if (hw->mac.type >= e1000_ich10lan) {
6005		e1000_lan_init_done_ich8lan(hw);
6006	} else {
6007		ret_val = e1000_get_auto_rd_done_generic(hw);
6008		if (ret_val) {
6009			/* When auto config read does not complete, do not
6010			 * return with an error. This can happen in situations
6011			 * where there is no eeprom and prevents getting link.
6012			 */
6013			DEBUGOUT("Auto Read Done did not complete\n");
6014			ret_val = E1000_SUCCESS;
6015		}
6016	}
6017
6018	/* Clear PHY Reset Asserted bit */
6019	status = E1000_READ_REG(hw, E1000_STATUS);
6020	if (status & E1000_STATUS_PHYRA)
6021		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
6022	else
6023		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
6024
6025	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
6026	if (hw->mac.type <= e1000_ich9lan) {
6027		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
6028		    (hw->phy.type == e1000_phy_igp_3)) {
6029			e1000_phy_init_script_igp3(hw);
6030		}
6031	} else {
6032		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
6033			/* Maybe we should do a basic PHY config */
6034			DEBUGOUT("EEPROM not present\n");
6035			ret_val = -E1000_ERR_CONFIG;
6036		}
6037	}
6038
6039	return ret_val;
6040}
6041
6042/**
6043 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6044 * @hw: pointer to the HW structure
6045 *
6046 * In the case of a PHY power down to save power, or to turn off link during a
6047 * driver unload, or wake on lan is not enabled, remove the link.
6048 **/
6049static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6050{
6051	/* If the management interface is not enabled, then power down */
6052	if (!(hw->mac.ops.check_mng_mode(hw) ||
6053	      hw->phy.ops.check_reset_block(hw)))
6054		e1000_power_down_phy_copper(hw);
6055
6056	return;
6057}
6058
6059/**
6060 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6061 *  @hw: pointer to the HW structure
6062 *
6063 *  Clears hardware counters specific to the silicon family and calls
6064 *  clear_hw_cntrs_generic to clear all general purpose counters.
6065 **/
6066static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6067{
6068	u16 phy_data;
6069	s32 ret_val;
6070
6071	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6072
6073	e1000_clear_hw_cntrs_base_generic(hw);
6074
6075	E1000_READ_REG(hw, E1000_ALGNERRC);
6076	E1000_READ_REG(hw, E1000_RXERRC);
6077	E1000_READ_REG(hw, E1000_TNCRS);
6078	E1000_READ_REG(hw, E1000_CEXTERR);
6079	E1000_READ_REG(hw, E1000_TSCTC);
6080	E1000_READ_REG(hw, E1000_TSCTFC);
6081
6082	E1000_READ_REG(hw, E1000_MGTPRC);
6083	E1000_READ_REG(hw, E1000_MGTPDC);
6084	E1000_READ_REG(hw, E1000_MGTPTC);
6085
6086	E1000_READ_REG(hw, E1000_IAC);
6087	E1000_READ_REG(hw, E1000_ICRXOC);
6088
6089	/* Clear PHY statistics registers */
6090	if ((hw->phy.type == e1000_phy_82578) ||
6091	    (hw->phy.type == e1000_phy_82579) ||
6092	    (hw->phy.type == e1000_phy_i217) ||
6093	    (hw->phy.type == e1000_phy_82577)) {
6094		ret_val = hw->phy.ops.acquire(hw);
6095		if (ret_val)
6096			return;
6097		ret_val = hw->phy.ops.set_page(hw,
6098					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
6099		if (ret_val)
6100			goto release;
6101		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6102		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6103		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6104		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6105		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6106		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6107		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6108		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6109		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6110		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6111		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6112		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6113		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6114		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6115release:
6116		hw->phy.ops.release(hw);
6117	}
6118}
6119
6120