/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/e1000/e1000_i210.c 247064 2013-02-21 00:25:45Z jfv $*/

#include "e1000_api.h"


static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
static void e1000_release_nvm_i210(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data);
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
			       u16 *data);

/**
 *  e1000_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_acquire_nvm_i210");

	ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);

	return ret_val;
}

/**
 *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void e1000_release_nvm_i210(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_i210");

	e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 *  e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_i210");

	while (i < timeout) {
		if (e1000_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}
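/*
 * Usage sketch (editor's illustration, not part of the upstream driver):
 * acquire and release are expected to be paired around any access to the
 * shared PHY or NVM resource.  E1000_SWFW_PHY0_SM is assumed here as the
 * port 0 PHY mask used elsewhere in this driver family.
 *
 *	if (e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM) ==
 *	    E1000_SUCCESS) {
 *		... access PHY registers ...
 *		e1000_release_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM);
 *	}
 */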

/**
 *  e1000_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_i210");

	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
		; /* Empty */

	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}

/**
 *  e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("e1000_get_hw_semaphore_i210");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/*
		 * In rare circumstances, the driver may not have released the
		 * SW semaphore. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = FALSE;
			e1000_put_hw_semaphore_generic(hw);
			for (i = 0; i < timeout; i++) {
				swsm = E1000_READ_REG(hw, E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the Shadow Ram to read
 *  @words: number of words to read
 *  @data: word read from the Shadow Ram
 *
 *  Reads a 16 bit word from the Shadow Ram using the EERD register.
 *  Uses necessary synchronization semaphores.
 **/
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
			     u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	DEBUGFUNC("e1000_read_nvm_srrd_i210");

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to read in bursts than synchronizing access for each word. */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = e1000_read_nvm_eerd(hw, offset, count,
						     data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}
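/*
 * Caller-side sketch (editor's illustration): once
 * e1000_init_nvm_params_i210() has installed these handlers, higher layers
 * go through the generic ops table rather than calling this function
 * directly, e.g.:
 *
 *	u16 mac_words[3];
 *
 *	if (hw->nvm.ops.read(hw, NVM_MAC_ADDR, 3, mac_words) != E1000_SUCCESS)
 *		... handle the read failure ...
 *
 * NVM_MAC_ADDR is the same word offset handled by the i211 wrapper below.
 */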

/**
 *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to Shadow RAM at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  data will not be committed to FLASH and also Shadow RAM will most likely
 *  contain an invalid checksum.
 *
 *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
 *  partially written.
 **/
s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	DEBUGFUNC("e1000_write_nvm_srwr_i210");

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to write in bursts than synchronizing access for each word. */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = e1000_write_nvm_srwr(hw, offset, count,
						      data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}

/**
 *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_write_nvm_srwr");

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
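		/*
		 * Build the SRWR command dword: the target word address in
		 * the address field, the 16-bit payload in the data field,
		 * and the START bit to kick off the write.  DONE is polled
		 * below before moving on to the next word.
		 */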
		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		E1000_WRITE_REG(hw, E1000_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    E1000_READ_REG(hw, E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/**
 *  e1000_read_nvm_i211 - Read NVM wrapper function for I211
 *  @hw: pointer to the HW structure
 *  @offset: offset of the word (aka eeprom address) to read
 *  @words: number of words to read
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 **/
static s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset,
			       u16 words, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_nvm_i211");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_i211(hw, (u8)offset+1, &data[1]);
		ret_val |= e1000_read_invm_i211(hw, (u8)offset+2, &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}

/**
 *  e1000_read_invm_i211 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Return error when the word is not
 *  stored in OTP.
 **/
s32 e1000_read_invm_i211(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_i211");

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
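/*
 * Note on the iNVM walk above (editorial summary): the OTP is scanned as a
 * flat array of E1000_INVM_SIZE dwords.  Each dword carries a record type;
 * CSR-autoload and RSA-key records are skipped by advancing the index past
 * their fixed payload sizes, an uninitialized record ends the scan, and a
 * word-autoload record holds a single (word address, word data) pair that is
 * matched against the requested address.
 */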

/**
 *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = E1000_SUCCESS;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	DEBUGFUNC("e1000_validate_nvm_checksum_i210");

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {

		/*
		 * Replace the read function with semaphore grabbing with
		 * the one that skips this for a while.
		 * We have semaphore taken already here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = e1000_read_nvm_eerd;

		status = e1000_validate_nvm_checksum_generic(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}


/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
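/*
 * Update-sequence sketch (editor's illustration): Shadow RAM writes only
 * reach the flash once the checksum update commits them, so a typical caller
 * does something like:
 *
 *	if (hw->nvm.ops.write(hw, offset, 1, &word) == E1000_SUCCESS)
 *		ret_val = hw->nvm.ops.update(hw);
 *
 * where ops.write and ops.update resolve to e1000_write_nvm_srwr_i210() and
 * e1000_update_nvm_checksum_i210() once e1000_init_nvm_params_i210() has run.
 */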

/**
 *  e1000_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 flup;

	DEBUGFUNC("e1000_update_flash_i210");

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		DEBUGOUT("Flash update time out\n");
		goto out;
	}

	flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
	E1000_WRITE_REG(hw, E1000_EECD, flup);

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == E1000_SUCCESS)
		DEBUGOUT("Flash update complete\n");
	else
		DEBUGOUT("Flash update time out\n");

out:
	return ret_val;
}

/**
 *  e1000_pool_flash_update_done_i210 - Poll FLUDONE status.
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	DEBUGFUNC("e1000_pool_flash_update_done_i210");

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = E1000_READ_REG(hw, E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = E1000_SUCCESS;
			break;
		}
		usec_delay(5);
	}

	return ret_val;
}

/**
 *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210 NVM parameters and function pointers.
 **/
static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	struct e1000_nvm_info *nvm = &hw->nvm;

	DEBUGFUNC("e1000_init_nvm_params_i210");

	ret_val = e1000_init_nvm_params_82575(hw);

	nvm->ops.acquire = e1000_acquire_nvm_i210;
	nvm->ops.release = e1000_release_nvm_i210;
	nvm->ops.read    = e1000_read_nvm_srrd_i210;
	nvm->ops.write   = e1000_write_nvm_srwr_i210;
	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
	nvm->ops.validate = e1000_validate_nvm_checksum_i210;
	nvm->ops.update   = e1000_update_nvm_checksum_i210;

	return ret_val;
}

/**
 *  e1000_init_nvm_params_i211 - Initialize i211 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the NVM parameters and function pointers for i211.
 **/
static s32 e1000_init_nvm_params_i211(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;

	DEBUGFUNC("e1000_init_nvm_params_i211");

	nvm->ops.acquire  = e1000_acquire_nvm_i210;
	nvm->ops.release  = e1000_release_nvm_i210;
	nvm->ops.read     = e1000_read_nvm_i211;
	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
	nvm->ops.write    = e1000_null_write_nvm;
	nvm->ops.validate = e1000_null_ops_generic;
	nvm->ops.update   = e1000_null_ops_generic;

	return E1000_SUCCESS;
}

/**
 *  e1000_init_function_pointers_i210 - Init func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_i210(struct e1000_hw *hw)
{
	e1000_init_function_pointers_82575(hw);

	switch (hw->mac.type) {
	case e1000_i210:
		hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
		break;
	case e1000_i211:
		hw->nvm.ops.init_params = e1000_init_nvm_params_i211;
		break;
	default:
		break;
	}
	return;
}
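/*
 * Editorial note: this dispatcher is normally reached through the generic
 * setup path in e1000_api.c (assumed here), which afterwards invokes
 * hw->nvm.ops.init_params to finish NVM setup for the detected i210 or i211
 * MAC type.
 */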

/**
 *  e1000_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_valid_led_default_i210");

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}
755