/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "e1000_api.h"


static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
static void e1000_release_nvm_i210(struct e1000_hw *hw);
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data);
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);

/**
 *  e1000_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for the EEPROM access grant bit.
 *  Return successful if the access grant bit is set, else clear the request
 *  for EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_acquire_nvm_i210");

	ret_val = e1000_acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);

	return ret_val;
}

/**
 *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void e1000_release_nvm_i210(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_i210");

	e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}

/**
 *  e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the Shadow Ram to read
 *  @words: number of words to read
 *  @data: word(s) read from the Shadow Ram
 *
 *  Reads 16-bit words from the Shadow Ram using the EERD register.
 *  Uses the necessary synchronization semaphores.
 **/
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
			     u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	DEBUGFUNC("e1000_read_nvm_srrd_i210");

	/* We cannot hold the synchronization semaphores for too long,
	 * because of the forceful takeover procedure. However, it is more
	 * efficient to read in bursts than to synchronize access for each
	 * word. */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
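		/* Clamp this burst to at most E1000_EERD_EEWR_MAX_COUNT
		 * words so the semaphore is not held for the whole transfer.
		 */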
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = e1000_read_nvm_eerd(hw, offset, count,
						     data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}

/**
 *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16-bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to the Shadow RAM at offset using the EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  data will not be committed to FLASH and the Shadow RAM will most likely
 *  contain an invalid checksum.
 *
 *  If an error code is returned, data and Shadow RAM may be inconsistent -
 *  the buffer may be only partially written.
 **/
s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	DEBUGFUNC("e1000_write_nvm_srwr_i210");

	/* We cannot hold the synchronization semaphores for too long,
	 * because of the forceful takeover procedure. However, it is more
	 * efficient to write in bursts than to synchronize access for each
	 * word. */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = e1000_write_nvm_srwr(hw, offset, count,
						      data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}

/**
 *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16-bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to the Shadow Ram at offset using the EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_write_nvm_srwr");

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		ret_val = -E1000_ERR_NVM;

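		/* Build the SRWR command word: the word address, the data to
		 * write, and the start bit that kicks off the write.
		 */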
		eewr = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		E1000_WRITE_REG(hw, E1000_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    E1000_READ_REG(hw, E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/**
 *  e1000_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Returns an error when the word is not
 *  stored in OTP.
 **/
static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_word_i210");

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
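		/* Skip over multi-dword structure records; only word
		 * autoload records carry the data we are looking for.
		 */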
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}

/**
 *  e1000_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: the word offset (aka eeprom offset) to read
 *  @words: number of words to read (unused)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 **/
static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
				u16 E1000_UNUSEDARG words, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_invm_i210");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset + 1,
						     &data[1]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset + 2,
						     &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}

/**
 *  e1000_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 **/
s32 e1000_read_invm_version(struct e1000_hw *hw,
			    struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	DEBUGFUNC("e1000_read_invm_version");

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
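		/* Walk the version records from the end of the version area
		 * toward the start.
		 */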
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = E1000_SUCCESS;
			break;
		}
		/*
		 * Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = E1000_SUCCESS;
			break;
		}
	}

	if (status == E1000_SUCCESS) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = E1000_SUCCESS;
			break;
		}
		/* Check if we have image type in the last location used */
		else if ((((*record & 0x3) == 0) &&
			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = E1000_SUCCESS;
			break;
		}
	}
	return status;
}

/**
 *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = E1000_SUCCESS;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	DEBUGFUNC("e1000_validate_nvm_checksum_i210");

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {

		/*
		 * Temporarily replace the semaphore-grabbing read function
		 * with one that skips it, since we already hold the
		 * semaphore here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = e1000_read_nvm_eerd;

		status = e1000_validate_nvm_checksum_generic(hw);

		/* Revert the original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}


/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.  Next, commits the EEPROM data to the Flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
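		/* The checksum word is chosen so that all EEPROM words,
		 * including the checksum itself, sum to NVM_SUM (0xBABA).
		 */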
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/**
 *  e1000_get_flash_presence_i210 - Check if flash device is detected.
 *  @hw: pointer to the HW structure
 *
 **/
bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	DEBUGFUNC("e1000_get_flash_presence_i210");

	eec = E1000_READ_REG(hw, E1000_EECD);

	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = true;

	return ret_val;
}

/**
 *  e1000_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 flup;

	DEBUGFUNC("e1000_update_flash_i210");

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		DEBUGOUT("Flash update timed out\n");
		goto out;
	}

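	/* Set the flash update bit in EECD to start committing the shadow
	 * RAM contents to the flash, then wait for FLUDONE again.
	 */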
	flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
	E1000_WRITE_REG(hw, E1000_EECD, flup);

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == E1000_SUCCESS)
		DEBUGOUT("Flash update complete\n");
	else
		DEBUGOUT("Flash update timed out\n");

out:
	return ret_val;
}

/**
 *  e1000_pool_flash_update_done_i210 - Poll FLUDONE status.
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	DEBUGFUNC("e1000_pool_flash_update_done_i210");

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = E1000_READ_REG(hw, E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = E1000_SUCCESS;
			break;
		}
		usec_delay(5);
	}

	return ret_val;
}

/**
 *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210/i211 NVM parameters and function pointers.
 **/
static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	struct e1000_nvm_info *nvm = &hw->nvm;

	DEBUGFUNC("e1000_init_nvm_params_i210");

	ret_val = e1000_init_nvm_params_82575(hw);
	nvm->ops.acquire = e1000_acquire_nvm_i210;
	nvm->ops.release = e1000_release_nvm_i210;
	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
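	/* Flash-backed parts use the shadow RAM read/write paths; flash-less
	 * parts fall back to iNVM-only reads with null write/validate/update
	 * ops.
	 */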
	if (e1000_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read    = e1000_read_nvm_srrd_i210;
		nvm->ops.write   = e1000_write_nvm_srwr_i210;
		nvm->ops.validate = e1000_validate_nvm_checksum_i210;
		nvm->ops.update   = e1000_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read     = e1000_read_invm_i210;
		nvm->ops.write    = e1000_null_write_nvm;
		nvm->ops.validate = e1000_null_ops_generic;
		nvm->ops.update   = e1000_null_ops_generic;
	}
	return ret_val;
}

/**
 *  e1000_init_function_pointers_i210 - Init func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_i210(struct e1000_hw *hw)
{
	e1000_init_function_pointers_82575(hw);
	hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
}

/**
 *  e1000_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set it to a valid LED configuration.
 **/
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_valid_led_default_i210");

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 * e1000_pll_workaround_i210
 * @hw: pointer to the HW structure
 *
 * Works around an erratum in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power-up.
 **/
static s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get PHY semaphore */
	hw->phy.ops.acquire(hw);
	/* Get and set needed register values */
	wuc = E1000_READ_REG(hw, E1000_WUC);
	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					    &nvm_word);
	if (ret_val != E1000_SUCCESS)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	phy_word = E1000_PHY_PLL_UNCONF;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, 0xFC);
		usec_delay(20);
		e1000_read_phy_reg_mdic(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		usec_delay(20);
		e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, 0);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = E1000_SUCCESS;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

		E1000_WRITE_REG(hw, E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

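		/* Cycle the function through D3 and back to D0 via the PCI
		 * PM control/status register, then restore the original
		 * autoload word.
		 */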
		e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		msec_delay(1);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		E1000_WRITE_REG(hw, E1000_WUC, wuc);
	}
	/* restore MDICNFG setting */
	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
	/* Release PHY semaphore */
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 *  e1000_get_cfg_done_i210 - Read config done bit
 *  @hw: pointer to the HW structure
 *
 *  Read the management control register for the config done bit for
 *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
 *  to read the config done bit, so the error is *ONLY* logged and
 *  E1000_SUCCESS is returned.  If we were to return an error, EEPROM-less
 *  silicon could not be reset or change link.
 **/
static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	DEBUGFUNC("e1000_get_cfg_done_i210");

	while (timeout) {
		if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
			break;
		msec_delay(1);
		timeout--;
	}
	if (!timeout)
		DEBUGOUT("MNG configuration cycle has not completed.\n");

	return E1000_SUCCESS;
}

/**
 *  e1000_init_hw_i210 - Init hw for I210/I211
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize the hw for the i210 hw family.
 **/
s32 e1000_init_hw_i210(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;

	DEBUGFUNC("e1000_init_hw_i210");
	if ((hw->mac.type >= e1000_i210) &&
	    !(e1000_get_flash_presence_i210(hw))) {
		ret_val = e1000_pll_workaround_i210(hw);
		if (ret_val != E1000_SUCCESS)
			return ret_val;
	}
	hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;

	/* Initialize identification LED */
	mac->ops.id_led_init(hw);

	ret_val = e1000_init_hw_base(hw);
	return ret_val;
}