/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "e1000_api.h"


static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
static void e1000_release_nvm_i210(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data);
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);

/**
 *  e1000_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_SWFW_SYNC.
 **/
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_acquire_nvm_i210");

	ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);

	return ret_val;
}

/**
 *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void e1000_release_nvm_i210(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_i210");

	e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
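
/*
 * The two helpers above are installed as hw->nvm.ops.acquire/release by
 * e1000_init_nvm_params_i210() below, and every Shadow RAM access in this
 * file is bracketed by them.  A minimal sketch of that pattern
 * (illustrative only; error handling trimmed):
 *
 *	u16 word;
 *	s32 err;
 *
 *	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
 *		err = e1000_read_nvm_eerd(hw, NVM_CHECKSUM_REG, 1, &word);
 *		hw->nvm.ops.release(hw);
 *	} else {
 *		err = E1000_ERR_SWFW_SYNC;
 *	}
 */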

/**
 *  e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = E1000_SUCCESS;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_i210");

	while (i < timeout) {
		if (e1000_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}
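
/*
 * The same acquire/release pair also protects PHY accesses; only the mask
 * changes.  A hedged sketch, assuming the usual per-port PHY mask name from
 * e1000_defines.h (E1000_SWFW_PHY0_SM for port 0) and the non-locking
 * e1000_read_phy_reg_mdic() helper from e1000_phy.c:
 *
 *	u16 phy_data;
 *
 *	if (e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM) ==
 *	    E1000_SUCCESS) {
 *		e1000_read_phy_reg_mdic(hw, PHY_CONTROL, &phy_data);
 *		e1000_release_swfw_sync_i210(hw, E1000_SWFW_PHY0_SM);
 *	}
 */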

/**
 *  e1000_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_i210");

	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
		; /* Empty */

	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}

/**
 *  e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	DEBUGFUNC("e1000_get_hw_semaphore_i210");

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usec_delay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = FALSE;
			e1000_put_hw_semaphore_generic(hw);
			for (i = 0; i < timeout; i++) {
				swsm = E1000_READ_REG(hw, E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				usec_delay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		usec_delay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000_put_hw_semaphore_generic(hw);
		DEBUGOUT("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return E1000_SUCCESS;
}
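
/*
 * The routine above only takes the SMBI/SWESMBI pair; callers in this file
 * drop it again via e1000_put_hw_semaphore_generic().  As a rough sketch of
 * the release side (the authoritative version lives in e1000_mac.c; this
 * assumes it simply clears both bits):
 *
 *	swsm = E1000_READ_REG(hw, E1000_SWSM);
 *	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
 *	E1000_WRITE_REG(hw, E1000_SWSM, swsm);
 */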

/**
 *  e1000_read_nvm_srrd_i210 - Reads Shadow RAM using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the Shadow RAM to read
 *  @words: number of words to read
 *  @data: pointer to the word(s) read from the Shadow RAM
 *
 *  Reads 16 bit words from the Shadow RAM using the EERD register.
 *  Uses necessary synchronization semaphores.
 **/
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
			     u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	DEBUGFUNC("e1000_read_nvm_srrd_i210");

	/* We cannot hold the synchronization semaphores for too long because
	 * of the forceful takeover procedure.  However, it is more efficient
	 * to read in bursts than to synchronize access for each word. */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = e1000_read_nvm_eerd(hw, offset + i, count,
						     data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}
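
/*
 * Typical use of the routine above, once installed as hw->nvm.ops.read, is
 * fetching the three words that hold the permanent MAC address.
 * Illustrative sketch only; the real address assembly is done by the
 * generic MAC address helpers in the shared code:
 *
 *	u16 mac_words[3];
 *
 *	if (hw->nvm.ops.read(hw, NVM_MAC_ADDR, 3, mac_words) ==
 *	    E1000_SUCCESS) {
 *		... mac_words[0..2] hold the address, 16 bits at a time ...
 *	}
 */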

/**
 *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to the Shadow RAM at offset using the EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  data will not be committed to FLASH and the Shadow RAM will most likely
 *  contain an invalid checksum.
 *
 *  If an error code is returned, data and Shadow RAM may be inconsistent -
 *  the buffer may be partially written.
 **/
s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	s32 status = E1000_SUCCESS;
	u16 i, count;

	DEBUGFUNC("e1000_write_nvm_srwr_i210");

	/* We cannot hold the synchronization semaphores for too long because
	 * of the forceful takeover procedure.  However, it is more efficient
	 * to write in bursts than to synchronize access for each word. */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
			status = e1000_write_nvm_srwr(hw, offset + i, count,
						      data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status != E1000_SUCCESS)
			break;
	}

	return status;
}
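
/*
 * As the comment above notes, a Shadow RAM write only becomes durable once
 * the checksum is regenerated and the contents are committed to flash.  A
 * minimal sketch of the full sequence (illustrative; NVM_SOME_OFFSET is a
 * placeholder, not a real define):
 *
 *	u16 word = 0x1234;
 *
 *	if (hw->nvm.ops.write(hw, NVM_SOME_OFFSET, 1, &word) == E1000_SUCCESS)
 *		hw->nvm.ops.update(hw);
 *
 * where ops.update() is e1000_update_nvm_checksum_i210() further below,
 * which rewrites the checksum word and then triggers the flash update.
 */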

/**
 *  e1000_write_nvm_srwr - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to the Shadow RAM at offset using the EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  Shadow RAM will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_write_nvm_srwr");

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		E1000_WRITE_REG(hw, E1000_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    E1000_READ_REG(hw, E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
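
/*
 * The SRWR command word built above packs the word offset, the 16-bit data
 * and the START bit into a single register write.  A hedged worked example,
 * assuming the usual values from e1000_defines.h
 * (E1000_NVM_RW_ADDR_SHIFT == 2, E1000_NVM_RW_REG_DATA == 16,
 * E1000_NVM_RW_REG_START == 1): writing 0xBEEF to word offset 0x10 gives
 *
 *	eewr = (0x10 << 2) | (0xBEEF << 16) | 1 = 0xBEEF0041
 *
 * after which the inner loop polls E1000_NVM_RW_REG_DONE in the same
 * register until the hardware reports completion.
 */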

/**
 *  e1000_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP.  Returns an error when the word is not
 *  stored in OTP.
 **/
static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	DEBUGFUNC("e1000_read_invm_word_i210");

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				DEBUGOUT2("Read INVM Word 0x%02x = %x",
					  address, *data);
				status = E1000_SUCCESS;
				break;
			}
		}
	}
	if (status != E1000_SUCCESS)
		DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
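
/*
 * Only words that were programmed into the OTP as word-autoload records can
 * be found this way, so callers generally pair the lookup with a fallback
 * default.  The wrapper below does exactly that for a fixed set of words;
 * the same pattern works for any other word (sketch only, NVM_SOME_WORD and
 * SOME_DEFAULT are placeholders, not real defines):
 *
 *	u16 word;
 *
 *	if (e1000_read_invm_word_i210(hw, (u8)NVM_SOME_WORD, &word) !=
 *	    E1000_SUCCESS)
 *		word = SOME_DEFAULT;
 */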

/**
 *  e1000_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: the word offset (aka eeprom offset) to read
 *  @words: number of words to read (unused; the count is implied by @offset)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 **/
static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
				u16 E1000_UNUSEDARG words, u16 *data)
{
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_read_invm_i210");

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
						     &data[1]);
		ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
						     &data[2]);
		if (ret_val != E1000_SUCCESS)
			DEBUGOUT("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val != E1000_SUCCESS) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = E1000_SUCCESS;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}

/**
 *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = E1000_SUCCESS;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	DEBUGFUNC("e1000_validate_nvm_checksum_i210");

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {

		/*
		 * Replace the read function that grabs the semaphore with
		 * one that does not; the semaphore is already held here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = e1000_read_nvm_eerd;

		status = e1000_validate_nvm_checksum_generic(hw);

		/* Revert to the original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}
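
/*
 * The checksum rule that the validate and update paths implement: the
 * 16-bit words from offset 0 up to and including NVM_CHECKSUM_REG must sum
 * (as a u16) to NVM_SUM, i.e. 0xBABA.  The update routine below therefore
 * stores
 *
 *	checksum = (u16)(NVM_SUM - sum(words[0 .. NVM_CHECKSUM_REG - 1]))
 *
 * so that, for example, if the words before the checksum add up to 0x1234,
 * the word written at NVM_CHECKSUM_REG is 0xBABA - 0x1234 = 0xA886.
 */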


/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.  Finally, commits the EEPROM data to the flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	DEBUGFUNC("e1000_update_nvm_checksum_i210");

	/*
	 * Read the first word from the EEPROM.  If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails.
	 */
	ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val != E1000_SUCCESS) {
		DEBUGOUT("EEPROM read failed\n");
		goto out;
	}

	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
		/*
		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				DEBUGOUT("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16) NVM_SUM - checksum;
		ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
						&checksum);
		if (ret_val != E1000_SUCCESS) {
			hw->nvm.ops.release(hw);
			DEBUGOUT("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = e1000_update_flash_i210(hw);
	} else {
		ret_val = E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/**
 *  e1000_get_flash_presence_i210 - Check if flash device is detected.
 *  @hw: pointer to the HW structure
 *
 **/
bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = FALSE;

	DEBUGFUNC("e1000_get_flash_presence_i210");

	eec = E1000_READ_REG(hw, E1000_EECD);

	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = TRUE;

	return ret_val;
}

/**
 *  e1000_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 **/
s32 e1000_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 flup;

	DEBUGFUNC("e1000_update_flash_i210");

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		DEBUGOUT("Flash update time out\n");
		goto out;
	}

	flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
	E1000_WRITE_REG(hw, E1000_EECD, flup);

	ret_val = e1000_pool_flash_update_done_i210(hw);
	if (ret_val == E1000_SUCCESS)
		DEBUGOUT("Flash update complete\n");
	else
		DEBUGOUT("Flash update time out\n");

out:
	return ret_val;
}

/**
 *  e1000_pool_flash_update_done_i210 - Poll FLUDONE status
 *  @hw: pointer to the HW structure
 *
 **/
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	DEBUGFUNC("e1000_pool_flash_update_done_i210");

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = E1000_READ_REG(hw, E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = E1000_SUCCESS;
			break;
		}
		usec_delay(5);
	}

	return ret_val;
}

/**
 *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210/i211 NVM parameters and function pointers.
 **/
static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	struct e1000_nvm_info *nvm = &hw->nvm;

	DEBUGFUNC("e1000_init_nvm_params_i210");

	ret_val = e1000_init_nvm_params_82575(hw);
	nvm->ops.acquire = e1000_acquire_nvm_i210;
	nvm->ops.release = e1000_release_nvm_i210;
	nvm->ops.valid_led_default = e1000_valid_led_default_i210;
	if (e1000_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read     = e1000_read_nvm_srrd_i210;
		nvm->ops.write    = e1000_write_nvm_srwr_i210;
		nvm->ops.validate = e1000_validate_nvm_checksum_i210;
		nvm->ops.update   = e1000_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read     = e1000_read_invm_i210;
		nvm->ops.write    = e1000_null_write_nvm;
		nvm->ops.validate = e1000_null_ops_generic;
		nvm->ops.update   = e1000_null_ops_generic;
	}
	return ret_val;
}

/**
 *  e1000_init_function_pointers_i210 - Init func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_i210(struct e1000_hw *hw)
{
	e1000_init_function_pointers_82575(hw);
	hw->nvm.ops.init_params = e1000_init_nvm_params_i210;

	return;
}
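
/*
 * Bringing up the NVM interface on an attach path therefore looks roughly
 * like the following (illustrative sketch; the FreeBSD em/igb attach code
 * drives this through the shared-code setup helpers rather than calling it
 * directly):
 *
 *	e1000_init_function_pointers_i210(hw);
 *	if (hw->nvm.ops.init_params(hw) == E1000_SUCCESS) {
 *		if (hw->nvm.type == e1000_nvm_flash_hw)
 *			... flash-backed part: srrd/srwr routines above ...
 *		else
 *			... iNVM-only i210/i211: e1000_read_invm_i210 ...
 *	}
 */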

/**
 *  e1000_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the LED settings read from the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set it to a valid LED configuration.
 **/
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	DEBUGFUNC("e1000_valid_led_default_i210");

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 *  __e1000_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 **/
static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				    u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__e1000_access_xmdio_reg");

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
							 dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Reset the MMD access control register back to device address 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}

/**
 *  e1000_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value read from the XMDIO address
 **/
s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	DEBUGFUNC("e1000_read_xmdio_reg");

	return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, TRUE);
}

/**
 *  e1000_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 **/
s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	DEBUGFUNC("e1000_write_xmdio_reg");

	return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, FALSE);
}
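
/*
 * The pair above provides register access to the MMDs behind the internal
 * PHY (IEEE 802.3 Clause 45 style, tunnelled through the MMDAC/MMDAAD
 * registers).  A hedged read-modify-write sketch, assuming device address 7
 * (the Clause 45 auto-negotiation MMD) and placeholder register/bit names:
 *
 *	u16 val;
 *
 *	if (e1000_read_xmdio_reg(hw, SOME_MMD7_REG, 7, &val) ==
 *	    E1000_SUCCESS) {
 *		val |= SOME_FEATURE_BIT;
 *		e1000_write_xmdio_reg(hw, SOME_MMD7_REG, 7, val);
 *	}
 */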

/**
 *  e1000_pll_workaround_i210 - Work around a PLL erratum
 *  @hw: pointer to the HW structure
 *
 *  Works around an erratum in the PLL circuit where it occasionally
 *  provides the wrong clock frequency after power up.
 **/
static s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = E1000_READ_REG(hw, E1000_WUC);
	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					    &nvm_word);
	if (ret_val != E1000_SUCCESS)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
					 E1000_PHY_PLL_FREQ_REG), &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = E1000_SUCCESS;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);

		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

		E1000_WRITE_REG(hw, E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		msec_delay(1);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		E1000_WRITE_REG(hw, E1000_WUC, wuc);
	}
	/* restore MDICNFG setting */
	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
	return ret_val;
}

/**
 *  e1000_get_cfg_done_i210 - Read config done bit
 *  @hw: pointer to the HW structure
 *
 *  Read the management control register and check the config done bit for
 *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
 *  to read the config done bit, so the error is *ONLY* logged and
 *  E1000_SUCCESS is returned.  If we were to return an error, EEPROM-less
 *  silicon would not be able to be reset or change link.
 **/
static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	DEBUGFUNC("e1000_get_cfg_done_i210");

	while (timeout) {
		if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
			break;
		msec_delay(1);
		timeout--;
	}
	if (!timeout)
		DEBUGOUT("MNG configuration cycle has not completed.\n");

	return E1000_SUCCESS;
}

/**
 *  e1000_init_hw_i210 - Init hw for I210/I211
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize hw for i210 hw family.
 **/
s32 e1000_init_hw_i210(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_init_hw_i210");

	if ((hw->mac.type >= e1000_i210) &&
	    !(e1000_get_flash_presence_i210(hw))) {
		ret_val = e1000_pll_workaround_i210(hw);
		if (ret_val != E1000_SUCCESS)
			return ret_val;
	}
	hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;
	ret_val = e1000_init_hw_82575(hw);
	return ret_val;
}