1/******************************************************************************
2
3  Copyright (c) 2013-2015, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: releng/11.0/sys/dev/ixl/i40e_nvm.c 303967 2016-08-11 19:13:30Z sbruno $*/
34
35#include "i40e_prototype.h"
36
37enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
38					       u16 *data);
39enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
40					    u16 *data);
41enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
42						 u16 *words, u16 *data);
43enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
44					      u16 *words, u16 *data);
45enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
46				       u32 offset, u16 words, void *data,
47				       bool last_command);
48
/**
 * i40e_init_nvm - Initialize NVM access parameters
 * @hw: pointer to the HW structure
 *
 * Sets up the NVM info structure. Should be called once per NVM
 * initialization, e.g. inside i40e_init_shared_code().
 * Note that the term NVM is used here (and in all functions in this file)
 * as an equivalent of the FLASH part mapped into the Shadow RAM (SR);
 * the FLASH is always accessed through the Shadow RAM.
 **/
59enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
60{
61	struct i40e_nvm_info *nvm = &hw->nvm;
62	enum i40e_status_code ret_code = I40E_SUCCESS;
63	u32 fla, gens;
64	u8 sr_size;
65
66	DEBUGFUNC("i40e_init_nvm");
67
	/* The SR size is stored regardless of the NVM programming mode,
	 * as blank mode may be used on the factory line.
	 */
71	gens = rd32(hw, I40E_GLNVM_GENS);
72	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
73			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
74	/* Switching to words (sr_size contains power of 2KB) */
75	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
76
77	/* Check if we are in the normal or blank NVM programming mode */
78	fla = rd32(hw, I40E_GLNVM_FLA);
79	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
80		/* Max NVM timeout */
81		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
82		nvm->blank_nvm_mode = FALSE;
83	} else { /* Blank programming mode */
84		nvm->blank_nvm_mode = TRUE;
85		ret_code = I40E_ERR_NVM_BLANK_MODE;
86		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
87	}
88
89	return ret_code;
90}
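
/*
 * Illustrative sketch (not part of the shared code): how the GLNVM_GENS
 * SR_SIZE field is converted to a word count above.  The field holds a
 * power-of-two multiplier of 1KB, so a field value of n means (1 << n) KB
 * of Shadow RAM, i.e. (1 << n) * 512 sixteen-bit words; a value of 4, for
 * example, would mean 16KB or 8192 words.  The example_ name below is
 * hypothetical and the block is guarded out so it is never built.
 */
#if 0
static u32 example_sr_size_in_words(u8 sr_size_field)
{
	/* 1KB of Shadow RAM holds 512 sixteen-bit words */
	return BIT(sr_size_field) * I40E_SR_WORDS_IN_1KB;
}
#endif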
91
/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership for reading or writing
 * via the proper Admin Command.
 **/
100enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
101				       enum i40e_aq_resource_access_type access)
102{
103	enum i40e_status_code ret_code = I40E_SUCCESS;
104	u64 gtime, timeout;
105	u64 time_left = 0;
106
107	DEBUGFUNC("i40e_acquire_nvm");
108
109	if (hw->nvm.blank_nvm_mode)
		goto i40e_acquire_nvm_exit;
111
112	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
113					    0, &time_left, NULL);
114	/* Reading the Global Device Timer */
115	gtime = rd32(hw, I40E_GLVFGEN_TIMER);
116
117	/* Store the timeout */
118	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
119
120	if (ret_code)
121		i40e_debug(hw, I40E_DEBUG_NVM,
122			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
123			   access, time_left, ret_code, hw->aq.asq_last_status);
124
125	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
127		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
128		while ((gtime < timeout) && time_left) {
129			i40e_msec_delay(10);
130			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
131			ret_code = i40e_aq_request_resource(hw,
132							I40E_NVM_RESOURCE_ID,
133							access, 0, &time_left,
134							NULL);
135			if (ret_code == I40E_SUCCESS) {
136				hw->nvm.hw_semaphore_timeout =
137					    I40E_MS_TO_GTIME(time_left) + gtime;
138				break;
139			}
140		}
141		if (ret_code != I40E_SUCCESS) {
142			hw->nvm.hw_semaphore_timeout = 0;
143			i40e_debug(hw, I40E_DEBUG_NVM,
144				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
145				   time_left, ret_code, hw->aq.asq_last_status);
146		}
147	}
148
i40e_acquire_nvm_exit:
150	return ret_code;
151}
152
153/**
154 * i40e_release_nvm - Generic request for releasing the NVM ownership
155 * @hw: pointer to the HW structure
156 *
157 * This function will release NVM resource via the proper Admin Command.
158 **/
159void i40e_release_nvm(struct i40e_hw *hw)
160{
161	enum i40e_status_code ret_code = I40E_SUCCESS;
162	u32 total_delay = 0;
163
164	DEBUGFUNC("i40e_release_nvm");
165
166	if (hw->nvm.blank_nvm_mode)
167		return;
168
169	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
170
171	/* there are some rare cases when trying to release the resource
172	 * results in an admin Q timeout, so handle them correctly
173	 */
174	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
175	       (total_delay < hw->aq.asq_cmd_timeout)) {
176			i40e_msec_delay(1);
177			ret_code = i40e_aq_release_resource(hw,
178						I40E_NVM_RESOURCE_ID, 0, NULL);
179			total_delay++;
180	}
181}
182
183/**
184 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
185 * @hw: pointer to the HW structure
186 *
187 * Polls the SRCTL Shadow RAM register done bit.
188 **/
189static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
190{
191	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
192	u32 srctl, wait_cnt;
193
194	DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
195
196	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
197	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
198		srctl = rd32(hw, I40E_GLNVM_SRCTL);
199		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
200			ret_code = I40E_SUCCESS;
201			break;
202		}
203		i40e_usec_delay(5);
204	}
205	if (ret_code == I40E_ERR_TIMEOUT)
206		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
207	return ret_code;
208}
209
/**
 * i40e_read_nvm_word - Reads an NVM word, acquiring the lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM, using either the AdminQ or the
 * GLNVM_SRCTL register depending on the device capabilities.
 **/
218enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
219					 u16 *data)
220{
221	enum i40e_status_code ret_code = I40E_SUCCESS;
222
223	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
224		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
225		if (!ret_code) {
226			ret_code = i40e_read_nvm_word_aq(hw, offset, data);
227			i40e_release_nvm(hw);
228		}
229	} else {
230		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
231	}
232	return ret_code;
233}
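
/*
 * Illustrative sketch (not part of the shared code): a typical caller only
 * needs i40e_read_nvm_word(); any ownership acquire/release required for the
 * AdminQ path is handled inside.  The offset below is a placeholder, real
 * callers pass one of the I40E_SR_* offsets; the example_ names are
 * hypothetical and the block is guarded out so it is never built.
 */
#if 0
static enum i40e_status_code example_read_one_word(struct i40e_hw *hw,
						   u16 *word)
{
	u16 example_offset = 0x0000; /* placeholder Shadow RAM offset */

	return i40e_read_nvm_word(hw, example_offset, word);
}
#endif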
234
/**
 * __i40e_read_nvm_word - Reads an NVM word, assumes caller does the locking
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM, using either the AdminQ or the
 * GLNVM_SRCTL register depending on the device capabilities.
 **/
243enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
244					   u16 offset,
245					   u16 *data)
246{
247	enum i40e_status_code ret_code = I40E_SUCCESS;
248
249	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
250		ret_code = i40e_read_nvm_word_aq(hw, offset, data);
251	else
252		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
253	return ret_code;
254}
255
256/**
257 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
258 * @hw: pointer to the HW structure
259 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
260 * @data: word read from the Shadow RAM
261 *
262 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
263 **/
264enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
265					       u16 *data)
266{
267	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
268	u32 sr_reg;
269
270	DEBUGFUNC("i40e_read_nvm_word_srctl");
271
272	if (offset >= hw->nvm.sr_size) {
273		i40e_debug(hw, I40E_DEBUG_NVM,
274			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
275			   offset, hw->nvm.sr_size);
276		ret_code = I40E_ERR_PARAM;
277		goto read_nvm_exit;
278	}
279
280	/* Poll the done bit first */
281	ret_code = i40e_poll_sr_srctl_done_bit(hw);
282	if (ret_code == I40E_SUCCESS) {
283		/* Write the address and start reading */
284		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
285			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
286		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
287
288		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
289		ret_code = i40e_poll_sr_srctl_done_bit(hw);
290		if (ret_code == I40E_SUCCESS) {
291			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
292			*data = (u16)((sr_reg &
293				       I40E_GLNVM_SRDATA_RDDATA_MASK)
294				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
295		}
296	}
297	if (ret_code != I40E_SUCCESS)
298		i40e_debug(hw, I40E_DEBUG_NVM,
299			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
300			   offset);
301
302read_nvm_exit:
303	return ret_code;
304}
305
/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ.
 **/
314enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
315					    u16 *data)
316{
317	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
318
319	DEBUGFUNC("i40e_read_nvm_word_aq");
320
321	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
322	*data = LE16_TO_CPU(*(__le16 *)data);
323
324	return ret_code;
325}
326
/**
 * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the Shadow RAM via the AdminQ or the
 * GLNVM_SRCTL register. The caller must hold the NVM ownership for the whole
 * duration of the read.
 **/
338enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
339					     u16 offset,
340					     u16 *words, u16 *data)
341{
342	enum i40e_status_code ret_code = I40E_SUCCESS;
343
344	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
345		ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
346	else
347		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
348	return ret_code;
349}
350
/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the Shadow RAM via the AdminQ or the
 * GLNVM_SRCTL register. When the AdminQ is used, the buffer read is preceded
 * by the NVM ownership take and followed by the release.
 **/
362enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
363					   u16 *words, u16 *data)
364{
365	enum i40e_status_code ret_code = I40E_SUCCESS;
366
367	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
368		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
369		if (!ret_code) {
370			ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
371							 data);
372			i40e_release_nvm(hw);
373		}
374	} else {
375		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
376	}
377	return ret_code;
378}
379
/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the Shadow RAM one word at a time
 * using the i40e_read_nvm_word_srctl() method. No NVM ownership is taken or
 * released here; that is left to the caller.
 **/
391enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
392						 u16 *words, u16 *data)
393{
394	enum i40e_status_code ret_code = I40E_SUCCESS;
395	u16 index, word;
396
397	DEBUGFUNC("i40e_read_nvm_buffer_srctl");
398
399	/* Loop through the selected region */
400	for (word = 0; word < *words; word++) {
401		index = offset + word;
402		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
403		if (ret_code != I40E_SUCCESS)
404			break;
405	}
406
407	/* Update the number of words read from the Shadow RAM */
408	*words = word;
409
410	return ret_code;
411}
412
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the Shadow RAM using the
 * i40e_read_nvm_aq() method. The caller must hold the NVM ownership for the
 * whole duration of the read.
 **/
424enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
425					      u16 *words, u16 *data)
426{
427	enum i40e_status_code ret_code;
428	u16 read_size = *words;
429	bool last_cmd = FALSE;
430	u16 words_read = 0;
431	u16 i = 0;
432
433	DEBUGFUNC("i40e_read_nvm_buffer_aq");
434
435	do {
		/* Calculate the number of words to read in this step.
		 * The FVL AQ does not allow reading more than one 4KB sector
		 * at a time or crossing sector boundaries.
		 */
440		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
441			read_size = min(*words,
442					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
443				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
444		else
445			read_size = min((*words - words_read),
446					I40E_SR_SECTOR_SIZE_IN_WORDS);
447
448		/* Check if this is last command, if so set proper flag */
449		if ((words_read + read_size) >= *words)
450			last_cmd = TRUE;
451
452		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
453					    data + words_read, last_cmd);
454		if (ret_code != I40E_SUCCESS)
455			goto read_nvm_buffer_aq_exit;
456
457		/* Increment counter for words already read and move offset to
458		 * new read location
459		 */
460		words_read += read_size;
461		offset += read_size;
462	} while (words_read < *words);
463
464	for (i = 0; i < *words; i++)
465		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
466
467read_nvm_buffer_aq_exit:
468	*words = words_read;
469	return ret_code;
470}
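
/*
 * Illustrative sketch (not part of the shared code): a simplified equivalent
 * of the per-iteration length calculation in the loop above, which splits a
 * buffer read so that no single AQ request crosses a 4KB (0x800-word) Shadow
 * RAM sector boundary.  For example, a 0xC00-word read starting at offset
 * 0x400 is issued as two requests of 0x400 and 0x800 words.  The example_
 * name is hypothetical and the block is guarded out so it is never built.
 */
#if 0
static u16 example_chunk_len(u16 offset, u16 words_left)
{
	u16 to_sector_end = I40E_SR_SECTOR_SIZE_IN_WORDS -
			    (offset % I40E_SR_SECTOR_SIZE_IN_WORDS);

	/* never read past the caller's buffer or the current sector */
	return min(words_left, to_sector_end);
}
#endif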
471
/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer for the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a buffer of 16 bit words from the Shadow RAM using the admin command.
 **/
483enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
484				       u32 offset, u16 words, void *data,
485				       bool last_command)
486{
487	enum i40e_status_code ret_code = I40E_ERR_NVM;
488	struct i40e_asq_cmd_details cmd_details;
489
490	DEBUGFUNC("i40e_read_nvm_aq");
491
492	memset(&cmd_details, 0, sizeof(cmd_details));
493	cmd_details.wb_desc = &hw->nvm_wb_desc;
494
495	/* Here we are checking the SR limit only for the flat memory model.
496	 * We cannot do it for the module-based model, as we did not acquire
497	 * the NVM resource yet (we cannot get the module pointer value).
498	 * Firmware will check the module-based model.
499	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector) in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
515	else
516		ret_code = i40e_aq_read_nvm(hw, module_pointer,
517					    2 * offset,  /*bytes*/
518					    2 * words,   /*bytes*/
519					    data, last_command, &cmd_details);
520
521	return ret_code;
522}
523
524/**
525 * i40e_write_nvm_aq - Writes Shadow RAM.
526 * @hw: pointer to the HW structure.
527 * @module_pointer: module pointer location in words from the NVM beginning
528 * @offset: offset in words from module start
529 * @words: number of words to write
530 * @data: buffer with words to write to the Shadow RAM
531 * @last_command: tells the AdminQ that this is the last command
532 *
 * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
534 **/
535enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
536					u32 offset, u16 words, void *data,
537					bool last_command)
538{
539	enum i40e_status_code ret_code = I40E_ERR_NVM;
540	struct i40e_asq_cmd_details cmd_details;
541
542	DEBUGFUNC("i40e_write_nvm_aq");
543
544	memset(&cmd_details, 0, sizeof(cmd_details));
545	cmd_details.wb_desc = &hw->nvm_wb_desc;
546
547	/* Here we are checking the SR limit only for the flat memory model.
548	 * We cannot do it for the module-based model, as we did not acquire
549	 * the NVM resource yet (we cannot get the module pointer value).
550	 * Firmware will check the module-based model.
551	 */
552	if ((offset + words) > hw->nvm.sr_size)
553		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
554	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
555		/* We can write only up to 4KB (one sector), in one AQ write */
556		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
557	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
558		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
559		/* A single write cannot spread over two sectors */
560		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
561	else
562		ret_code = i40e_aq_update_nvm(hw, module_pointer,
563					      2 * offset,  /*bytes*/
564					      2 * words,   /*bytes*/
565					      data, last_command, &cmd_details);
566
567	return ret_code;
568}
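
/*
 * Illustrative sketch (not part of the shared code): the sector-crossing test
 * used by the read/write helpers above.  A request is rejected when the first
 * and last word of the transfer fall into different 4KB Shadow RAM sectors,
 * e.g. offset 0x7ff with 2 words touches sectors 0 and 1 and is refused.
 * The example_ name is hypothetical and the block is guarded out so it is
 * never built.
 */
#if 0
static bool example_crosses_sector(u32 offset, u16 words)
{
	u32 first_sector = offset / I40E_SR_SECTOR_SIZE_IN_WORDS;
	u32 last_sector = (offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS;

	return first_sector != last_sector;
}
#endif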
569
/**
 * __i40e_write_nvm_word - Writes Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to write
 * @data: word to write to the Shadow RAM
 *
 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
 * NVM ownership has to be acquired and released (on ARQ completion event
 * reception) by the caller. To commit the SR to NVM, the update checksum
 * function should be called.
 **/
581enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
582					    void *data)
583{
584	DEBUGFUNC("i40e_write_nvm_word");
585
586	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
587
588	/* Value 0x00 below means that we treat SR as a flat mem */
589	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
590}
591
592/**
593 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
594 * @hw: pointer to the HW structure
595 * @module_pointer: module pointer location in words from the NVM beginning
596 * @offset: offset of the Shadow RAM buffer to write
597 * @words: number of words to write
598 * @data: words to write to the Shadow RAM
599 *
 * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by the caller. To commit the SR to NVM,
 * the update checksum function should be called.
604 **/
605enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
606					      u8 module_pointer, u32 offset,
607					      u16 words, void *data)
608{
609	__le16 *le_word_ptr = (__le16 *)data;
610	u16 *word_ptr = (u16 *)data;
611	u32 i = 0;
612
613	DEBUGFUNC("i40e_write_nvm_buffer");
614
615	for (i = 0; i < words; i++)
616		le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
617
618	/* Here we will only write one buffer as the size of the modules
619	 * mirrored in the Shadow RAM is always less than 4K.
620	 */
621	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
622				 data, FALSE);
623}
624
625/**
626 * i40e_calc_nvm_checksum - Calculates and returns the checksum
627 * @hw: pointer to hardware structure
628 * @checksum: pointer to the checksum
629 *
 * This function calculates the SW checksum that covers the whole 64kB shadow
 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 * of the VPD module are customer specific and unknown, so this function skips
 * the maximum possible size of the VPD module (1kB).
634 **/
635enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
636{
637	enum i40e_status_code ret_code = I40E_SUCCESS;
638	struct i40e_virt_mem vmem;
639	u16 pcie_alt_module = 0;
640	u16 checksum_local = 0;
641	u16 vpd_module = 0;
642	u16 *data;
643	u16 i = 0;
644
645	DEBUGFUNC("i40e_calc_nvm_checksum");
646
647	ret_code = i40e_allocate_virt_mem(hw, &vmem,
648				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
649	if (ret_code)
650		goto i40e_calc_nvm_checksum_exit;
651	data = (u16 *)vmem.va;
652
653	/* read pointer to VPD area */
654	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
655					&vpd_module);
656	if (ret_code != I40E_SUCCESS) {
657		ret_code = I40E_ERR_NVM_CHECKSUM;
658		goto i40e_calc_nvm_checksum_exit;
659	}
660
661	/* read pointer to PCIe Alt Auto-load module */
662	ret_code = __i40e_read_nvm_word(hw,
663					I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
664					&pcie_alt_module);
665	if (ret_code != I40E_SUCCESS) {
666		ret_code = I40E_ERR_NVM_CHECKSUM;
667		goto i40e_calc_nvm_checksum_exit;
668	}
669
670	/* Calculate SW checksum that covers the whole 64kB shadow RAM
671	 * except the VPD and PCIe ALT Auto-load modules
672	 */
673	for (i = 0; i < hw->nvm.sr_size; i++) {
674		/* Read SR page */
675		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
676			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
677
678			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
679			if (ret_code != I40E_SUCCESS) {
680				ret_code = I40E_ERR_NVM_CHECKSUM;
681				goto i40e_calc_nvm_checksum_exit;
682			}
683		}
684
685		/* Skip Checksum word */
686		if (i == I40E_SR_SW_CHECKSUM_WORD)
687			continue;
688		/* Skip VPD module (convert byte size to word count) */
689		if ((i >= (u32)vpd_module) &&
690		    (i < ((u32)vpd_module +
691		     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
692			continue;
693		}
694		/* Skip PCIe ALT module (convert byte size to word count) */
695		if ((i >= (u32)pcie_alt_module) &&
696		    (i < ((u32)pcie_alt_module +
697		     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
698			continue;
699		}
700
701		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
702	}
703
704	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
705
706i40e_calc_nvm_checksum_exit:
707	i40e_free_virt_mem(hw, &vmem);
708	return ret_code;
709}
710
711/**
712 * i40e_update_nvm_checksum - Updates the NVM checksum
713 * @hw: pointer to hardware structure
714 *
715 * NVM ownership must be acquired before calling this function and released
716 * on ARQ completion event reception by caller.
717 * This function will commit SR to NVM.
718 **/
719enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
720{
721	enum i40e_status_code ret_code = I40E_SUCCESS;
722	u16 checksum;
723	__le16 le_sum;
724
725	DEBUGFUNC("i40e_update_nvm_checksum");
726
727	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
728	le_sum = CPU_TO_LE16(checksum);
729	if (ret_code == I40E_SUCCESS)
730		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
731					     1, &le_sum, TRUE);
732
733	return ret_code;
734}
735
736/**
737 * i40e_validate_nvm_checksum - Validate EEPROM checksum
738 * @hw: pointer to hardware structure
739 * @checksum: calculated checksum
740 *
 * Performs the checksum calculation and validates the NVM SW checksum. If the
 * caller does not need the checksum value, the pointer can be NULL.
743 **/
744enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
745						 u16 *checksum)
746{
747	enum i40e_status_code ret_code = I40E_SUCCESS;
748	u16 checksum_sr = 0;
749	u16 checksum_local = 0;
750
751	DEBUGFUNC("i40e_validate_nvm_checksum");
752
753	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
754		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
755	if (!ret_code) {
756		ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
757		if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
758			i40e_release_nvm(hw);
759		if (ret_code != I40E_SUCCESS)
760			goto i40e_validate_nvm_checksum_exit;
761	} else {
762		goto i40e_validate_nvm_checksum_exit;
763	}
764
765	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
766
767	/* Verify read checksum from EEPROM is the same as
768	 * calculated checksum
769	 */
770	if (checksum_local != checksum_sr)
771		ret_code = I40E_ERR_NVM_CHECKSUM;
772
773	/* If the user cares, return the calculated checksum */
774	if (checksum)
775		*checksum = checksum_local;
776
777i40e_validate_nvm_checksum_exit:
778	return ret_code;
779}
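
/*
 * Illustrative sketch (not part of the shared code): a typical probe-time
 * caller of i40e_validate_nvm_checksum().  The checksum is defined so that
 * I40E_SR_SW_CHECKSUM_BASE minus the sum of all covered Shadow RAM words
 * equals the word stored at I40E_SR_SW_CHECKSUM_WORD; a mismatch is reported
 * as I40E_ERR_NVM_CHECKSUM.  The example_ name is hypothetical and the block
 * is guarded out so it is never built.
 */
#if 0
static enum i40e_status_code example_check_nvm(struct i40e_hw *hw)
{
	u16 calculated = 0;
	enum i40e_status_code status;

	status = i40e_validate_nvm_checksum(hw, &calculated);
	if (status == I40E_ERR_NVM_CHECKSUM)
		DEBUGOUT("NVM checksum mismatch\n");

	return status;
}
#endif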
780
781static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
782						    struct i40e_nvm_access *cmd,
783						    u8 *bytes, int *perrno);
784static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
785						    struct i40e_nvm_access *cmd,
786						    u8 *bytes, int *perrno);
787static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
788						    struct i40e_nvm_access *cmd,
789						    u8 *bytes, int *perrno);
790static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
791						    struct i40e_nvm_access *cmd,
792						    int *perrno);
793static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
794						   struct i40e_nvm_access *cmd,
795						   int *perrno);
796static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
797						   struct i40e_nvm_access *cmd,
798						   u8 *bytes, int *perrno);
799static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
800						  struct i40e_nvm_access *cmd,
801						  u8 *bytes, int *perrno);
802static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
803						 struct i40e_nvm_access *cmd,
804						 u8 *bytes, int *perrno);
805static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
806						    struct i40e_nvm_access *cmd,
807						    u8 *bytes, int *perrno);
808static INLINE u8 i40e_nvmupd_get_module(u32 val)
809{
810	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
811}
812static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
813{
814	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
815}
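
/*
 * Illustrative sketch (not part of the shared code): the cmd->config word
 * decoded by the two helpers above packs a module pointer in the low
 * I40E_NVM_MOD_PNT_MASK bits and a transaction type in the
 * I40E_NVM_TRANS_MASK bits.  Building a config for an I40E_NVM_SA read would
 * therefore look roughly like the following.  The example_ name is
 * hypothetical and the block is guarded out so it is never built.
 */
#if 0
static u32 example_build_config(u8 module)
{
	return ((u32)I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) |
	       (module & I40E_NVM_MOD_PNT_MASK);
}
#endif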
816
817static const char *i40e_nvm_update_state_str[] = {
818	"I40E_NVMUPD_INVALID",
819	"I40E_NVMUPD_READ_CON",
820	"I40E_NVMUPD_READ_SNT",
821	"I40E_NVMUPD_READ_LCB",
822	"I40E_NVMUPD_READ_SA",
823	"I40E_NVMUPD_WRITE_ERA",
824	"I40E_NVMUPD_WRITE_CON",
825	"I40E_NVMUPD_WRITE_SNT",
826	"I40E_NVMUPD_WRITE_LCB",
827	"I40E_NVMUPD_WRITE_SA",
828	"I40E_NVMUPD_CSUM_CON",
829	"I40E_NVMUPD_CSUM_SA",
830	"I40E_NVMUPD_CSUM_LCB",
831	"I40E_NVMUPD_STATUS",
832	"I40E_NVMUPD_EXEC_AQ",
833	"I40E_NVMUPD_GET_AQ_RESULT",
834};
835
836/**
837 * i40e_nvmupd_command - Process an NVM update command
838 * @hw: pointer to hardware structure
839 * @cmd: pointer to nvm update command
840 * @bytes: pointer to the data buffer
841 * @perrno: pointer to return error code
842 *
843 * Dispatches command depending on what update state is current
844 **/
845enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
846					  struct i40e_nvm_access *cmd,
847					  u8 *bytes, int *perrno)
848{
849	enum i40e_status_code status;
850	enum i40e_nvmupd_cmd upd_cmd;
851
852	DEBUGFUNC("i40e_nvmupd_command");
853
854	/* assume success */
855	*perrno = 0;
856
857	/* early check for status command and debug msgs */
858	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
859
	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
861		   i40e_nvm_update_state_str[upd_cmd],
862		   hw->nvmupd_state,
863		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
864		   cmd->command, cmd->config, cmd->offset, cmd->data_size);
865
866	if (upd_cmd == I40E_NVMUPD_INVALID) {
867		*perrno = -EFAULT;
868		i40e_debug(hw, I40E_DEBUG_NVM,
869			   "i40e_nvmupd_validate_command returns %d errno %d\n",
870			   upd_cmd, *perrno);
871	}
872
873	/* a status request returns immediately rather than
874	 * going into the state machine
875	 */
876	if (upd_cmd == I40E_NVMUPD_STATUS) {
877		if (!cmd->data_size) {
878			*perrno = -EFAULT;
879			return I40E_ERR_BUF_TOO_SHORT;
880		}
881
882		bytes[0] = hw->nvmupd_state;
883
884		if (cmd->data_size >= 4) {
885			bytes[1] = 0;
886			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
887		}
888
889		return I40E_SUCCESS;
890	}
891
892	switch (hw->nvmupd_state) {
893	case I40E_NVMUPD_STATE_INIT:
894		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
895		break;
896
897	case I40E_NVMUPD_STATE_READING:
898		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
899		break;
900
901	case I40E_NVMUPD_STATE_WRITING:
902		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
903		break;
904
905	case I40E_NVMUPD_STATE_INIT_WAIT:
906	case I40E_NVMUPD_STATE_WRITE_WAIT:
907		/* if we need to stop waiting for an event, clear
908		 * the wait info and return before doing anything else
909		 */
910		if (cmd->offset == 0xffff) {
911			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
912			return I40E_SUCCESS;
913		}
914
915		status = I40E_ERR_NOT_READY;
916		*perrno = -EBUSY;
917		break;
918
919	default:
920		/* invalid state, should never happen */
921		i40e_debug(hw, I40E_DEBUG_NVM,
922			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
923		status = I40E_NOT_SUPPORTED;
924		*perrno = -ESRCH;
925		break;
926	}
927	return status;
928}
929
930/**
931 * i40e_nvmupd_state_init - Handle NVM update state Init
932 * @hw: pointer to hardware structure
933 * @cmd: pointer to nvm update command buffer
934 * @bytes: pointer to the data buffer
935 * @perrno: pointer to return error code
936 *
937 * Process legitimate commands of the Init state and conditionally set next
938 * state. Reject all other commands.
939 **/
940static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
941						    struct i40e_nvm_access *cmd,
942						    u8 *bytes, int *perrno)
943{
944	enum i40e_status_code status = I40E_SUCCESS;
945	enum i40e_nvmupd_cmd upd_cmd;
946
947	DEBUGFUNC("i40e_nvmupd_state_init");
948
949	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
950
951	switch (upd_cmd) {
952	case I40E_NVMUPD_READ_SA:
953		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
954		if (status) {
955			*perrno = i40e_aq_rc_to_posix(status,
956						     hw->aq.asq_last_status);
957		} else {
958			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
959			i40e_release_nvm(hw);
960		}
961		break;
962
963	case I40E_NVMUPD_READ_SNT:
964		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
965		if (status) {
966			*perrno = i40e_aq_rc_to_posix(status,
967						     hw->aq.asq_last_status);
968		} else {
969			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
970			if (status)
971				i40e_release_nvm(hw);
972			else
973				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
974		}
975		break;
976
977	case I40E_NVMUPD_WRITE_ERA:
978		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
979		if (status) {
980			*perrno = i40e_aq_rc_to_posix(status,
981						     hw->aq.asq_last_status);
982		} else {
983			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
984			if (status) {
985				i40e_release_nvm(hw);
986			} else {
987				hw->nvm_release_on_done = TRUE;
988				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
989				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
990			}
991		}
992		break;
993
994	case I40E_NVMUPD_WRITE_SA:
995		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
996		if (status) {
997			*perrno = i40e_aq_rc_to_posix(status,
998						     hw->aq.asq_last_status);
999		} else {
1000			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1001			if (status) {
1002				i40e_release_nvm(hw);
1003			} else {
1004				hw->nvm_release_on_done = TRUE;
1005				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1006				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1007			}
1008		}
1009		break;
1010
1011	case I40E_NVMUPD_WRITE_SNT:
1012		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1013		if (status) {
1014			*perrno = i40e_aq_rc_to_posix(status,
1015						     hw->aq.asq_last_status);
1016		} else {
1017			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1018			if (status) {
1019				i40e_release_nvm(hw);
1020			} else {
1021				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1022				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1023			}
1024		}
1025		break;
1026
1027	case I40E_NVMUPD_CSUM_SA:
1028		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1029		if (status) {
1030			*perrno = i40e_aq_rc_to_posix(status,
1031						     hw->aq.asq_last_status);
1032		} else {
1033			status = i40e_update_nvm_checksum(hw);
1034			if (status) {
1035				*perrno = hw->aq.asq_last_status ?
1036				   i40e_aq_rc_to_posix(status,
1037						       hw->aq.asq_last_status) :
1038				   -EIO;
1039				i40e_release_nvm(hw);
1040			} else {
1041				hw->nvm_release_on_done = TRUE;
1042				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1043				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1044			}
1045		}
1046		break;
1047
1048	case I40E_NVMUPD_EXEC_AQ:
1049		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1050		break;
1051
1052	case I40E_NVMUPD_GET_AQ_RESULT:
1053		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1054		break;
1055
1056	default:
1057		i40e_debug(hw, I40E_DEBUG_NVM,
1058			   "NVMUPD: bad cmd %s in init state\n",
1059			   i40e_nvm_update_state_str[upd_cmd]);
1060		status = I40E_ERR_NVM;
1061		*perrno = -ESRCH;
1062		break;
1063	}
1064	return status;
1065}
1066
1067/**
1068 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1069 * @hw: pointer to hardware structure
1070 * @cmd: pointer to nvm update command buffer
1071 * @bytes: pointer to the data buffer
1072 * @perrno: pointer to return error code
1073 *
1074 * NVM ownership is already held.  Process legitimate commands and set any
1075 * change in state; reject all other commands.
1076 **/
1077static enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1078						    struct i40e_nvm_access *cmd,
1079						    u8 *bytes, int *perrno)
1080{
1081	enum i40e_status_code status = I40E_SUCCESS;
1082	enum i40e_nvmupd_cmd upd_cmd;
1083
1084	DEBUGFUNC("i40e_nvmupd_state_reading");
1085
1086	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1087
1088	switch (upd_cmd) {
1089	case I40E_NVMUPD_READ_SA:
1090	case I40E_NVMUPD_READ_CON:
1091		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1092		break;
1093
1094	case I40E_NVMUPD_READ_LCB:
1095		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1096		i40e_release_nvm(hw);
1097		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1098		break;
1099
1100	default:
1101		i40e_debug(hw, I40E_DEBUG_NVM,
1102			   "NVMUPD: bad cmd %s in reading state.\n",
1103			   i40e_nvm_update_state_str[upd_cmd]);
1104		status = I40E_NOT_SUPPORTED;
1105		*perrno = -ESRCH;
1106		break;
1107	}
1108	return status;
1109}
1110
1111/**
1112 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1113 * @hw: pointer to hardware structure
1114 * @cmd: pointer to nvm update command buffer
1115 * @bytes: pointer to the data buffer
1116 * @perrno: pointer to return error code
1117 *
1118 * NVM ownership is already held.  Process legitimate commands and set any
1119 * change in state; reject all other commands
1120 **/
1121static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1122						    struct i40e_nvm_access *cmd,
1123						    u8 *bytes, int *perrno)
1124{
1125	enum i40e_status_code status = I40E_SUCCESS;
1126	enum i40e_nvmupd_cmd upd_cmd;
1127	bool retry_attempt = FALSE;
1128
1129	DEBUGFUNC("i40e_nvmupd_state_writing");
1130
1131	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1132
1133retry:
1134	switch (upd_cmd) {
1135	case I40E_NVMUPD_WRITE_CON:
1136		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1137		if (!status) {
1138			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1139			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1140		}
1141		break;
1142
1143	case I40E_NVMUPD_WRITE_LCB:
1144		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1145		if (status) {
1146			*perrno = hw->aq.asq_last_status ?
1147				   i40e_aq_rc_to_posix(status,
1148						       hw->aq.asq_last_status) :
1149				   -EIO;
1150			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1151		} else {
1152			hw->nvm_release_on_done = TRUE;
1153			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1154			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1155		}
1156		break;
1157
1158	case I40E_NVMUPD_CSUM_CON:
1159		/* Assumes the caller has acquired the nvm */
1160		status = i40e_update_nvm_checksum(hw);
1161		if (status) {
1162			*perrno = hw->aq.asq_last_status ?
1163				   i40e_aq_rc_to_posix(status,
1164						       hw->aq.asq_last_status) :
1165				   -EIO;
1166			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1167		} else {
1168			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1169			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1170		}
1171		break;
1172
1173	case I40E_NVMUPD_CSUM_LCB:
1174		/* Assumes the caller has acquired the nvm */
1175		status = i40e_update_nvm_checksum(hw);
1176		if (status) {
1177			*perrno = hw->aq.asq_last_status ?
1178				   i40e_aq_rc_to_posix(status,
1179						       hw->aq.asq_last_status) :
1180				   -EIO;
1181			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1182		} else {
1183			hw->nvm_release_on_done = TRUE;
1184			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1185			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1186		}
1187		break;
1188
1189	default:
1190		i40e_debug(hw, I40E_DEBUG_NVM,
1191			   "NVMUPD: bad cmd %s in writing state.\n",
1192			   i40e_nvm_update_state_str[upd_cmd]);
1193		status = I40E_NOT_SUPPORTED;
1194		*perrno = -ESRCH;
1195		break;
1196	}
1197
1198	/* In some circumstances, a multi-write transaction takes longer
1199	 * than the default 3 minute timeout on the write semaphore.  If
1200	 * the write failed with an EBUSY status, this is likely the problem,
1201	 * so here we try to reacquire the semaphore then retry the write.
1202	 * We only do one retry, then give up.
1203	 */
1204	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1205	    !retry_attempt) {
1206		enum i40e_status_code old_status = status;
1207		u32 old_asq_status = hw->aq.asq_last_status;
1208		u32 gtime;
1209
1210		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1211		if (gtime >= hw->nvm.hw_semaphore_timeout) {
1212			i40e_debug(hw, I40E_DEBUG_ALL,
1213				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1214				   gtime, hw->nvm.hw_semaphore_timeout);
1215			i40e_release_nvm(hw);
1216			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1217			if (status) {
1218				i40e_debug(hw, I40E_DEBUG_ALL,
1219					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1220					   hw->aq.asq_last_status);
1221				status = old_status;
1222				hw->aq.asq_last_status = old_asq_status;
1223			} else {
1224				retry_attempt = TRUE;
1225				goto retry;
1226			}
1227		}
1228	}
1229
1230	return status;
1231}
1232
1233/**
1234 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1235 * @hw: pointer to the hardware structure
1236 * @opcode: the event that just happened
1237 **/
1238void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
1239{
1240	if (opcode == hw->nvm_wait_opcode) {
1241
1242		i40e_debug(hw, I40E_DEBUG_NVM,
1243			   "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
1244		if (hw->nvm_release_on_done) {
1245			i40e_release_nvm(hw);
1246			hw->nvm_release_on_done = FALSE;
1247		}
1248		hw->nvm_wait_opcode = 0;
1249
1250		switch (hw->nvmupd_state) {
1251		case I40E_NVMUPD_STATE_INIT_WAIT:
1252			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1253			break;
1254
1255		case I40E_NVMUPD_STATE_WRITE_WAIT:
1256			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1257			break;
1258
1259		default:
1260			break;
1261		}
1262	}
1263}
1264
1265/**
1266 * i40e_nvmupd_validate_command - Validate given command
1267 * @hw: pointer to hardware structure
1268 * @cmd: pointer to nvm update command buffer
1269 * @perrno: pointer to return error code
1270 *
1271 * Return one of the valid command types or I40E_NVMUPD_INVALID
1272 **/
1273static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1274						    struct i40e_nvm_access *cmd,
1275						    int *perrno)
1276{
1277	enum i40e_nvmupd_cmd upd_cmd;
1278	u8 module, transaction;
1279
	DEBUGFUNC("i40e_nvmupd_validate_command");
1281
1282	/* anything that doesn't match a recognized case is an error */
1283	upd_cmd = I40E_NVMUPD_INVALID;
1284
1285	transaction = i40e_nvmupd_get_transaction(cmd->config);
1286	module = i40e_nvmupd_get_module(cmd->config);
1287
1288	/* limits on data size */
1289	if ((cmd->data_size < 1) ||
1290	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1291		i40e_debug(hw, I40E_DEBUG_NVM,
1292			   "i40e_nvmupd_validate_command data_size %d\n",
1293			   cmd->data_size);
1294		*perrno = -EFAULT;
1295		return I40E_NVMUPD_INVALID;
1296	}
1297
1298	switch (cmd->command) {
1299	case I40E_NVM_READ:
1300		switch (transaction) {
1301		case I40E_NVM_CON:
1302			upd_cmd = I40E_NVMUPD_READ_CON;
1303			break;
1304		case I40E_NVM_SNT:
1305			upd_cmd = I40E_NVMUPD_READ_SNT;
1306			break;
1307		case I40E_NVM_LCB:
1308			upd_cmd = I40E_NVMUPD_READ_LCB;
1309			break;
1310		case I40E_NVM_SA:
1311			upd_cmd = I40E_NVMUPD_READ_SA;
1312			break;
1313		case I40E_NVM_EXEC:
1314			if (module == 0xf)
1315				upd_cmd = I40E_NVMUPD_STATUS;
1316			else if (module == 0)
1317				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1318			break;
1319		}
1320		break;
1321
1322	case I40E_NVM_WRITE:
1323		switch (transaction) {
1324		case I40E_NVM_CON:
1325			upd_cmd = I40E_NVMUPD_WRITE_CON;
1326			break;
1327		case I40E_NVM_SNT:
1328			upd_cmd = I40E_NVMUPD_WRITE_SNT;
1329			break;
1330		case I40E_NVM_LCB:
1331			upd_cmd = I40E_NVMUPD_WRITE_LCB;
1332			break;
1333		case I40E_NVM_SA:
1334			upd_cmd = I40E_NVMUPD_WRITE_SA;
1335			break;
1336		case I40E_NVM_ERA:
1337			upd_cmd = I40E_NVMUPD_WRITE_ERA;
1338			break;
1339		case I40E_NVM_CSUM:
1340			upd_cmd = I40E_NVMUPD_CSUM_CON;
1341			break;
1342		case (I40E_NVM_CSUM|I40E_NVM_SA):
1343			upd_cmd = I40E_NVMUPD_CSUM_SA;
1344			break;
1345		case (I40E_NVM_CSUM|I40E_NVM_LCB):
1346			upd_cmd = I40E_NVMUPD_CSUM_LCB;
1347			break;
1348		case I40E_NVM_EXEC:
1349			if (module == 0)
1350				upd_cmd = I40E_NVMUPD_EXEC_AQ;
1351			break;
1352		}
1353		break;
1354	}
1355
1356	return upd_cmd;
1357}
1358
1359/**
1360 * i40e_nvmupd_exec_aq - Run an AQ command
1361 * @hw: pointer to hardware structure
1362 * @cmd: pointer to nvm update command buffer
1363 * @bytes: pointer to the data buffer
1364 * @perrno: pointer to return error code
1365 *
1366 * cmd structure contains identifiers and data buffer
1367 **/
1368static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1369						 struct i40e_nvm_access *cmd,
1370						 u8 *bytes, int *perrno)
1371{
1372	struct i40e_asq_cmd_details cmd_details;
1373	enum i40e_status_code status;
1374	struct i40e_aq_desc *aq_desc;
1375	u32 buff_size = 0;
1376	u8 *buff = NULL;
1377	u32 aq_desc_len;
1378	u32 aq_data_len;
1379
1380	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1381	memset(&cmd_details, 0, sizeof(cmd_details));
1382	cmd_details.wb_desc = &hw->nvm_wb_desc;
1383
1384	aq_desc_len = sizeof(struct i40e_aq_desc);
1385	memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1386
1387	/* get the aq descriptor */
1388	if (cmd->data_size < aq_desc_len) {
1389		i40e_debug(hw, I40E_DEBUG_NVM,
1390			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1391			   cmd->data_size, aq_desc_len);
1392		*perrno = -EINVAL;
1393		return I40E_ERR_PARAM;
1394	}
1395	aq_desc = (struct i40e_aq_desc *)bytes;
1396
1397	/* if data buffer needed, make sure it's ready */
1398	aq_data_len = cmd->data_size - aq_desc_len;
1399	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
1400	if (buff_size) {
1401		if (!hw->nvm_buff.va) {
1402			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1403							hw->aq.asq_buf_size);
1404			if (status)
1405				i40e_debug(hw, I40E_DEBUG_NVM,
1406					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1407					   status);
1408		}
1409
1410		if (hw->nvm_buff.va) {
1411			buff = hw->nvm_buff.va;
1412			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
1413		}
1414	}
1415
1416	/* and away we go! */
1417	status = i40e_asq_send_command(hw, aq_desc, buff,
1418				       buff_size, &cmd_details);
1419	if (status) {
1420		i40e_debug(hw, I40E_DEBUG_NVM,
1421			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1422			   i40e_stat_str(hw, status),
1423			   i40e_aq_str(hw, hw->aq.asq_last_status));
1424		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1425	}
1426
1427	/* should we wait for a followup event? */
1428	if (cmd->offset) {
1429		hw->nvm_wait_opcode = cmd->offset;
1430		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1431	}
1432
1433	return status;
1434}
1435
1436/**
1437 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1438 * @hw: pointer to hardware structure
1439 * @cmd: pointer to nvm update command buffer
1440 * @bytes: pointer to the data buffer
1441 * @perrno: pointer to return error code
1442 *
1443 * cmd structure contains identifiers and data buffer
1444 **/
1445static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1446						    struct i40e_nvm_access *cmd,
1447						    u8 *bytes, int *perrno)
1448{
1449	u32 aq_total_len;
1450	u32 aq_desc_len;
1451	int remainder;
1452	u8 *buff;
1453
1454	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1455
1456	aq_desc_len = sizeof(struct i40e_aq_desc);
1457	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1458
1459	/* check offset range */
1460	if (cmd->offset > aq_total_len) {
1461		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1462			   __func__, cmd->offset, aq_total_len);
1463		*perrno = -EINVAL;
1464		return I40E_ERR_PARAM;
1465	}
1466
1467	/* check copylength range */
1468	if (cmd->data_size > (aq_total_len - cmd->offset)) {
1469		int new_len = aq_total_len - cmd->offset;
1470
1471		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1472			   __func__, cmd->data_size, new_len);
1473		cmd->data_size = new_len;
1474	}
1475
1476	remainder = cmd->data_size;
1477	if (cmd->offset < aq_desc_len) {
1478		u32 len = aq_desc_len - cmd->offset;
1479
1480		len = min(len, cmd->data_size);
1481		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1482			   __func__, cmd->offset, cmd->offset + len);
1483
1484		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1485		memcpy(bytes, buff, len);
1486
1487		bytes += len;
1488		remainder -= len;
1489		buff = hw->nvm_buff.va;
1490	} else {
1491		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1492	}
1493
1494	if (remainder > 0) {
1495		int start_byte = buff - (u8 *)hw->nvm_buff.va;
1496
1497		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1498			   __func__, start_byte, start_byte + remainder);
1499		memcpy(bytes, buff, remainder);
1500	}
1501
1502	return I40E_SUCCESS;
1503}
1504
1505/**
1506 * i40e_nvmupd_nvm_read - Read NVM
1507 * @hw: pointer to hardware structure
1508 * @cmd: pointer to nvm update command buffer
1509 * @bytes: pointer to the data buffer
1510 * @perrno: pointer to return error code
1511 *
1512 * cmd structure contains identifiers and data buffer
1513 **/
1514static enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1515						  struct i40e_nvm_access *cmd,
1516						  u8 *bytes, int *perrno)
1517{
1518	struct i40e_asq_cmd_details cmd_details;
1519	enum i40e_status_code status;
1520	u8 module, transaction;
1521	bool last;
1522
1523	transaction = i40e_nvmupd_get_transaction(cmd->config);
1524	module = i40e_nvmupd_get_module(cmd->config);
1525	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1526
1527	memset(&cmd_details, 0, sizeof(cmd_details));
1528	cmd_details.wb_desc = &hw->nvm_wb_desc;
1529
1530	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1531				  bytes, last, &cmd_details);
1532	if (status) {
1533		i40e_debug(hw, I40E_DEBUG_NVM,
1534			   "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1535			   module, cmd->offset, cmd->data_size);
1536		i40e_debug(hw, I40E_DEBUG_NVM,
1537			   "i40e_nvmupd_nvm_read status %d aq %d\n",
1538			   status, hw->aq.asq_last_status);
1539		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1540	}
1541
1542	return status;
1543}
1544
1545/**
1546 * i40e_nvmupd_nvm_erase - Erase an NVM module
1547 * @hw: pointer to hardware structure
1548 * @cmd: pointer to nvm update command buffer
1549 * @perrno: pointer to return error code
1550 *
1551 * module, offset, data_size and data are in cmd structure
1552 **/
1553static enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1554						   struct i40e_nvm_access *cmd,
1555						   int *perrno)
1556{
1557	enum i40e_status_code status = I40E_SUCCESS;
1558	struct i40e_asq_cmd_details cmd_details;
1559	u8 module, transaction;
1560	bool last;
1561
1562	transaction = i40e_nvmupd_get_transaction(cmd->config);
1563	module = i40e_nvmupd_get_module(cmd->config);
1564	last = (transaction & I40E_NVM_LCB);
1565
1566	memset(&cmd_details, 0, sizeof(cmd_details));
1567	cmd_details.wb_desc = &hw->nvm_wb_desc;
1568
1569	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1570				   last, &cmd_details);
1571	if (status) {
1572		i40e_debug(hw, I40E_DEBUG_NVM,
1573			   "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1574			   module, cmd->offset, cmd->data_size);
1575		i40e_debug(hw, I40E_DEBUG_NVM,
1576			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
1577			   status, hw->aq.asq_last_status);
1578		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1579	}
1580
1581	return status;
1582}
1583
1584/**
1585 * i40e_nvmupd_nvm_write - Write NVM
1586 * @hw: pointer to hardware structure
1587 * @cmd: pointer to nvm update command buffer
1588 * @bytes: pointer to the data buffer
1589 * @perrno: pointer to return error code
1590 *
1591 * module, offset, data_size and data are in cmd structure
1592 **/
1593static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1594						   struct i40e_nvm_access *cmd,
1595						   u8 *bytes, int *perrno)
1596{
1597	enum i40e_status_code status = I40E_SUCCESS;
1598	struct i40e_asq_cmd_details cmd_details;
1599	u8 module, transaction;
1600	bool last;
1601
1602	transaction = i40e_nvmupd_get_transaction(cmd->config);
1603	module = i40e_nvmupd_get_module(cmd->config);
1604	last = (transaction & I40E_NVM_LCB);
1605
1606	memset(&cmd_details, 0, sizeof(cmd_details));
1607	cmd_details.wb_desc = &hw->nvm_wb_desc;
1608
1609	status = i40e_aq_update_nvm(hw, module, cmd->offset,
1610				    (u16)cmd->data_size, bytes, last,
1611				    &cmd_details);
1612	if (status) {
1613		i40e_debug(hw, I40E_DEBUG_NVM,
1614			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1615			   module, cmd->offset, cmd->data_size);
1616		i40e_debug(hw, I40E_DEBUG_NVM,
1617			   "i40e_nvmupd_nvm_write status %d aq %d\n",
1618			   status, hw->aq.asq_last_status);
1619		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1620	}
1621
1622	return status;
1623}
1624