/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixl/i40e_adminq.c 270631 2014-08-25 22:04:29Z jfv $*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
 * @desc: API request descriptor
 **/
static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
		desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* take the lock first so the count check and teardown cannot race
	 * with a concurrent i40e_asq_send_command()
	 */
	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);

	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* take the lock first so the count check and teardown cannot race
	 * with a concurrent i40e_clean_arq_element()
	 */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	u16 eetrack_lo, eetrack_hi;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize spin locks */
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	if (i40e_is_vf(hw))  /* VF has no need of firmware */
		goto init_adminq_exit;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->aq.nvm_busy = FALSE;

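	/* set the default HMC resource profile; the result of this call is
	 * not checked, and ret_code is unconditionally reset to success below
	 */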
	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
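
/*
 * Illustrative bring-up sketch (not part of this file): a caller of
 * i40e_init_adminq() must first fill in the four required hw->aq fields.
 * The entry counts and buffer sizes below are hypothetical example values,
 * not requirements of the API.
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = 512;
 *	hw->aq.arq_buf_size = 512;
 *	if (i40e_init_adminq(hw) != I40E_SUCCESS) {
 *		// bail out; any queues and spinlocks set up so far have
 *		// already been torn down by the error paths above
 *	}
 */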

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the spinlocks */
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
			   rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc,
				    sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_NONDMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns TRUE if the firmware has processed all descriptors on the
 *  admin send queue. Returns FALSE if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send routine for the Admin Send Queue.  It posts the
 *  command, reclaims descriptors already processed by the firmware, and,
 *  unless the caller requested asynchronous completion, waits for the
 *  descriptor write back.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
		status = I40E_ERR_NVM;
		goto asq_send_command_exit;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

	if (!status && i40e_is_nvm_update_op(desc))
		hw->aq.nvm_busy = TRUE;

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
asq_send_command_exit:
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}

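/*
 * Illustrative sketch (not part of this file): issuing a direct command,
 * i.e. one with no attached buffer.  "some_opcode" is a placeholder for
 * one of the real i40e_aqc_opc_* values; most callers reach this path
 * through higher-level wrappers rather than building descriptors by hand.
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, some_opcode);
 *	if (i40e_asq_send_command(hw, &desc, NULL, 0, NULL) != I40E_SUCCESS) {
 *		// on failure, hw->aq.asq_last_status holds the AQ error code
 *	}
 */
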
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	if (i40e_is_nvm_update_op(&e->desc)) {
		hw->aq.nvm_busy = FALSE;
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = FALSE;
		}
	}

	return ret_code;
}
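
/*
 * Illustrative sketch (not part of this file): draining the ARQ until no
 * work remains, e.g. from the driver's admin task.  The preallocated
 * message buffer and the dispatch step are assumptions for the example.
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending = 0;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = msg_buf;	// preallocated, buf_len bytes
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;	// no more work, or a queue error
 *		// dispatch on LE16_TO_CPU(event.desc.opcode) here
 *	} while (pending);
 */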
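/**
 *  i40e_resume_aq - resume AQ processing after it has been stopped
 *  @hw: pointer to the hardware structure
 *
 *  Reset the driver's ring state and reprogram the ASQ/ARQ registers,
 *  which are cleared by a PF reset.
 **/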
void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
#endif
	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
1071