i40e_adminq.c revision 270346
1266423Sjfv/******************************************************************************
2266423Sjfv
3266423Sjfv  Copyright (c) 2013-2014, Intel Corporation
4266423Sjfv  All rights reserved.
5266423Sjfv
6266423Sjfv  Redistribution and use in source and binary forms, with or without
7266423Sjfv  modification, are permitted provided that the following conditions are met:
8266423Sjfv
9266423Sjfv   1. Redistributions of source code must retain the above copyright notice,
10266423Sjfv      this list of conditions and the following disclaimer.
11266423Sjfv
12266423Sjfv   2. Redistributions in binary form must reproduce the above copyright
13266423Sjfv      notice, this list of conditions and the following disclaimer in the
14266423Sjfv      documentation and/or other materials provided with the distribution.
15266423Sjfv
16266423Sjfv   3. Neither the name of the Intel Corporation nor the names of its
17266423Sjfv      contributors may be used to endorse or promote products derived from
18266423Sjfv      this software without specific prior written permission.
19266423Sjfv
20266423Sjfv  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21266423Sjfv  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22266423Sjfv  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23266423Sjfv  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24266423Sjfv  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25266423Sjfv  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26266423Sjfv  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27266423Sjfv  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28266423Sjfv  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29266423Sjfv  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30266423Sjfv  POSSIBILITY OF SUCH DAMAGE.
31266423Sjfv
32266423Sjfv******************************************************************************/
33266423Sjfv/*$FreeBSD: head/sys/dev/ixl/i40e_adminq.c 270346 2014-08-22 18:59:19Z jfv $*/
34266423Sjfv
35266423Sjfv#include "i40e_status.h"
36266423Sjfv#include "i40e_type.h"
37266423Sjfv#include "i40e_register.h"
38266423Sjfv#include "i40e_adminq.h"
39266423Sjfv#include "i40e_prototype.h"
40266423Sjfv
41266423Sjfv/**
42266423Sjfv * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
43266423Sjfv * @desc: API request descriptor
44266423Sjfv **/
45266423Sjfvstatic INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
46266423Sjfv{
47266423Sjfv	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
48266423Sjfv		desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
49266423Sjfv}
50266423Sjfv
51266423Sjfv/**
52266423Sjfv *  i40e_adminq_init_regs - Initialize AdminQ registers
53266423Sjfv *  @hw: pointer to the hardware structure
54266423Sjfv *
55266423Sjfv *  This assumes the alloc_asq and alloc_arq functions have already been called
56266423Sjfv **/
57266423Sjfvstatic void i40e_adminq_init_regs(struct i40e_hw *hw)
58266423Sjfv{
59266423Sjfv	/* set head and tail registers in our local struct */
60270346Sjfv	if (i40e_is_vf(hw)) {
61266423Sjfv		hw->aq.asq.tail = I40E_VF_ATQT1;
62266423Sjfv		hw->aq.asq.head = I40E_VF_ATQH1;
63266423Sjfv		hw->aq.asq.len  = I40E_VF_ATQLEN1;
64269198Sjfv		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
65269198Sjfv		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
66266423Sjfv		hw->aq.arq.tail = I40E_VF_ARQT1;
67266423Sjfv		hw->aq.arq.head = I40E_VF_ARQH1;
68266423Sjfv		hw->aq.arq.len  = I40E_VF_ARQLEN1;
69269198Sjfv		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
70269198Sjfv		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
71266423Sjfv	} else {
72266423Sjfv		hw->aq.asq.tail = I40E_PF_ATQT;
73266423Sjfv		hw->aq.asq.head = I40E_PF_ATQH;
74266423Sjfv		hw->aq.asq.len  = I40E_PF_ATQLEN;
75269198Sjfv		hw->aq.asq.bal  = I40E_PF_ATQBAL;
76269198Sjfv		hw->aq.asq.bah  = I40E_PF_ATQBAH;
77266423Sjfv		hw->aq.arq.tail = I40E_PF_ARQT;
78266423Sjfv		hw->aq.arq.head = I40E_PF_ARQH;
79266423Sjfv		hw->aq.arq.len  = I40E_PF_ARQLEN;
80269198Sjfv		hw->aq.arq.bal  = I40E_PF_ARQBAL;
81269198Sjfv		hw->aq.arq.bah  = I40E_PF_ARQBAH;
82266423Sjfv	}
83266423Sjfv}
84266423Sjfv
85266423Sjfv/**
86266423Sjfv *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
87266423Sjfv *  @hw: pointer to the hardware structure
88266423Sjfv **/
89266423Sjfvenum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
90266423Sjfv{
91266423Sjfv	enum i40e_status_code ret_code;
92266423Sjfv
93266423Sjfv	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
94266423Sjfv					 i40e_mem_atq_ring,
95266423Sjfv					 (hw->aq.num_asq_entries *
96266423Sjfv					 sizeof(struct i40e_aq_desc)),
97266423Sjfv					 I40E_ADMINQ_DESC_ALIGNMENT);
98266423Sjfv	if (ret_code)
99266423Sjfv		return ret_code;
100266423Sjfv
101266423Sjfv	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
102266423Sjfv					  (hw->aq.num_asq_entries *
103266423Sjfv					  sizeof(struct i40e_asq_cmd_details)));
104266423Sjfv	if (ret_code) {
105266423Sjfv		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
106266423Sjfv		return ret_code;
107266423Sjfv	}
108266423Sjfv
109266423Sjfv	return ret_code;
110266423Sjfv}
111266423Sjfv
112266423Sjfv/**
113266423Sjfv *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
114266423Sjfv *  @hw: pointer to the hardware structure
115266423Sjfv **/
116266423Sjfvenum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
117266423Sjfv{
118266423Sjfv	enum i40e_status_code ret_code;
119266423Sjfv
120266423Sjfv	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
121266423Sjfv					 i40e_mem_arq_ring,
122266423Sjfv					 (hw->aq.num_arq_entries *
123266423Sjfv					 sizeof(struct i40e_aq_desc)),
124266423Sjfv					 I40E_ADMINQ_DESC_ALIGNMENT);
125266423Sjfv
126266423Sjfv	return ret_code;
127266423Sjfv}
128266423Sjfv
/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases only the ASQ descriptor ring DMA memory.
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
140266423Sjfv
/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases only the ARQ descriptor ring DMA memory.
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
152266423Sjfv
/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 *
 *  Allocates one DMA buffer per ARQ descriptor and programs each
 *  descriptor with its buffer's address and length so firmware can
 *  deposit events into them.  On any failure, everything allocated
 *  so far is freed before returning.
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		/* each descriptor carries an external buffer; buffers larger
		 * than I40E_AQ_LARGE_BUF additionally need the LB flag
		 */
		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	/* reached both on success and when the buffer-info allocation
	 * above fails (nothing to unwind in that case)
	 */
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
219266423Sjfv
220266423Sjfv/**
221266423Sjfv *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
222266423Sjfv *  @hw: pointer to the hardware structure
223266423Sjfv **/
224266423Sjfvstatic enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
225266423Sjfv{
226266423Sjfv	enum i40e_status_code ret_code;
227266423Sjfv	struct i40e_dma_mem *bi;
228266423Sjfv	int i;
229266423Sjfv
230266423Sjfv	/* No mapped memory needed yet, just the buffer info structures */
231266423Sjfv	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
232266423Sjfv		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
233266423Sjfv	if (ret_code)
234266423Sjfv		goto alloc_asq_bufs;
235266423Sjfv	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
236266423Sjfv
237266423Sjfv	/* allocate the mapped buffers */
238266423Sjfv	for (i = 0; i < hw->aq.num_asq_entries; i++) {
239266423Sjfv		bi = &hw->aq.asq.r.asq_bi[i];
240266423Sjfv		ret_code = i40e_allocate_dma_mem(hw, bi,
241266423Sjfv						 i40e_mem_asq_buf,
242266423Sjfv						 hw->aq.asq_buf_size,
243266423Sjfv						 I40E_ADMINQ_DESC_ALIGNMENT);
244266423Sjfv		if (ret_code)
245266423Sjfv			goto unwind_alloc_asq_bufs;
246266423Sjfv	}
247266423Sjfvalloc_asq_bufs:
248266423Sjfv	return ret_code;
249266423Sjfv
250266423Sjfvunwind_alloc_asq_bufs:
251266423Sjfv	/* don't try to free the one that failed... */
252266423Sjfv	i--;
253266423Sjfv	for (; i >= 0; i--)
254266423Sjfv		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
255266423Sjfv	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
256266423Sjfv
257266423Sjfv	return ret_code;
258266423Sjfv}
259266423Sjfv
260266423Sjfv/**
261266423Sjfv *  i40e_free_arq_bufs - Free receive queue buffer info elements
262266423Sjfv *  @hw: pointer to the hardware structure
263266423Sjfv **/
264266423Sjfvstatic void i40e_free_arq_bufs(struct i40e_hw *hw)
265266423Sjfv{
266266423Sjfv	int i;
267266423Sjfv
268266423Sjfv	/* free descriptors */
269266423Sjfv	for (i = 0; i < hw->aq.num_arq_entries; i++)
270266423Sjfv		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
271266423Sjfv
272266423Sjfv	/* free the descriptor memory */
273266423Sjfv	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
274266423Sjfv
275266423Sjfv	/* free the dma header */
276266423Sjfv	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
277266423Sjfv}
278266423Sjfv
279266423Sjfv/**
280266423Sjfv *  i40e_free_asq_bufs - Free send queue buffer info elements
281266423Sjfv *  @hw: pointer to the hardware structure
282266423Sjfv **/
283266423Sjfvstatic void i40e_free_asq_bufs(struct i40e_hw *hw)
284266423Sjfv{
285266423Sjfv	int i;
286266423Sjfv
287266423Sjfv	/* only unmap if the address is non-NULL */
288266423Sjfv	for (i = 0; i < hw->aq.num_asq_entries; i++)
289266423Sjfv		if (hw->aq.asq.r.asq_bi[i].pa)
290266423Sjfv			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
291266423Sjfv
292266423Sjfv	/* free the buffer info list */
293266423Sjfv	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
294266423Sjfv
295266423Sjfv	/* free the descriptor memory */
296266423Sjfv	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
297266423Sjfv
298266423Sjfv	/* free the dma header */
299266423Sjfv	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
300266423Sjfv}
301266423Sjfv
/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue.
 *  Returns I40E_ERR_ADMIN_QUEUE_ERROR if the base-address readback does
 *  not match what was written.
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point: ring length plus the enable bit */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
330266423Sjfv
/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event queue),
 *  then bump the tail so firmware sees the pre-posted buffers.  Returns
 *  I40E_ERR_ADMIN_QUEUE_ERROR if the base-address readback does not match.
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point: ring length plus the enable bit */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
362266423Sjfv
363266423Sjfv/**
364266423Sjfv *  i40e_init_asq - main initialization routine for ASQ
365266423Sjfv *  @hw: pointer to the hardware structure
366266423Sjfv *
367266423Sjfv *  This is the main initialization routine for the Admin Send Queue
368266423Sjfv *  Prior to calling this function, drivers *MUST* set the following fields
369266423Sjfv *  in the hw->aq structure:
370266423Sjfv *     - hw->aq.num_asq_entries
371266423Sjfv *     - hw->aq.arq_buf_size
372266423Sjfv *
373266423Sjfv *  Do *NOT* hold the lock when calling this as the memory allocation routines
374266423Sjfv *  called are not going to be atomic context safe
375266423Sjfv **/
376266423Sjfvenum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
377266423Sjfv{
378266423Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
379266423Sjfv
380266423Sjfv	if (hw->aq.asq.count > 0) {
381266423Sjfv		/* queue already initialized */
382266423Sjfv		ret_code = I40E_ERR_NOT_READY;
383266423Sjfv		goto init_adminq_exit;
384266423Sjfv	}
385266423Sjfv
386266423Sjfv	/* verify input for valid configuration */
387266423Sjfv	if ((hw->aq.num_asq_entries == 0) ||
388266423Sjfv	    (hw->aq.asq_buf_size == 0)) {
389266423Sjfv		ret_code = I40E_ERR_CONFIG;
390266423Sjfv		goto init_adminq_exit;
391266423Sjfv	}
392266423Sjfv
393266423Sjfv	hw->aq.asq.next_to_use = 0;
394266423Sjfv	hw->aq.asq.next_to_clean = 0;
395266423Sjfv	hw->aq.asq.count = hw->aq.num_asq_entries;
396266423Sjfv
397266423Sjfv	/* allocate the ring memory */
398266423Sjfv	ret_code = i40e_alloc_adminq_asq_ring(hw);
399266423Sjfv	if (ret_code != I40E_SUCCESS)
400266423Sjfv		goto init_adminq_exit;
401266423Sjfv
402266423Sjfv	/* allocate buffers in the rings */
403266423Sjfv	ret_code = i40e_alloc_asq_bufs(hw);
404266423Sjfv	if (ret_code != I40E_SUCCESS)
405266423Sjfv		goto init_adminq_free_rings;
406266423Sjfv
407266423Sjfv	/* initialize base registers */
408266423Sjfv	ret_code = i40e_config_asq_regs(hw);
409266423Sjfv	if (ret_code != I40E_SUCCESS)
410266423Sjfv		goto init_adminq_free_rings;
411266423Sjfv
412266423Sjfv	/* success! */
413266423Sjfv	goto init_adminq_exit;
414266423Sjfv
415266423Sjfvinit_adminq_free_rings:
416266423Sjfv	i40e_free_adminq_asq(hw);
417266423Sjfv
418266423Sjfvinit_adminq_exit:
419266423Sjfv	return ret_code;
420266423Sjfv}
421266423Sjfv
/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* reset ring state; count also serves as the "initialized" flag */
	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}
480266423Sjfv
481266423Sjfv/**
482266423Sjfv *  i40e_shutdown_asq - shutdown the ASQ
483266423Sjfv *  @hw: pointer to the hardware structure
484266423Sjfv *
485266423Sjfv *  The main shutdown routine for the Admin Send Queue
486266423Sjfv **/
487266423Sjfvenum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
488266423Sjfv{
489266423Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
490266423Sjfv
491266423Sjfv	if (hw->aq.asq.count == 0)
492266423Sjfv		return I40E_ERR_NOT_READY;
493266423Sjfv
494266423Sjfv	/* Stop firmware AdminQ processing */
495266423Sjfv	wr32(hw, hw->aq.asq.head, 0);
496266423Sjfv	wr32(hw, hw->aq.asq.tail, 0);
497266423Sjfv	wr32(hw, hw->aq.asq.len, 0);
498269198Sjfv	wr32(hw, hw->aq.asq.bal, 0);
499269198Sjfv	wr32(hw, hw->aq.asq.bah, 0);
500266423Sjfv
501266423Sjfv	/* make sure spinlock is available */
502266423Sjfv	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
503266423Sjfv
504266423Sjfv	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
505266423Sjfv
506266423Sjfv	/* free ring buffers */
507266423Sjfv	i40e_free_asq_bufs(hw);
508266423Sjfv
509266423Sjfv	i40e_release_spinlock(&hw->aq.asq_spinlock);
510266423Sjfv
511266423Sjfv	return ret_code;
512266423Sjfv}
513266423Sjfv
514266423Sjfv/**
515266423Sjfv *  i40e_shutdown_arq - shutdown ARQ
516266423Sjfv *  @hw: pointer to the hardware structure
517266423Sjfv *
518266423Sjfv *  The main shutdown routine for the Admin Receive Queue
519266423Sjfv **/
520266423Sjfvenum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
521266423Sjfv{
522266423Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
523266423Sjfv
524266423Sjfv	if (hw->aq.arq.count == 0)
525266423Sjfv		return I40E_ERR_NOT_READY;
526266423Sjfv
527266423Sjfv	/* Stop firmware AdminQ processing */
528266423Sjfv	wr32(hw, hw->aq.arq.head, 0);
529266423Sjfv	wr32(hw, hw->aq.arq.tail, 0);
530266423Sjfv	wr32(hw, hw->aq.arq.len, 0);
531269198Sjfv	wr32(hw, hw->aq.arq.bal, 0);
532269198Sjfv	wr32(hw, hw->aq.arq.bah, 0);
533266423Sjfv
534266423Sjfv	/* make sure spinlock is available */
535266423Sjfv	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
536266423Sjfv
537266423Sjfv	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
538266423Sjfv
539266423Sjfv	/* free ring buffers */
540266423Sjfv	i40e_free_arq_bufs(hw);
541266423Sjfv
542266423Sjfv	i40e_release_spinlock(&hw->aq.arq_spinlock);
543266423Sjfv
544266423Sjfv	return ret_code;
545266423Sjfv}
546266423Sjfv
547266423Sjfv/**
548266423Sjfv *  i40e_init_adminq - main initialization routine for Admin Queue
549266423Sjfv *  @hw: pointer to the hardware structure
550266423Sjfv *
551266423Sjfv *  Prior to calling this function, drivers *MUST* set the following fields
552266423Sjfv *  in the hw->aq structure:
553266423Sjfv *     - hw->aq.num_asq_entries
554266423Sjfv *     - hw->aq.num_arq_entries
555266423Sjfv *     - hw->aq.arq_buf_size
556266423Sjfv *     - hw->aq.asq_buf_size
557266423Sjfv **/
558266423Sjfvenum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
559266423Sjfv{
560266423Sjfv	enum i40e_status_code ret_code;
561266423Sjfv	u16 eetrack_lo, eetrack_hi;
562266423Sjfv	int retry = 0;
563266423Sjfv	/* verify input for valid configuration */
564266423Sjfv	if ((hw->aq.num_arq_entries == 0) ||
565266423Sjfv	    (hw->aq.num_asq_entries == 0) ||
566266423Sjfv	    (hw->aq.arq_buf_size == 0) ||
567266423Sjfv	    (hw->aq.asq_buf_size == 0)) {
568266423Sjfv		ret_code = I40E_ERR_CONFIG;
569266423Sjfv		goto init_adminq_exit;
570266423Sjfv	}
571266423Sjfv
572266423Sjfv	/* initialize spin locks */
573266423Sjfv	i40e_init_spinlock(&hw->aq.asq_spinlock);
574266423Sjfv	i40e_init_spinlock(&hw->aq.arq_spinlock);
575266423Sjfv
576266423Sjfv	/* Set up register offsets */
577266423Sjfv	i40e_adminq_init_regs(hw);
578266423Sjfv
579269198Sjfv	/* setup ASQ command write back timeout */
580269198Sjfv	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
581269198Sjfv
582266423Sjfv	/* allocate the ASQ */
583266423Sjfv	ret_code = i40e_init_asq(hw);
584266423Sjfv	if (ret_code != I40E_SUCCESS)
585266423Sjfv		goto init_adminq_destroy_spinlocks;
586266423Sjfv
587266423Sjfv	/* allocate the ARQ */
588266423Sjfv	ret_code = i40e_init_arq(hw);
589266423Sjfv	if (ret_code != I40E_SUCCESS)
590266423Sjfv		goto init_adminq_free_asq;
591266423Sjfv
592270346Sjfv        if (i40e_is_vf(hw))  /* VF has no need of firmware */
593270346Sjfv                goto init_adminq_exit;
594270346Sjfv
595270346Sjfv/* There are some cases where the firmware may not be quite ready
596266423Sjfv	 * for AdminQ operations, so we retry the AdminQ setup a few times
597266423Sjfv	 * if we see timeouts in this first AQ call.
598266423Sjfv	 */
599266423Sjfv	do {
600266423Sjfv		ret_code = i40e_aq_get_firmware_version(hw,
601266423Sjfv							&hw->aq.fw_maj_ver,
602266423Sjfv							&hw->aq.fw_min_ver,
603266423Sjfv							&hw->aq.api_maj_ver,
604266423Sjfv							&hw->aq.api_min_ver,
605266423Sjfv							NULL);
606266423Sjfv		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
607266423Sjfv			break;
608266423Sjfv		retry++;
609266423Sjfv		i40e_msec_delay(100);
610266423Sjfv		i40e_resume_aq(hw);
611266423Sjfv	} while (retry < 10);
612266423Sjfv	if (ret_code != I40E_SUCCESS)
613266423Sjfv		goto init_adminq_free_arq;
614266423Sjfv
615266423Sjfv	/* get the NVM version info */
616266423Sjfv	i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
617266423Sjfv	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
618266423Sjfv	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
619266423Sjfv	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
620266423Sjfv
621266423Sjfv	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
622266423Sjfv		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
623266423Sjfv		goto init_adminq_free_arq;
624266423Sjfv	}
625266423Sjfv
626266423Sjfv	/* pre-emptive resource lock release */
627266423Sjfv	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
628266423Sjfv	hw->aq.nvm_busy = FALSE;
629266423Sjfv
630266423Sjfv	ret_code = i40e_aq_set_hmc_resource_profile(hw,
631266423Sjfv						    I40E_HMC_PROFILE_DEFAULT,
632266423Sjfv						    0,
633266423Sjfv						    NULL);
634266423Sjfv	ret_code = I40E_SUCCESS;
635266423Sjfv
636266423Sjfv	/* success! */
637266423Sjfv	goto init_adminq_exit;
638266423Sjfv
639266423Sjfvinit_adminq_free_arq:
640266423Sjfv	i40e_shutdown_arq(hw);
641266423Sjfvinit_adminq_free_asq:
642266423Sjfv	i40e_shutdown_asq(hw);
643266423Sjfvinit_adminq_destroy_spinlocks:
644266423Sjfv	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
645266423Sjfv	i40e_destroy_spinlock(&hw->aq.arq_spinlock);
646266423Sjfv
647266423Sjfvinit_adminq_exit:
648266423Sjfv	return ret_code;
649266423Sjfv}
650266423Sjfv
651266423Sjfv/**
652266423Sjfv *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
653266423Sjfv *  @hw: pointer to the hardware structure
654266423Sjfv **/
655266423Sjfvenum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
656266423Sjfv{
657266423Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
658266423Sjfv
659266423Sjfv	if (i40e_check_asq_alive(hw))
660266423Sjfv		i40e_aq_queue_shutdown(hw, TRUE);
661266423Sjfv
662266423Sjfv	i40e_shutdown_asq(hw);
663266423Sjfv	i40e_shutdown_arq(hw);
664266423Sjfv
665266423Sjfv	/* destroy the spinlocks */
666266423Sjfv	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
667266423Sjfv	i40e_destroy_spinlock(&hw->aq.arq_spinlock);
668266423Sjfv
669266423Sjfv	return ret_code;
670266423Sjfv}
671266423Sjfv
/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Walks the ring from next_to_clean up to the hardware head, invoking
 *  any registered completion callback with a copy of each descriptor,
 *  then zeroing the descriptor and its details.
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	/* hardware head marks the last descriptor firmware has processed */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
			   rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			/* copy to a stack buffer so the callback sees a stable
			 * snapshot before the ring slot is zeroed below
			 * (NOTE(review): copy direction flag is DMA_TO_DMA
			 * although desc_cb is non-DMA stack memory — the flag
			 * may be ignored by the osdep memcpy; confirm)
			 */
			i40e_memcpy(&desc_cb, desc,
			            sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		/* advance with wrap-around at the end of the ring */
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
713266423Sjfv
714266423Sjfv/**
715266423Sjfv *  i40e_asq_done - check if FW has processed the Admin Send Queue
716266423Sjfv *  @hw: pointer to the hw struct
717266423Sjfv *
718266423Sjfv *  Returns TRUE if the firmware has processed all descriptors on the
719266423Sjfv *  admin send queue. Returns FALSE if there are still requests pending.
720266423Sjfv **/
721266423Sjfvbool i40e_asq_done(struct i40e_hw *hw)
722266423Sjfv{
723266423Sjfv	/* AQ designers suggest use of head for better
724266423Sjfv	 * timing reliability than DD bit
725266423Sjfv	 */
726266423Sjfv	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
727266423Sjfv
728266423Sjfv}
729266423Sjfv
730266423Sjfv/**
731266423Sjfv *  i40e_asq_send_command - send command to Admin Queue
732266423Sjfv *  @hw: pointer to the hw struct
733266423Sjfv *  @desc: prefilled descriptor describing the command (non DMA mem)
734266423Sjfv *  @buff: buffer to use for indirect commands
735266423Sjfv *  @buff_size: size of buffer for indirect commands
736266423Sjfv *  @cmd_details: pointer to command details structure
737266423Sjfv *
738266423Sjfv *  This is the main send command driver routine for the Admin Queue send
739266423Sjfv *  queue.  It runs the queue, cleans the queue, etc
740266423Sjfv **/
741266423Sjfvenum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
742266423Sjfv				struct i40e_aq_desc *desc,
743266423Sjfv				void *buff, /* can be NULL */
744266423Sjfv				u16  buff_size,
745266423Sjfv				struct i40e_asq_cmd_details *cmd_details)
746266423Sjfv{
747266423Sjfv	enum i40e_status_code status = I40E_SUCCESS;
748266423Sjfv	struct i40e_dma_mem *dma_buff = NULL;
749266423Sjfv	struct i40e_asq_cmd_details *details;
750266423Sjfv	struct i40e_aq_desc *desc_on_ring;
751266423Sjfv	bool cmd_completed = FALSE;
752266423Sjfv	u16  retval = 0;
753266423Sjfv	u32  val = 0;
754266423Sjfv
755266423Sjfv	val = rd32(hw, hw->aq.asq.head);
756266423Sjfv	if (val >= hw->aq.num_asq_entries) {
757266423Sjfv		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
758266423Sjfv			   "AQTX: head overrun at %d\n", val);
759266423Sjfv		status = I40E_ERR_QUEUE_EMPTY;
760266423Sjfv		goto asq_send_command_exit;
761266423Sjfv	}
762266423Sjfv
763266423Sjfv	if (hw->aq.asq.count == 0) {
764266423Sjfv		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
765266423Sjfv			   "AQTX: Admin queue not initialized.\n");
766266423Sjfv		status = I40E_ERR_QUEUE_EMPTY;
767266423Sjfv		goto asq_send_command_exit;
768266423Sjfv	}
769266423Sjfv
770266423Sjfv	if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
771266423Sjfv		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
772266423Sjfv		status = I40E_ERR_NVM;
773266423Sjfv		goto asq_send_command_exit;
774266423Sjfv	}
775266423Sjfv
776266423Sjfv	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
777266423Sjfv	if (cmd_details) {
778266423Sjfv		i40e_memcpy(details,
779266423Sjfv			    cmd_details,
780266423Sjfv			    sizeof(struct i40e_asq_cmd_details),
781266423Sjfv			    I40E_NONDMA_TO_NONDMA);
782266423Sjfv
783266423Sjfv		/* If the cmd_details are defined copy the cookie.  The
784266423Sjfv		 * CPU_TO_LE32 is not needed here because the data is ignored
785266423Sjfv		 * by the FW, only used by the driver
786266423Sjfv		 */
787266423Sjfv		if (details->cookie) {
788266423Sjfv			desc->cookie_high =
789266423Sjfv				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
790266423Sjfv			desc->cookie_low =
791266423Sjfv				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
792266423Sjfv		}
793266423Sjfv	} else {
794266423Sjfv		i40e_memset(details, 0,
795266423Sjfv			    sizeof(struct i40e_asq_cmd_details),
796266423Sjfv			    I40E_NONDMA_MEM);
797266423Sjfv	}
798266423Sjfv
799266423Sjfv	/* clear requested flags and then set additional flags if defined */
800266423Sjfv	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
801266423Sjfv	desc->flags |= CPU_TO_LE16(details->flags_ena);
802266423Sjfv
803266423Sjfv	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
804266423Sjfv
805266423Sjfv	if (buff_size > hw->aq.asq_buf_size) {
806266423Sjfv		i40e_debug(hw,
807266423Sjfv			   I40E_DEBUG_AQ_MESSAGE,
808266423Sjfv			   "AQTX: Invalid buffer size: %d.\n",
809266423Sjfv			   buff_size);
810266423Sjfv		status = I40E_ERR_INVALID_SIZE;
811266423Sjfv		goto asq_send_command_error;
812266423Sjfv	}
813266423Sjfv
814266423Sjfv	if (details->postpone && !details->async) {
815266423Sjfv		i40e_debug(hw,
816266423Sjfv			   I40E_DEBUG_AQ_MESSAGE,
817266423Sjfv			   "AQTX: Async flag not set along with postpone flag");
818266423Sjfv		status = I40E_ERR_PARAM;
819266423Sjfv		goto asq_send_command_error;
820266423Sjfv	}
821266423Sjfv
822266423Sjfv	/* call clean and check queue available function to reclaim the
823266423Sjfv	 * descriptors that were processed by FW, the function returns the
824266423Sjfv	 * number of desc available
825266423Sjfv	 */
826266423Sjfv	/* the clean function called here could be called in a separate thread
827266423Sjfv	 * in case of asynchronous completions
828266423Sjfv	 */
829266423Sjfv	if (i40e_clean_asq(hw) == 0) {
830266423Sjfv		i40e_debug(hw,
831266423Sjfv			   I40E_DEBUG_AQ_MESSAGE,
832266423Sjfv			   "AQTX: Error queue is full.\n");
833266423Sjfv		status = I40E_ERR_ADMIN_QUEUE_FULL;
834266423Sjfv		goto asq_send_command_error;
835266423Sjfv	}
836266423Sjfv
837266423Sjfv	/* initialize the temp desc pointer with the right desc */
838266423Sjfv	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
839266423Sjfv
840266423Sjfv	/* if the desc is available copy the temp desc to the right place */
841266423Sjfv	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
842266423Sjfv		    I40E_NONDMA_TO_DMA);
843266423Sjfv
844266423Sjfv	/* if buff is not NULL assume indirect command */
845266423Sjfv	if (buff != NULL) {
846266423Sjfv		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
847266423Sjfv		/* copy the user buff into the respective DMA buff */
848266423Sjfv		i40e_memcpy(dma_buff->va, buff, buff_size,
849266423Sjfv			    I40E_NONDMA_TO_DMA);
850266423Sjfv		desc_on_ring->datalen = CPU_TO_LE16(buff_size);
851266423Sjfv
852266423Sjfv		/* Update the address values in the desc with the pa value
853266423Sjfv		 * for respective buffer
854266423Sjfv		 */
855266423Sjfv		desc_on_ring->params.external.addr_high =
856266423Sjfv				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
857266423Sjfv		desc_on_ring->params.external.addr_low =
858266423Sjfv				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
859266423Sjfv	}
860266423Sjfv
861266423Sjfv	/* bump the tail */
862266423Sjfv	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
863269198Sjfv	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
864269198Sjfv		      buff, buff_size);
865266423Sjfv	(hw->aq.asq.next_to_use)++;
866266423Sjfv	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
867266423Sjfv		hw->aq.asq.next_to_use = 0;
868266423Sjfv	if (!details->postpone)
869266423Sjfv		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
870266423Sjfv
871266423Sjfv	/* if cmd_details are not defined or async flag is not set,
872266423Sjfv	 * we need to wait for desc write back
873266423Sjfv	 */
874266423Sjfv	if (!details->async && !details->postpone) {
875266423Sjfv		u32 total_delay = 0;
876266423Sjfv
877266423Sjfv		do {
878266423Sjfv			/* AQ designers suggest use of head for better
879266423Sjfv			 * timing reliability than DD bit
880266423Sjfv			 */
881266423Sjfv			if (i40e_asq_done(hw))
882266423Sjfv				break;
883266423Sjfv			/* ugh! delay while spin_lock */
884270346Sjfv			i40e_msec_delay(1);
885270346Sjfv			total_delay++;
886269198Sjfv		} while (total_delay < hw->aq.asq_cmd_timeout);
887266423Sjfv	}
888266423Sjfv
889266423Sjfv	/* if ready, copy the desc back to temp */
890266423Sjfv	if (i40e_asq_done(hw)) {
891266423Sjfv		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
892266423Sjfv			    I40E_DMA_TO_NONDMA);
893266423Sjfv		if (buff != NULL)
894266423Sjfv			i40e_memcpy(buff, dma_buff->va, buff_size,
895266423Sjfv				    I40E_DMA_TO_NONDMA);
896266423Sjfv		retval = LE16_TO_CPU(desc->retval);
897266423Sjfv		if (retval != 0) {
898266423Sjfv			i40e_debug(hw,
899266423Sjfv				   I40E_DEBUG_AQ_MESSAGE,
900266423Sjfv				   "AQTX: Command completed with error 0x%X.\n",
901266423Sjfv				   retval);
902266423Sjfv
903266423Sjfv			/* strip off FW internal code */
904266423Sjfv			retval &= 0xff;
905266423Sjfv		}
906266423Sjfv		cmd_completed = TRUE;
907266423Sjfv		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
908266423Sjfv			status = I40E_SUCCESS;
909266423Sjfv		else
910266423Sjfv			status = I40E_ERR_ADMIN_QUEUE_ERROR;
911266423Sjfv		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
912266423Sjfv	}
913266423Sjfv
914269198Sjfv	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
915269198Sjfv		   "AQTX: desc and buffer writeback:\n");
916269198Sjfv	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
917266423Sjfv
918266423Sjfv	/* update the error if time out occurred */
919266423Sjfv	if ((!cmd_completed) &&
920266423Sjfv	    (!details->async && !details->postpone)) {
921266423Sjfv		i40e_debug(hw,
922266423Sjfv			   I40E_DEBUG_AQ_MESSAGE,
923266423Sjfv			   "AQTX: Writeback timeout.\n");
924266423Sjfv		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
925266423Sjfv	}
926266423Sjfv
927266423Sjfv	if (!status && i40e_is_nvm_update_op(desc))
928266423Sjfv		hw->aq.nvm_busy = TRUE;
929266423Sjfv
930266423Sjfvasq_send_command_error:
931266423Sjfv	i40e_release_spinlock(&hw->aq.asq_spinlock);
932266423Sjfvasq_send_command_exit:
933266423Sjfv	return status;
934266423Sjfv}
935266423Sjfv
936266423Sjfv/**
937266423Sjfv *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
938266423Sjfv *  @desc:     pointer to the temp descriptor (non DMA mem)
939266423Sjfv *  @opcode:   the opcode can be used to decide which flags to turn off or on
940266423Sjfv *
941266423Sjfv *  Fill the desc with default values
942266423Sjfv **/
943266423Sjfvvoid i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
944266423Sjfv				       u16 opcode)
945266423Sjfv{
946266423Sjfv	/* zero out the desc */
947266423Sjfv	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
948266423Sjfv		    I40E_NONDMA_MEM);
949266423Sjfv	desc->opcode = CPU_TO_LE16(opcode);
950266423Sjfv	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
951266423Sjfv}
952266423Sjfv
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'.
 *
 *  Returns I40E_ERR_ADMIN_QUEUE_NO_WORK when the ring is empty,
 *  I40E_ERR_ADMIN_QUEUE_ERROR when the FW flagged the event, and
 *  I40E_SUCCESS otherwise.  The copied-out message is truncated to
 *  e->buf_len if the caller's buffer is smaller than the event.
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* head == next_to_clean means FW has posted nothing new;
		 * nothing to do - shouldn't need to update ring's values
		 */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	/* ERR flag set by FW marks a failed event; latch its return code
	 * but still deliver the event to the caller below
	 */
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	/* copy desc and (clamped) message payload out of DMA memory */
	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	/* re-arm the slot: BUF (and LB for large buffers) tell FW the
	 * descriptor again points at a driver-owned receive buffer
	 */
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return.  The expression handles
	 * ring wrap-around: when ntc has wrapped past ntu, add the ring size.
	 */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	/* an NVM-related event means the outstanding NVM update finished:
	 * clear the busy gate and, if requested, release the NVM resource
	 */
	if (i40e_is_nvm_update_op(&e->desc)) {
		hw->aq.nvm_busy = FALSE;
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = FALSE;
		}
	}

	return ret_code;
}
1054266423Sjfv
1055266423Sjfvvoid i40e_resume_aq(struct i40e_hw *hw)
1056266423Sjfv{
1057266423Sjfv	/* Registers are reset after PF reset */
1058266423Sjfv	hw->aq.asq.next_to_use = 0;
1059266423Sjfv	hw->aq.asq.next_to_clean = 0;
1060266423Sjfv
1061266423Sjfv#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
1062266423Sjfv#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
1063266423Sjfv#endif
1064266423Sjfv	i40e_config_asq_regs(hw);
1065266423Sjfv
1066266423Sjfv	hw->aq.arq.next_to_use = 0;
1067266423Sjfv	hw->aq.arq.next_to_clean = 0;
1068266423Sjfv
1069266423Sjfv	i40e_config_arq_regs(hw);
1070266423Sjfv}
1071