1266423Sjfv/******************************************************************************
2266423Sjfv
3279033Sjfv  Copyright (c) 2013-2015, Intel Corporation
4266423Sjfv  All rights reserved.
5266423Sjfv
6266423Sjfv  Redistribution and use in source and binary forms, with or without
7266423Sjfv  modification, are permitted provided that the following conditions are met:
8266423Sjfv
9266423Sjfv   1. Redistributions of source code must retain the above copyright notice,
10266423Sjfv      this list of conditions and the following disclaimer.
11266423Sjfv
12266423Sjfv   2. Redistributions in binary form must reproduce the above copyright
13266423Sjfv      notice, this list of conditions and the following disclaimer in the
14266423Sjfv      documentation and/or other materials provided with the distribution.
15266423Sjfv
16266423Sjfv   3. Neither the name of the Intel Corporation nor the names of its
17266423Sjfv      contributors may be used to endorse or promote products derived from
18266423Sjfv      this software without specific prior written permission.
19266423Sjfv
20266423Sjfv  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21266423Sjfv  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22266423Sjfv  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23266423Sjfv  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24266423Sjfv  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25266423Sjfv  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26266423Sjfv  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27266423Sjfv  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28266423Sjfv  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29266423Sjfv  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30266423Sjfv  POSSIBILITY OF SUCH DAMAGE.
31266423Sjfv
32266423Sjfv******************************************************************************/
33266423Sjfv/*$FreeBSD: releng/11.0/sys/dev/ixl/i40e_adminq.c 303967 2016-08-11 19:13:30Z sbruno $*/
34266423Sjfv
35266423Sjfv#include "i40e_status.h"
36266423Sjfv#include "i40e_type.h"
37266423Sjfv#include "i40e_register.h"
38266423Sjfv#include "i40e_adminq.h"
39266423Sjfv#include "i40e_prototype.h"
40266423Sjfv
41266423Sjfv/**
42266423Sjfv *  i40e_adminq_init_regs - Initialize AdminQ registers
43266423Sjfv *  @hw: pointer to the hardware structure
44266423Sjfv *
45266423Sjfv *  This assumes the alloc_asq and alloc_arq functions have already been called
46266423Sjfv **/
47266423Sjfvstatic void i40e_adminq_init_regs(struct i40e_hw *hw)
48266423Sjfv{
49266423Sjfv	/* set head and tail registers in our local struct */
50270346Sjfv	if (i40e_is_vf(hw)) {
51266423Sjfv		hw->aq.asq.tail = I40E_VF_ATQT1;
52266423Sjfv		hw->aq.asq.head = I40E_VF_ATQH1;
53266423Sjfv		hw->aq.asq.len  = I40E_VF_ATQLEN1;
54269198Sjfv		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
55269198Sjfv		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
56266423Sjfv		hw->aq.arq.tail = I40E_VF_ARQT1;
57266423Sjfv		hw->aq.arq.head = I40E_VF_ARQH1;
58266423Sjfv		hw->aq.arq.len  = I40E_VF_ARQLEN1;
59269198Sjfv		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
60269198Sjfv		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
61266423Sjfv	} else {
62266423Sjfv		hw->aq.asq.tail = I40E_PF_ATQT;
63266423Sjfv		hw->aq.asq.head = I40E_PF_ATQH;
64266423Sjfv		hw->aq.asq.len  = I40E_PF_ATQLEN;
65269198Sjfv		hw->aq.asq.bal  = I40E_PF_ATQBAL;
66269198Sjfv		hw->aq.asq.bah  = I40E_PF_ATQBAH;
67266423Sjfv		hw->aq.arq.tail = I40E_PF_ARQT;
68266423Sjfv		hw->aq.arq.head = I40E_PF_ARQH;
69266423Sjfv		hw->aq.arq.len  = I40E_PF_ARQLEN;
70269198Sjfv		hw->aq.arq.bal  = I40E_PF_ARQBAL;
71269198Sjfv		hw->aq.arq.bah  = I40E_PF_ARQBAH;
72266423Sjfv	}
73266423Sjfv}
74266423Sjfv
75266423Sjfv/**
76266423Sjfv *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
77266423Sjfv *  @hw: pointer to the hardware structure
78266423Sjfv **/
79266423Sjfvenum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
80266423Sjfv{
81266423Sjfv	enum i40e_status_code ret_code;
82266423Sjfv
83266423Sjfv	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
84266423Sjfv					 i40e_mem_atq_ring,
85266423Sjfv					 (hw->aq.num_asq_entries *
86266423Sjfv					 sizeof(struct i40e_aq_desc)),
87266423Sjfv					 I40E_ADMINQ_DESC_ALIGNMENT);
88266423Sjfv	if (ret_code)
89266423Sjfv		return ret_code;
90266423Sjfv
91266423Sjfv	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
92266423Sjfv					  (hw->aq.num_asq_entries *
93266423Sjfv					  sizeof(struct i40e_asq_cmd_details)));
94266423Sjfv	if (ret_code) {
95266423Sjfv		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
96266423Sjfv		return ret_code;
97266423Sjfv	}
98266423Sjfv
99266423Sjfv	return ret_code;
100266423Sjfv}
101266423Sjfv
102266423Sjfv/**
103266423Sjfv *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
104266423Sjfv *  @hw: pointer to the hardware structure
105266423Sjfv **/
106266423Sjfvenum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
107266423Sjfv{
108266423Sjfv	enum i40e_status_code ret_code;
109266423Sjfv
110266423Sjfv	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
111266423Sjfv					 i40e_mem_arq_ring,
112266423Sjfv					 (hw->aq.num_arq_entries *
113266423Sjfv					 sizeof(struct i40e_aq_desc)),
114266423Sjfv					 I40E_ADMINQ_DESC_ALIGNMENT);
115266423Sjfv
116266423Sjfv	return ret_code;
117266423Sjfv}
118266423Sjfv
/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases only the descriptor ring DMA memory; the command-details
 *  array allocated alongside it is freed by i40e_free_asq_bufs().
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
130266423Sjfv
/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases only the descriptor ring DMA memory; the pre-posted event
 *  buffers are freed by i40e_free_arq_bufs().
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
142266423Sjfv
/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 *
 *  Allocates one DMA buffer per ARQ descriptor and programs each descriptor
 *  to point at its buffer so firmware can post events as soon as the queue
 *  is enabled.  Returns I40E_SUCCESS, or an allocation error code after
 *  unwinding every buffer allocated so far.
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		/* mark as indirect (buffer-carrying), plus large-buffer
		 * flag when the buffer exceeds the 512-byte threshold
		 */
		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		/* point the descriptor at the DMA buffer (64-bit address
		 * split across two little-endian 32-bit fields)
		 */
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
209266423Sjfv
/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 *
 *  Allocates one DMA buffer per ASQ descriptor for use by indirect commands.
 *  Unlike the ARQ, the descriptors are not pre-programmed here; they are
 *  filled in per-command by i40e_asq_send_command().  On failure, every
 *  buffer allocated so far is freed before the error code is returned.
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}
249266423Sjfv
250266423Sjfv/**
251266423Sjfv *  i40e_free_arq_bufs - Free receive queue buffer info elements
252266423Sjfv *  @hw: pointer to the hardware structure
253266423Sjfv **/
254266423Sjfvstatic void i40e_free_arq_bufs(struct i40e_hw *hw)
255266423Sjfv{
256266423Sjfv	int i;
257266423Sjfv
258266423Sjfv	/* free descriptors */
259266423Sjfv	for (i = 0; i < hw->aq.num_arq_entries; i++)
260266423Sjfv		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
261266423Sjfv
262266423Sjfv	/* free the descriptor memory */
263266423Sjfv	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
264266423Sjfv
265266423Sjfv	/* free the dma header */
266266423Sjfv	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
267266423Sjfv}
268266423Sjfv
269266423Sjfv/**
270266423Sjfv *  i40e_free_asq_bufs - Free send queue buffer info elements
271266423Sjfv *  @hw: pointer to the hardware structure
272266423Sjfv **/
273266423Sjfvstatic void i40e_free_asq_bufs(struct i40e_hw *hw)
274266423Sjfv{
275266423Sjfv	int i;
276266423Sjfv
277266423Sjfv	/* only unmap if the address is non-NULL */
278266423Sjfv	for (i = 0; i < hw->aq.num_asq_entries; i++)
279266423Sjfv		if (hw->aq.asq.r.asq_bi[i].pa)
280266423Sjfv			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
281266423Sjfv
282266423Sjfv	/* free the buffer info list */
283266423Sjfv	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
284266423Sjfv
285266423Sjfv	/* free the descriptor memory */
286266423Sjfv	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
287266423Sjfv
288266423Sjfv	/* free the dma header */
289266423Sjfv	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
290266423Sjfv}
291266423Sjfv
292266423Sjfv/**
293266423Sjfv *  i40e_config_asq_regs - configure ASQ registers
294266423Sjfv *  @hw: pointer to the hardware structure
295266423Sjfv *
296266423Sjfv *  Configure base address and length registers for the transmit queue
297266423Sjfv **/
298266423Sjfvstatic enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
299266423Sjfv{
300266423Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
301266423Sjfv	u32 reg = 0;
302266423Sjfv
303266423Sjfv	/* Clear Head and Tail */
304266423Sjfv	wr32(hw, hw->aq.asq.head, 0);
305266423Sjfv	wr32(hw, hw->aq.asq.tail, 0);
306266423Sjfv
307269198Sjfv	/* set starting point */
308284049Sjfv	if (!i40e_is_vf(hw))
309284049Sjfv		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
310284049Sjfv					  I40E_PF_ATQLEN_ATQENABLE_MASK));
311284049Sjfv	if (i40e_is_vf(hw))
312284049Sjfv		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
313284049Sjfv					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
314269198Sjfv	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
315269198Sjfv	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
316266423Sjfv
317266423Sjfv	/* Check one register to verify that config was applied */
318269198Sjfv	reg = rd32(hw, hw->aq.asq.bal);
319266423Sjfv	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
320266423Sjfv		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
321266423Sjfv
322266423Sjfv	return ret_code;
323266423Sjfv}
324266423Sjfv
325266423Sjfv/**
326266423Sjfv *  i40e_config_arq_regs - ARQ register configuration
327266423Sjfv *  @hw: pointer to the hardware structure
328266423Sjfv *
329266423Sjfv * Configure base address and length registers for the receive (event queue)
330266423Sjfv **/
331266423Sjfvstatic enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
332266423Sjfv{
333266423Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
334266423Sjfv	u32 reg = 0;
335266423Sjfv
336266423Sjfv	/* Clear Head and Tail */
337266423Sjfv	wr32(hw, hw->aq.arq.head, 0);
338266423Sjfv	wr32(hw, hw->aq.arq.tail, 0);
339266423Sjfv
340269198Sjfv	/* set starting point */
341284049Sjfv	if (!i40e_is_vf(hw))
342284049Sjfv		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
343284049Sjfv					  I40E_PF_ARQLEN_ARQENABLE_MASK));
344284049Sjfv	if (i40e_is_vf(hw))
345284049Sjfv		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
346284049Sjfv					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
347269198Sjfv	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
348269198Sjfv	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
349266423Sjfv
350266423Sjfv	/* Update tail in the HW to post pre-allocated buffers */
351266423Sjfv	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
352266423Sjfv
353266423Sjfv	/* Check one register to verify that config was applied */
354269198Sjfv	reg = rd32(hw, hw->aq.arq.bal);
355266423Sjfv	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
356266423Sjfv		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
357266423Sjfv
358266423Sjfv	return ret_code;
359266423Sjfv}
360266423Sjfv
/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* start the ring at entry 0 */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! a non-zero count marks the queue as initialized */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
419266423Sjfv
/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* start the ring at entry 0 */
	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! a non-zero count marks the queue as initialized */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}
478266423Sjfv
479266423Sjfv/**
480266423Sjfv *  i40e_shutdown_asq - shutdown the ASQ
481266423Sjfv *  @hw: pointer to the hardware structure
482266423Sjfv *
483266423Sjfv *  The main shutdown routine for the Admin Send Queue
484266423Sjfv **/
485266423Sjfvenum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
486266423Sjfv{
487266423Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
488266423Sjfv
489299545Serj	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
490266423Sjfv
491299545Serj	if (hw->aq.asq.count == 0) {
492299545Serj		ret_code = I40E_ERR_NOT_READY;
493299545Serj		goto shutdown_asq_out;
494299545Serj	}
495299545Serj
496266423Sjfv	/* Stop firmware AdminQ processing */
497266423Sjfv	wr32(hw, hw->aq.asq.head, 0);
498266423Sjfv	wr32(hw, hw->aq.asq.tail, 0);
499266423Sjfv	wr32(hw, hw->aq.asq.len, 0);
500269198Sjfv	wr32(hw, hw->aq.asq.bal, 0);
501269198Sjfv	wr32(hw, hw->aq.asq.bah, 0);
502266423Sjfv
503266423Sjfv	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
504266423Sjfv
505266423Sjfv	/* free ring buffers */
506266423Sjfv	i40e_free_asq_bufs(hw);
507266423Sjfv
508299545Serjshutdown_asq_out:
509266423Sjfv	i40e_release_spinlock(&hw->aq.asq_spinlock);
510266423Sjfv	return ret_code;
511266423Sjfv}
512266423Sjfv
513266423Sjfv/**
514266423Sjfv *  i40e_shutdown_arq - shutdown ARQ
515266423Sjfv *  @hw: pointer to the hardware structure
516266423Sjfv *
517266423Sjfv *  The main shutdown routine for the Admin Receive Queue
518266423Sjfv **/
519266423Sjfvenum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
520266423Sjfv{
521266423Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
522266423Sjfv
523299545Serj	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
524266423Sjfv
525299545Serj	if (hw->aq.arq.count == 0) {
526299545Serj		ret_code = I40E_ERR_NOT_READY;
527299545Serj		goto shutdown_arq_out;
528299545Serj	}
529299545Serj
530266423Sjfv	/* Stop firmware AdminQ processing */
531266423Sjfv	wr32(hw, hw->aq.arq.head, 0);
532266423Sjfv	wr32(hw, hw->aq.arq.tail, 0);
533266423Sjfv	wr32(hw, hw->aq.arq.len, 0);
534269198Sjfv	wr32(hw, hw->aq.arq.bal, 0);
535269198Sjfv	wr32(hw, hw->aq.arq.bah, 0);
536266423Sjfv
537266423Sjfv	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
538266423Sjfv
539266423Sjfv	/* free ring buffers */
540266423Sjfv	i40e_free_arq_bufs(hw);
541266423Sjfv
542299545Serjshutdown_arq_out:
543266423Sjfv	i40e_release_spinlock(&hw->aq.arq_spinlock);
544266423Sjfv	return ret_code;
545266423Sjfv}
546266423Sjfv
/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 *
 *  Rewinds both AdminQ rings to entry 0 and reprograms their base/length
 *  registers.  Used after a reset, when the hardware registers have been
 *  wiped but the ring memory is still allocated.
 *
 *  NOTE(review): the return values of the two config calls are silently
 *  ignored here; presumably a register mismatch will surface on the next
 *  AQ command instead — confirm this is intentional.
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}
564299555Serj
/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Sets up both AdminQ rings, brings up communication with firmware
 *  (PF only), and caches firmware/NVM version information in @hw.
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	enum i40e_status_code ret_code;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	/* the OEM version lives at a fixed offset from the boot config
	 * pointer read out of the shadow RAM
	 */
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* a newer firmware API major version is incompatible */
	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
673266423Sjfv
674266423Sjfv/**
675266423Sjfv *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
676266423Sjfv *  @hw: pointer to the hardware structure
677266423Sjfv **/
678266423Sjfvenum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
679266423Sjfv{
680266423Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
681266423Sjfv
682266423Sjfv	if (i40e_check_asq_alive(hw))
683266423Sjfv		i40e_aq_queue_shutdown(hw, TRUE);
684266423Sjfv
685266423Sjfv	i40e_shutdown_asq(hw);
686266423Sjfv	i40e_shutdown_arq(hw);
687266423Sjfv	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
688266423Sjfv	i40e_destroy_spinlock(&hw->aq.arq_spinlock);
689266423Sjfv
690284049Sjfv	if (hw->nvm_buff.va)
691284049Sjfv		i40e_free_virt_mem(hw, &hw->nvm_buff);
692284049Sjfv
693266423Sjfv	return ret_code;
694266423Sjfv}
695266423Sjfv
/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Walks the ASQ from next_to_clean up to the hardware head pointer,
 *  invoking any completion callback registered for each finished
 *  descriptor and zeroing the descriptor/details slots for reuse.
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	/* everything up to (but not including) the hardware head has been
	 * consumed by firmware and can be reclaimed
	 */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			/* copy the descriptor out first: the callback gets a
			 * stable snapshot, and the ring slot is cleared below
			 */
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		/* advance with wrap-around at the end of the ring */
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
736266423Sjfv
737266423Sjfv/**
738266423Sjfv *  i40e_asq_done - check if FW has processed the Admin Send Queue
739266423Sjfv *  @hw: pointer to the hw struct
740266423Sjfv *
741266423Sjfv *  Returns TRUE if the firmware has processed all descriptors on the
742266423Sjfv *  admin send queue. Returns FALSE if there are still requests pending.
743266423Sjfv **/
744266423Sjfvbool i40e_asq_done(struct i40e_hw *hw)
745266423Sjfv{
746266423Sjfv	/* AQ designers suggest use of head for better
747266423Sjfv	 * timing reliability than DD bit
748266423Sjfv	 */
749266423Sjfv	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
750266423Sjfv
751266423Sjfv}
752266423Sjfv
/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem);
 *         on completion the writeback descriptor is copied back into it
 *  @buff: buffer to use for indirect commands (may be NULL for direct
 *         commands)
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure (may be NULL)
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 *
 *  Serialized against concurrent senders with the asq spinlock held for
 *  the whole call.  Unless the caller requested async/postpone handling,
 *  busy-polls (1 ms steps, up to hw->aq.asq_cmd_timeout) for firmware
 *  writeback.  The firmware return code is saved in hw->aq.asq_last_status.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	/* reset last status; only overwritten below if FW writes back */
	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	/* count == 0 means the send queue was never set up or was shut down */
	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	/* sanity-check the hardware head index against the ring size */
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	/* per-descriptor details slot shadows the ring entry we will use */
	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		/* no caller details: clear any stale contents from a
		 * previous use of this ring slot
		 */
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	/* indirect buffers larger than the preallocated DMA buffers
	 * cannot be handed to firmware
	 */
	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	/* postpone only makes sense for async commands; reject the combo */
	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	/* writing tail notifies firmware; skipped when caller postpones */
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* 1 ms poll granularity; asq_cmd_timeout is thus
			 * the overall timeout in milliseconds
			 */
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}
954266423Sjfv
955266423Sjfv/**
956266423Sjfv *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
957266423Sjfv *  @desc:     pointer to the temp descriptor (non DMA mem)
958266423Sjfv *  @opcode:   the opcode can be used to decide which flags to turn off or on
959266423Sjfv *
960266423Sjfv *  Fill the desc with default values
961266423Sjfv **/
962266423Sjfvvoid i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
963266423Sjfv				       u16 opcode)
964266423Sjfv{
965266423Sjfv	/* zero out the desc */
966266423Sjfv	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
967266423Sjfv		    I40E_NONDMA_MEM);
968266423Sjfv	desc->opcode = CPU_TO_LE16(opcode);
969266423Sjfv	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
970266423Sjfv}
971266423Sjfv
972266423Sjfv/**
973266423Sjfv *  i40e_clean_arq_element
974266423Sjfv *  @hw: pointer to the hw struct
975266423Sjfv *  @e: event info from the receive descriptor, includes any buffers
976266423Sjfv *  @pending: number of events that could be left to process
977266423Sjfv *
978266423Sjfv *  This function cleans one Admin Receive Queue element and returns
979266423Sjfv *  the contents through e.  It can also return how many events are
980266423Sjfv *  left to process through 'pending'
981266423Sjfv **/
982266423Sjfvenum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
983266423Sjfv					     struct i40e_arq_event_info *e,
984266423Sjfv					     u16 *pending)
985266423Sjfv{
986266423Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
987266423Sjfv	u16 ntc = hw->aq.arq.next_to_clean;
988266423Sjfv	struct i40e_aq_desc *desc;
989266423Sjfv	struct i40e_dma_mem *bi;
990266423Sjfv	u16 desc_idx;
991266423Sjfv	u16 datalen;
992266423Sjfv	u16 flags;
993266423Sjfv	u16 ntu;
994266423Sjfv
995299554Serj	/* pre-clean the event info */
996299554Serj	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
997299554Serj
998266423Sjfv	/* take the lock before we start messing with the ring */
999266423Sjfv	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
1000266423Sjfv
1001299548Serj	if (hw->aq.arq.count == 0) {
1002299548Serj		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
1003299548Serj			   "AQRX: Admin queue not initialized.\n");
1004299548Serj		ret_code = I40E_ERR_QUEUE_EMPTY;
1005299548Serj		goto clean_arq_element_err;
1006299548Serj	}
1007299548Serj
1008266423Sjfv	/* set next_to_use to head */
1009284049Sjfv	if (!i40e_is_vf(hw))
1010284049Sjfv		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
1011284049Sjfv	if (i40e_is_vf(hw))
1012284049Sjfv		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
1013266423Sjfv	if (ntu == ntc) {
1014266423Sjfv		/* nothing to do - shouldn't need to update ring's values */
1015266423Sjfv		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
1016266423Sjfv		goto clean_arq_element_out;
1017266423Sjfv	}
1018266423Sjfv
1019266423Sjfv	/* now clean the next descriptor */
1020266423Sjfv	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
1021266423Sjfv	desc_idx = ntc;
1022266423Sjfv
1023266423Sjfv	flags = LE16_TO_CPU(desc->flags);
1024266423Sjfv	if (flags & I40E_AQ_FLAG_ERR) {
1025266423Sjfv		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
1026266423Sjfv		hw->aq.arq_last_status =
1027266423Sjfv			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
1028266423Sjfv		i40e_debug(hw,
1029266423Sjfv			   I40E_DEBUG_AQ_MESSAGE,
1030266423Sjfv			   "AQRX: Event received with error 0x%X.\n",
1031266423Sjfv			   hw->aq.arq_last_status);
1032266423Sjfv	}
1033266423Sjfv
1034269198Sjfv	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
1035269198Sjfv		    I40E_DMA_TO_NONDMA);
1036269198Sjfv	datalen = LE16_TO_CPU(desc->datalen);
1037270346Sjfv	e->msg_len = min(datalen, e->buf_len);
1038270346Sjfv	if (e->msg_buf != NULL && (e->msg_len != 0))
1039269198Sjfv		i40e_memcpy(e->msg_buf,
1040269198Sjfv			    hw->aq.arq.r.arq_bi[desc_idx].va,
1041270346Sjfv			    e->msg_len, I40E_DMA_TO_NONDMA);
1042269198Sjfv
1043266423Sjfv	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
1044269198Sjfv	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
1045269198Sjfv		      hw->aq.arq_buf_size);
1046266423Sjfv
1047266423Sjfv	/* Restore the original datalen and buffer address in the desc,
1048266423Sjfv	 * FW updates datalen to indicate the event message
1049266423Sjfv	 * size
1050266423Sjfv	 */
1051266423Sjfv	bi = &hw->aq.arq.r.arq_bi[ntc];
1052266423Sjfv	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
1053266423Sjfv
1054266423Sjfv	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
1055266423Sjfv	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
1056266423Sjfv		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
1057266423Sjfv	desc->datalen = CPU_TO_LE16((u16)bi->size);
1058266423Sjfv	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
1059266423Sjfv	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
1060266423Sjfv
1061266423Sjfv	/* set tail = the last cleaned desc index. */
1062266423Sjfv	wr32(hw, hw->aq.arq.tail, ntc);
1063266423Sjfv	/* ntc is updated to tail + 1 */
1064266423Sjfv	ntc++;
1065266423Sjfv	if (ntc == hw->aq.num_arq_entries)
1066266423Sjfv		ntc = 0;
1067266423Sjfv	hw->aq.arq.next_to_clean = ntc;
1068266423Sjfv	hw->aq.arq.next_to_use = ntu;
1069266423Sjfv
1070303967Ssbruno	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
1071299554Serjclean_arq_element_out:
1072299554Serj	/* Set pending if needed, unlock and return */
1073299554Serj	if (pending != NULL)
1074299554Serj		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
1075299554Serjclean_arq_element_err:
1076299554Serj	i40e_release_spinlock(&hw->aq.arq_spinlock);
1077299554Serj
1078266423Sjfv	return ret_code;
1079266423Sjfv}
1080266423Sjfv
1081