/******************************************************************************

  Copyright (c) 2013-2019, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/11/sys/dev/ixl/i40e_adminq.c 349163 2019-06-18 00:08:02Z erj $*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* Allocate the buffer info memory first, then the mapped
	 * buffers used for event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* In accordance with the Admin Queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this function; the memory allocation
 *  routines it calls are not safe in atomic context.
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_config_regs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);
	return ret_code;

init_config_regs:
	i40e_free_asq_bufs(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this function; the memory allocation
 *  routines it calls are not safe in atomic context.
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;
	enum i40e_status_code ret_code;
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	int retry = 0;

	/* verify input for valid configuration */
	if (aq->num_arq_entries == 0 ||
	    aq->num_asq_entries == 0 ||
	    aq->arq_buf_size == 0 ||
	    aq->asq_buf_size == 0) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&aq->asq_spinlock);
	i40e_init_spinlock(&aq->arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&aq->fw_maj_ver,
							&aq->fw_min_ver,
							&aq->fw_build,
							&aq->api_maj_ver,
							&aq->api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
	/*
	 * Some features were introduced in different FW API versions
	 * for different MAC types.
	 */
	switch (hw->mac.type) {
	case I40E_MAC_XL710:
		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
			/* The ability to RX (not drop) 802.1ad frames */
			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
		}
		break;
	case I40E_MAC_X722:
		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
		/* fall through */
	default:
		break;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 5))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 8))
		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;

	if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&aq->asq_spinlock);
	i40e_destroy_spinlock(&aq->arq_spinlock);

init_adminq_exit:
	return ret_code;
}
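
/*
 * Illustrative usage sketch (not compiled into the driver): a typical PF
 * caller fills in the four required hw->aq fields before i40e_init_adminq()
 * and checks the return code.  The entry counts and buffer sizes below are
 * example values only, not mandated by this file.
 *
 *	enum i40e_status_code status;
 *
 *	hw->aq.num_asq_entries = 256;
 *	hw->aq.num_arq_entries = 256;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *
 *	status = i40e_init_adminq(hw);
 *	if (status != I40E_SUCCESS)
 *		// handle/report failure; the AdminQ is unusable
 */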

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Returns the number of free descriptors.
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns TRUE if the firmware has processed all descriptors on the
 *  admin send queue. Returns FALSE if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command routine for the Admin Queue send queue.
 *  It posts the descriptor, reclaims completed descriptors, and waits for
 *  the firmware write-back.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * byte order does not matter here: the FW ignores the
		 * cookie and simply echoes it back for the driver's use.
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag.\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* Call the clean function to reclaim any descriptors that the FW
	 * has already processed; it returns the number of free descriptors.
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error: queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save the writeback descriptor if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if a timeout occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}
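
/*
 * Illustrative usage sketch (not compiled): sending a direct (no-buffer)
 * command.  The opcode shown is one example from i40e_adminq_cmd.h; real
 * callers normally go through a wrapper in i40e_common.c.
 *
 *	struct i40e_aq_desc desc;
 *	enum i40e_status_code status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	if (status != I40E_SUCCESS)
 *		// inspect hw->aq.asq_last_status for the AQ return code
 */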

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	if (!i40e_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW overwrites datalen to indicate the event message size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
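
/*
 * Illustrative usage sketch (not compiled): draining the ARQ from an
 * interrupt or timer handler.  'buf' is assumed to be a caller-allocated
 * buffer of at least hw->aq.arq_buf_size bytes.
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = buf;
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;	// no work left, or queue error
 *		// dispatch on LE16_TO_CPU(event.desc.opcode) here
 *	} while (pending);
 */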