/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/11/sys/dev/ixl/i40e_adminq.c 303967 2016-08-11 19:13:30Z sbruno $*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Records the head, tail, length, and base address register offsets for
 *  both queues in the local struct; no ring memory needs to be allocated
 *  before this is called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
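		/* each descriptor is pre-marked as carrying a buffer;
		 * buffers larger than I40E_AQ_LARGE_BUF also need the
		 * large-buffer flag set
		 */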
		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
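	/* tail is written as count - 1 rather than count: by the usual ring
	 * convention one descriptor is left unposted so that head == tail
	 * still reads unambiguously as an empty ring
	 */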
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
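 *
 *  A minimal caller sketch (the entry counts and buffer sizes here are
 *  illustrative only, not required values):
 *
 *      hw->aq.num_asq_entries = 128;
 *      hw->aq.num_arq_entries = 128;
 *      hw->aq.asq_buf_size = 4096;
 *      hw->aq.arq_buf_size = 4096;
 *      status = i40e_init_adminq(hw);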
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	enum i40e_status_code ret_code;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
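	/* (a millisecond count: i40e_asq_send_command polls i40e_asq_done
	 * once per millisecond up to this many times)
	 */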
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of the firmware version info queried below */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;
	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
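	/* the firmware advances the head register as it consumes commands,
	 * so every descriptor between next_to_clean and head can be
	 * reclaimed (invoking any completion callback first)
	 */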
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns TRUE if the firmware has processed all descriptors on the
 *  admin send queue. Returns FALSE if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It posts the command, optionally waits for completion, and
 *  reclaims descriptors that the firmware has already processed.
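 *
 *  A minimal caller sketch for a direct (bufferless) command; the opcode
 *  is chosen purely for illustration:
 *
 *      struct i40e_aq_desc desc;
 *
 *      i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *      status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);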
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
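 *
 *  A polling-loop caller sketch (allocation of msg_buf elided):
 *
 *      struct i40e_arq_event_info event;
 *      u16 pending;
 *
 *      event.buf_len = hw->aq.arq_buf_size;
 *      event.msg_buf = msg_buf;
 *      do {
 *              ret_code = i40e_clean_arq_element(hw, &event, &pending);
 *      } while (ret_code == I40E_SUCCESS && pending != 0);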
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	if (!i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
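	/* when the clean index has wrapped past the hardware head, adding
	 * the ring count back keeps the result equal to the number of
	 * descriptors still outstanding between ntc and ntu
	 */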
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}