i40e_adminq.c revision 277082
1266423Sjfv/****************************************************************************** 2266423Sjfv 3266423Sjfv Copyright (c) 2013-2014, Intel Corporation 4266423Sjfv All rights reserved. 5266423Sjfv 6266423Sjfv Redistribution and use in source and binary forms, with or without 7266423Sjfv modification, are permitted provided that the following conditions are met: 8266423Sjfv 9266423Sjfv 1. Redistributions of source code must retain the above copyright notice, 10266423Sjfv this list of conditions and the following disclaimer. 11266423Sjfv 12266423Sjfv 2. Redistributions in binary form must reproduce the above copyright 13266423Sjfv notice, this list of conditions and the following disclaimer in the 14266423Sjfv documentation and/or other materials provided with the distribution. 15266423Sjfv 16266423Sjfv 3. Neither the name of the Intel Corporation nor the names of its 17266423Sjfv contributors may be used to endorse or promote products derived from 18266423Sjfv this software without specific prior written permission. 19266423Sjfv 20266423Sjfv THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21266423Sjfv AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22266423Sjfv IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23266423Sjfv ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24266423Sjfv LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25266423Sjfv CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26266423Sjfv SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27266423Sjfv INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28266423Sjfv CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29266423Sjfv ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30266423Sjfv POSSIBILITY OF SUCH DAMAGE. 
31266423Sjfv 32266423Sjfv******************************************************************************/ 33266423Sjfv/*$FreeBSD: head/sys/dev/ixl/i40e_adminq.c 277082 2015-01-12 18:32:45Z jfv $*/ 34266423Sjfv 35266423Sjfv#include "i40e_status.h" 36266423Sjfv#include "i40e_type.h" 37266423Sjfv#include "i40e_register.h" 38266423Sjfv#include "i40e_adminq.h" 39266423Sjfv#include "i40e_prototype.h" 40266423Sjfv 41266423Sjfv/** 42266423Sjfv * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation 43266423Sjfv * @desc: API request descriptor 44266423Sjfv **/ 45266423Sjfvstatic INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) 46266423Sjfv{ 47266423Sjfv return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) || 48266423Sjfv desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update)); 49266423Sjfv} 50266423Sjfv 51266423Sjfv/** 52266423Sjfv * i40e_adminq_init_regs - Initialize AdminQ registers 53266423Sjfv * @hw: pointer to the hardware structure 54266423Sjfv * 55266423Sjfv * This assumes the alloc_asq and alloc_arq functions have already been called 56266423Sjfv **/ 57266423Sjfvstatic void i40e_adminq_init_regs(struct i40e_hw *hw) 58266423Sjfv{ 59266423Sjfv /* set head and tail registers in our local struct */ 60270346Sjfv if (i40e_is_vf(hw)) { 61266423Sjfv hw->aq.asq.tail = I40E_VF_ATQT1; 62266423Sjfv hw->aq.asq.head = I40E_VF_ATQH1; 63266423Sjfv hw->aq.asq.len = I40E_VF_ATQLEN1; 64269198Sjfv hw->aq.asq.bal = I40E_VF_ATQBAL1; 65269198Sjfv hw->aq.asq.bah = I40E_VF_ATQBAH1; 66266423Sjfv hw->aq.arq.tail = I40E_VF_ARQT1; 67266423Sjfv hw->aq.arq.head = I40E_VF_ARQH1; 68266423Sjfv hw->aq.arq.len = I40E_VF_ARQLEN1; 69269198Sjfv hw->aq.arq.bal = I40E_VF_ARQBAL1; 70269198Sjfv hw->aq.arq.bah = I40E_VF_ARQBAH1; 71266423Sjfv } else { 72266423Sjfv hw->aq.asq.tail = I40E_PF_ATQT; 73266423Sjfv hw->aq.asq.head = I40E_PF_ATQH; 74266423Sjfv hw->aq.asq.len = I40E_PF_ATQLEN; 75269198Sjfv hw->aq.asq.bal = I40E_PF_ATQBAL; 76269198Sjfv hw->aq.asq.bah = I40E_PF_ATQBAH; 
77266423Sjfv hw->aq.arq.tail = I40E_PF_ARQT; 78266423Sjfv hw->aq.arq.head = I40E_PF_ARQH; 79266423Sjfv hw->aq.arq.len = I40E_PF_ARQLEN; 80269198Sjfv hw->aq.arq.bal = I40E_PF_ARQBAL; 81269198Sjfv hw->aq.arq.bah = I40E_PF_ARQBAH; 82266423Sjfv } 83266423Sjfv} 84266423Sjfv 85266423Sjfv/** 86266423Sjfv * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings 87266423Sjfv * @hw: pointer to the hardware structure 88266423Sjfv **/ 89266423Sjfvenum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) 90266423Sjfv{ 91266423Sjfv enum i40e_status_code ret_code; 92266423Sjfv 93266423Sjfv ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, 94266423Sjfv i40e_mem_atq_ring, 95266423Sjfv (hw->aq.num_asq_entries * 96266423Sjfv sizeof(struct i40e_aq_desc)), 97266423Sjfv I40E_ADMINQ_DESC_ALIGNMENT); 98266423Sjfv if (ret_code) 99266423Sjfv return ret_code; 100266423Sjfv 101266423Sjfv ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, 102266423Sjfv (hw->aq.num_asq_entries * 103266423Sjfv sizeof(struct i40e_asq_cmd_details))); 104266423Sjfv if (ret_code) { 105266423Sjfv i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 106266423Sjfv return ret_code; 107266423Sjfv } 108266423Sjfv 109266423Sjfv return ret_code; 110266423Sjfv} 111266423Sjfv 112266423Sjfv/** 113266423Sjfv * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings 114266423Sjfv * @hw: pointer to the hardware structure 115266423Sjfv **/ 116266423Sjfvenum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) 117266423Sjfv{ 118266423Sjfv enum i40e_status_code ret_code; 119266423Sjfv 120266423Sjfv ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, 121266423Sjfv i40e_mem_arq_ring, 122266423Sjfv (hw->aq.num_arq_entries * 123266423Sjfv sizeof(struct i40e_aq_desc)), 124266423Sjfv I40E_ADMINQ_DESC_ALIGNMENT); 125266423Sjfv 126266423Sjfv return ret_code; 127266423Sjfv} 128266423Sjfv 129266423Sjfv/** 130266423Sjfv * i40e_free_adminq_asq - Free Admin Queue send rings 131266423Sjfv * @hw: 
pointer to the hardware structure 132266423Sjfv * 133266423Sjfv * This assumes the posted send buffers have already been cleaned 134266423Sjfv * and de-allocated 135266423Sjfv **/ 136266423Sjfvvoid i40e_free_adminq_asq(struct i40e_hw *hw) 137266423Sjfv{ 138266423Sjfv i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 139266423Sjfv} 140266423Sjfv 141266423Sjfv/** 142266423Sjfv * i40e_free_adminq_arq - Free Admin Queue receive rings 143266423Sjfv * @hw: pointer to the hardware structure 144266423Sjfv * 145266423Sjfv * This assumes the posted receive buffers have already been cleaned 146266423Sjfv * and de-allocated 147266423Sjfv **/ 148266423Sjfvvoid i40e_free_adminq_arq(struct i40e_hw *hw) 149266423Sjfv{ 150266423Sjfv i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); 151266423Sjfv} 152266423Sjfv 153266423Sjfv/** 154266423Sjfv * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue 155266423Sjfv * @hw: pointer to the hardware structure 156266423Sjfv **/ 157266423Sjfvstatic enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw) 158266423Sjfv{ 159266423Sjfv enum i40e_status_code ret_code; 160266423Sjfv struct i40e_aq_desc *desc; 161266423Sjfv struct i40e_dma_mem *bi; 162266423Sjfv int i; 163266423Sjfv 164266423Sjfv /* We'll be allocating the buffer info memory first, then we can 165266423Sjfv * allocate the mapped buffers for the event processing 166266423Sjfv */ 167266423Sjfv 168266423Sjfv /* buffer_info structures do not need alignment */ 169266423Sjfv ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, 170266423Sjfv (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); 171266423Sjfv if (ret_code) 172266423Sjfv goto alloc_arq_bufs; 173266423Sjfv hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; 174266423Sjfv 175266423Sjfv /* allocate the mapped buffers */ 176266423Sjfv for (i = 0; i < hw->aq.num_arq_entries; i++) { 177266423Sjfv bi = &hw->aq.arq.r.arq_bi[i]; 178266423Sjfv ret_code = i40e_allocate_dma_mem(hw, bi, 
179266423Sjfv i40e_mem_arq_buf, 180266423Sjfv hw->aq.arq_buf_size, 181266423Sjfv I40E_ADMINQ_DESC_ALIGNMENT); 182266423Sjfv if (ret_code) 183266423Sjfv goto unwind_alloc_arq_bufs; 184266423Sjfv 185266423Sjfv /* now configure the descriptors for use */ 186266423Sjfv desc = I40E_ADMINQ_DESC(hw->aq.arq, i); 187266423Sjfv 188266423Sjfv desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF); 189266423Sjfv if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) 190266423Sjfv desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB); 191266423Sjfv desc->opcode = 0; 192266423Sjfv /* This is in accordance with Admin queue design, there is no 193266423Sjfv * register for buffer size configuration 194266423Sjfv */ 195266423Sjfv desc->datalen = CPU_TO_LE16((u16)bi->size); 196266423Sjfv desc->retval = 0; 197266423Sjfv desc->cookie_high = 0; 198266423Sjfv desc->cookie_low = 0; 199266423Sjfv desc->params.external.addr_high = 200266423Sjfv CPU_TO_LE32(I40E_HI_DWORD(bi->pa)); 201266423Sjfv desc->params.external.addr_low = 202266423Sjfv CPU_TO_LE32(I40E_LO_DWORD(bi->pa)); 203266423Sjfv desc->params.external.param0 = 0; 204266423Sjfv desc->params.external.param1 = 0; 205266423Sjfv } 206266423Sjfv 207266423Sjfvalloc_arq_bufs: 208266423Sjfv return ret_code; 209266423Sjfv 210266423Sjfvunwind_alloc_arq_bufs: 211266423Sjfv /* don't try to free the one that failed... 
*/ 212266423Sjfv i--; 213266423Sjfv for (; i >= 0; i--) 214266423Sjfv i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); 215266423Sjfv i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); 216266423Sjfv 217266423Sjfv return ret_code; 218266423Sjfv} 219266423Sjfv 220266423Sjfv/** 221266423Sjfv * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue 222266423Sjfv * @hw: pointer to the hardware structure 223266423Sjfv **/ 224266423Sjfvstatic enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw) 225266423Sjfv{ 226266423Sjfv enum i40e_status_code ret_code; 227266423Sjfv struct i40e_dma_mem *bi; 228266423Sjfv int i; 229266423Sjfv 230266423Sjfv /* No mapped memory needed yet, just the buffer info structures */ 231266423Sjfv ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, 232266423Sjfv (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); 233266423Sjfv if (ret_code) 234266423Sjfv goto alloc_asq_bufs; 235266423Sjfv hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; 236266423Sjfv 237266423Sjfv /* allocate the mapped buffers */ 238266423Sjfv for (i = 0; i < hw->aq.num_asq_entries; i++) { 239266423Sjfv bi = &hw->aq.asq.r.asq_bi[i]; 240266423Sjfv ret_code = i40e_allocate_dma_mem(hw, bi, 241266423Sjfv i40e_mem_asq_buf, 242266423Sjfv hw->aq.asq_buf_size, 243266423Sjfv I40E_ADMINQ_DESC_ALIGNMENT); 244266423Sjfv if (ret_code) 245266423Sjfv goto unwind_alloc_asq_bufs; 246266423Sjfv } 247266423Sjfvalloc_asq_bufs: 248266423Sjfv return ret_code; 249266423Sjfv 250266423Sjfvunwind_alloc_asq_bufs: 251266423Sjfv /* don't try to free the one that failed... 
*/ 252266423Sjfv i--; 253266423Sjfv for (; i >= 0; i--) 254266423Sjfv i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); 255266423Sjfv i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); 256266423Sjfv 257266423Sjfv return ret_code; 258266423Sjfv} 259266423Sjfv 260266423Sjfv/** 261266423Sjfv * i40e_free_arq_bufs - Free receive queue buffer info elements 262266423Sjfv * @hw: pointer to the hardware structure 263266423Sjfv **/ 264266423Sjfvstatic void i40e_free_arq_bufs(struct i40e_hw *hw) 265266423Sjfv{ 266266423Sjfv int i; 267266423Sjfv 268266423Sjfv /* free descriptors */ 269266423Sjfv for (i = 0; i < hw->aq.num_arq_entries; i++) 270266423Sjfv i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); 271266423Sjfv 272266423Sjfv /* free the descriptor memory */ 273266423Sjfv i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); 274266423Sjfv 275266423Sjfv /* free the dma header */ 276266423Sjfv i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); 277266423Sjfv} 278266423Sjfv 279266423Sjfv/** 280266423Sjfv * i40e_free_asq_bufs - Free send queue buffer info elements 281266423Sjfv * @hw: pointer to the hardware structure 282266423Sjfv **/ 283266423Sjfvstatic void i40e_free_asq_bufs(struct i40e_hw *hw) 284266423Sjfv{ 285266423Sjfv int i; 286266423Sjfv 287266423Sjfv /* only unmap if the address is non-NULL */ 288266423Sjfv for (i = 0; i < hw->aq.num_asq_entries; i++) 289266423Sjfv if (hw->aq.asq.r.asq_bi[i].pa) 290266423Sjfv i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); 291266423Sjfv 292266423Sjfv /* free the buffer info list */ 293266423Sjfv i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); 294266423Sjfv 295266423Sjfv /* free the descriptor memory */ 296266423Sjfv i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 297266423Sjfv 298266423Sjfv /* free the dma header */ 299266423Sjfv i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); 300266423Sjfv} 301266423Sjfv 302266423Sjfv/** 303266423Sjfv * i40e_config_asq_regs - configure ASQ registers 304266423Sjfv * @hw: pointer to the hardware structure 305266423Sjfv * 
306266423Sjfv * Configure base address and length registers for the transmit queue 307266423Sjfv **/ 308266423Sjfvstatic enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw) 309266423Sjfv{ 310266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 311266423Sjfv u32 reg = 0; 312266423Sjfv 313266423Sjfv /* Clear Head and Tail */ 314266423Sjfv wr32(hw, hw->aq.asq.head, 0); 315266423Sjfv wr32(hw, hw->aq.asq.tail, 0); 316266423Sjfv 317269198Sjfv /* set starting point */ 318269198Sjfv wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | 319269198Sjfv I40E_PF_ATQLEN_ATQENABLE_MASK)); 320269198Sjfv wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa)); 321269198Sjfv wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa)); 322266423Sjfv 323266423Sjfv /* Check one register to verify that config was applied */ 324269198Sjfv reg = rd32(hw, hw->aq.asq.bal); 325266423Sjfv if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa)) 326266423Sjfv ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; 327266423Sjfv 328266423Sjfv return ret_code; 329266423Sjfv} 330266423Sjfv 331266423Sjfv/** 332266423Sjfv * i40e_config_arq_regs - ARQ register configuration 333266423Sjfv * @hw: pointer to the hardware structure 334266423Sjfv * 335266423Sjfv * Configure base address and length registers for the receive (event queue) 336266423Sjfv **/ 337266423Sjfvstatic enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw) 338266423Sjfv{ 339266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 340266423Sjfv u32 reg = 0; 341266423Sjfv 342266423Sjfv /* Clear Head and Tail */ 343266423Sjfv wr32(hw, hw->aq.arq.head, 0); 344266423Sjfv wr32(hw, hw->aq.arq.tail, 0); 345266423Sjfv 346269198Sjfv /* set starting point */ 347269198Sjfv wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | 348269198Sjfv I40E_PF_ARQLEN_ARQENABLE_MASK)); 349269198Sjfv wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa)); 350269198Sjfv wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa)); 
351266423Sjfv 352266423Sjfv /* Update tail in the HW to post pre-allocated buffers */ 353266423Sjfv wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); 354266423Sjfv 355266423Sjfv /* Check one register to verify that config was applied */ 356269198Sjfv reg = rd32(hw, hw->aq.arq.bal); 357266423Sjfv if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa)) 358266423Sjfv ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; 359266423Sjfv 360266423Sjfv return ret_code; 361266423Sjfv} 362266423Sjfv 363266423Sjfv/** 364266423Sjfv * i40e_init_asq - main initialization routine for ASQ 365266423Sjfv * @hw: pointer to the hardware structure 366266423Sjfv * 367266423Sjfv * This is the main initialization routine for the Admin Send Queue 368266423Sjfv * Prior to calling this function, drivers *MUST* set the following fields 369266423Sjfv * in the hw->aq structure: 370266423Sjfv * - hw->aq.num_asq_entries 371266423Sjfv * - hw->aq.arq_buf_size 372266423Sjfv * 373266423Sjfv * Do *NOT* hold the lock when calling this as the memory allocation routines 374266423Sjfv * called are not going to be atomic context safe 375266423Sjfv **/ 376266423Sjfvenum i40e_status_code i40e_init_asq(struct i40e_hw *hw) 377266423Sjfv{ 378266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 379266423Sjfv 380266423Sjfv if (hw->aq.asq.count > 0) { 381266423Sjfv /* queue already initialized */ 382266423Sjfv ret_code = I40E_ERR_NOT_READY; 383266423Sjfv goto init_adminq_exit; 384266423Sjfv } 385266423Sjfv 386266423Sjfv /* verify input for valid configuration */ 387266423Sjfv if ((hw->aq.num_asq_entries == 0) || 388266423Sjfv (hw->aq.asq_buf_size == 0)) { 389266423Sjfv ret_code = I40E_ERR_CONFIG; 390266423Sjfv goto init_adminq_exit; 391266423Sjfv } 392266423Sjfv 393266423Sjfv hw->aq.asq.next_to_use = 0; 394266423Sjfv hw->aq.asq.next_to_clean = 0; 395266423Sjfv hw->aq.asq.count = hw->aq.num_asq_entries; 396266423Sjfv 397266423Sjfv /* allocate the ring memory */ 398266423Sjfv ret_code = i40e_alloc_adminq_asq_ring(hw); 
399266423Sjfv if (ret_code != I40E_SUCCESS) 400266423Sjfv goto init_adminq_exit; 401266423Sjfv 402266423Sjfv /* allocate buffers in the rings */ 403266423Sjfv ret_code = i40e_alloc_asq_bufs(hw); 404266423Sjfv if (ret_code != I40E_SUCCESS) 405266423Sjfv goto init_adminq_free_rings; 406266423Sjfv 407266423Sjfv /* initialize base registers */ 408266423Sjfv ret_code = i40e_config_asq_regs(hw); 409266423Sjfv if (ret_code != I40E_SUCCESS) 410266423Sjfv goto init_adminq_free_rings; 411266423Sjfv 412266423Sjfv /* success! */ 413266423Sjfv goto init_adminq_exit; 414266423Sjfv 415266423Sjfvinit_adminq_free_rings: 416266423Sjfv i40e_free_adminq_asq(hw); 417266423Sjfv 418266423Sjfvinit_adminq_exit: 419266423Sjfv return ret_code; 420266423Sjfv} 421266423Sjfv 422266423Sjfv/** 423266423Sjfv * i40e_init_arq - initialize ARQ 424266423Sjfv * @hw: pointer to the hardware structure 425266423Sjfv * 426266423Sjfv * The main initialization routine for the Admin Receive (Event) Queue. 427266423Sjfv * Prior to calling this function, drivers *MUST* set the following fields 428266423Sjfv * in the hw->aq structure: 429266423Sjfv * - hw->aq.num_asq_entries 430266423Sjfv * - hw->aq.arq_buf_size 431266423Sjfv * 432266423Sjfv * Do *NOT* hold the lock when calling this as the memory allocation routines 433266423Sjfv * called are not going to be atomic context safe 434266423Sjfv **/ 435266423Sjfvenum i40e_status_code i40e_init_arq(struct i40e_hw *hw) 436266423Sjfv{ 437266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 438266423Sjfv 439266423Sjfv if (hw->aq.arq.count > 0) { 440266423Sjfv /* queue already initialized */ 441266423Sjfv ret_code = I40E_ERR_NOT_READY; 442266423Sjfv goto init_adminq_exit; 443266423Sjfv } 444266423Sjfv 445266423Sjfv /* verify input for valid configuration */ 446266423Sjfv if ((hw->aq.num_arq_entries == 0) || 447266423Sjfv (hw->aq.arq_buf_size == 0)) { 448266423Sjfv ret_code = I40E_ERR_CONFIG; 449266423Sjfv goto init_adminq_exit; 450266423Sjfv } 451266423Sjfv 
452266423Sjfv hw->aq.arq.next_to_use = 0; 453266423Sjfv hw->aq.arq.next_to_clean = 0; 454266423Sjfv hw->aq.arq.count = hw->aq.num_arq_entries; 455266423Sjfv 456266423Sjfv /* allocate the ring memory */ 457266423Sjfv ret_code = i40e_alloc_adminq_arq_ring(hw); 458266423Sjfv if (ret_code != I40E_SUCCESS) 459266423Sjfv goto init_adminq_exit; 460266423Sjfv 461266423Sjfv /* allocate buffers in the rings */ 462266423Sjfv ret_code = i40e_alloc_arq_bufs(hw); 463266423Sjfv if (ret_code != I40E_SUCCESS) 464266423Sjfv goto init_adminq_free_rings; 465266423Sjfv 466266423Sjfv /* initialize base registers */ 467266423Sjfv ret_code = i40e_config_arq_regs(hw); 468266423Sjfv if (ret_code != I40E_SUCCESS) 469266423Sjfv goto init_adminq_free_rings; 470266423Sjfv 471266423Sjfv /* success! */ 472266423Sjfv goto init_adminq_exit; 473266423Sjfv 474266423Sjfvinit_adminq_free_rings: 475266423Sjfv i40e_free_adminq_arq(hw); 476266423Sjfv 477266423Sjfvinit_adminq_exit: 478266423Sjfv return ret_code; 479266423Sjfv} 480266423Sjfv 481266423Sjfv/** 482266423Sjfv * i40e_shutdown_asq - shutdown the ASQ 483266423Sjfv * @hw: pointer to the hardware structure 484266423Sjfv * 485266423Sjfv * The main shutdown routine for the Admin Send Queue 486266423Sjfv **/ 487266423Sjfvenum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw) 488266423Sjfv{ 489266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 490266423Sjfv 491266423Sjfv if (hw->aq.asq.count == 0) 492266423Sjfv return I40E_ERR_NOT_READY; 493266423Sjfv 494266423Sjfv /* Stop firmware AdminQ processing */ 495266423Sjfv wr32(hw, hw->aq.asq.head, 0); 496266423Sjfv wr32(hw, hw->aq.asq.tail, 0); 497266423Sjfv wr32(hw, hw->aq.asq.len, 0); 498269198Sjfv wr32(hw, hw->aq.asq.bal, 0); 499269198Sjfv wr32(hw, hw->aq.asq.bah, 0); 500266423Sjfv 501266423Sjfv /* make sure spinlock is available */ 502266423Sjfv i40e_acquire_spinlock(&hw->aq.asq_spinlock); 503266423Sjfv 504266423Sjfv hw->aq.asq.count = 0; /* to indicate uninitialized queue */ 505266423Sjfv 
506266423Sjfv /* free ring buffers */ 507266423Sjfv i40e_free_asq_bufs(hw); 508266423Sjfv 509266423Sjfv i40e_release_spinlock(&hw->aq.asq_spinlock); 510266423Sjfv 511266423Sjfv return ret_code; 512266423Sjfv} 513266423Sjfv 514266423Sjfv/** 515266423Sjfv * i40e_shutdown_arq - shutdown ARQ 516266423Sjfv * @hw: pointer to the hardware structure 517266423Sjfv * 518266423Sjfv * The main shutdown routine for the Admin Receive Queue 519266423Sjfv **/ 520266423Sjfvenum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw) 521266423Sjfv{ 522266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 523266423Sjfv 524266423Sjfv if (hw->aq.arq.count == 0) 525266423Sjfv return I40E_ERR_NOT_READY; 526266423Sjfv 527266423Sjfv /* Stop firmware AdminQ processing */ 528266423Sjfv wr32(hw, hw->aq.arq.head, 0); 529266423Sjfv wr32(hw, hw->aq.arq.tail, 0); 530266423Sjfv wr32(hw, hw->aq.arq.len, 0); 531269198Sjfv wr32(hw, hw->aq.arq.bal, 0); 532269198Sjfv wr32(hw, hw->aq.arq.bah, 0); 533266423Sjfv 534266423Sjfv /* make sure spinlock is available */ 535266423Sjfv i40e_acquire_spinlock(&hw->aq.arq_spinlock); 536266423Sjfv 537266423Sjfv hw->aq.arq.count = 0; /* to indicate uninitialized queue */ 538266423Sjfv 539266423Sjfv /* free ring buffers */ 540266423Sjfv i40e_free_arq_bufs(hw); 541266423Sjfv 542266423Sjfv i40e_release_spinlock(&hw->aq.arq_spinlock); 543266423Sjfv 544266423Sjfv return ret_code; 545266423Sjfv} 546266423Sjfv 547266423Sjfv/** 548266423Sjfv * i40e_init_adminq - main initialization routine for Admin Queue 549266423Sjfv * @hw: pointer to the hardware structure 550266423Sjfv * 551266423Sjfv * Prior to calling this function, drivers *MUST* set the following fields 552266423Sjfv * in the hw->aq structure: 553266423Sjfv * - hw->aq.num_asq_entries 554266423Sjfv * - hw->aq.num_arq_entries 555266423Sjfv * - hw->aq.arq_buf_size 556266423Sjfv * - hw->aq.asq_buf_size 557266423Sjfv **/ 558266423Sjfvenum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) 559266423Sjfv{ 560266423Sjfv 
enum i40e_status_code ret_code; 561266423Sjfv u16 eetrack_lo, eetrack_hi; 562266423Sjfv int retry = 0; 563266423Sjfv /* verify input for valid configuration */ 564266423Sjfv if ((hw->aq.num_arq_entries == 0) || 565266423Sjfv (hw->aq.num_asq_entries == 0) || 566266423Sjfv (hw->aq.arq_buf_size == 0) || 567266423Sjfv (hw->aq.asq_buf_size == 0)) { 568266423Sjfv ret_code = I40E_ERR_CONFIG; 569266423Sjfv goto init_adminq_exit; 570266423Sjfv } 571266423Sjfv 572266423Sjfv /* initialize spin locks */ 573266423Sjfv i40e_init_spinlock(&hw->aq.asq_spinlock); 574266423Sjfv i40e_init_spinlock(&hw->aq.arq_spinlock); 575266423Sjfv 576266423Sjfv /* Set up register offsets */ 577266423Sjfv i40e_adminq_init_regs(hw); 578266423Sjfv 579269198Sjfv /* setup ASQ command write back timeout */ 580269198Sjfv hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; 581269198Sjfv 582266423Sjfv /* allocate the ASQ */ 583266423Sjfv ret_code = i40e_init_asq(hw); 584266423Sjfv if (ret_code != I40E_SUCCESS) 585266423Sjfv goto init_adminq_destroy_spinlocks; 586266423Sjfv 587266423Sjfv /* allocate the ARQ */ 588266423Sjfv ret_code = i40e_init_arq(hw); 589266423Sjfv if (ret_code != I40E_SUCCESS) 590266423Sjfv goto init_adminq_free_asq; 591266423Sjfv 592277082Sjfv /* VF has no need of firmware */ 593277082Sjfv if (i40e_is_vf(hw)) 594277082Sjfv goto init_adminq_exit; 595277082Sjfv /* There are some cases where the firmware may not be quite ready 596266423Sjfv * for AdminQ operations, so we retry the AdminQ setup a few times 597266423Sjfv * if we see timeouts in this first AQ call. 
598266423Sjfv */ 599266423Sjfv do { 600266423Sjfv ret_code = i40e_aq_get_firmware_version(hw, 601266423Sjfv &hw->aq.fw_maj_ver, 602266423Sjfv &hw->aq.fw_min_ver, 603277082Sjfv &hw->aq.fw_build, 604266423Sjfv &hw->aq.api_maj_ver, 605266423Sjfv &hw->aq.api_min_ver, 606266423Sjfv NULL); 607266423Sjfv if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT) 608266423Sjfv break; 609266423Sjfv retry++; 610266423Sjfv i40e_msec_delay(100); 611266423Sjfv i40e_resume_aq(hw); 612266423Sjfv } while (retry < 10); 613266423Sjfv if (ret_code != I40E_SUCCESS) 614266423Sjfv goto init_adminq_free_arq; 615266423Sjfv 616266423Sjfv /* get the NVM version info */ 617266423Sjfv i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version); 618266423Sjfv i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); 619266423Sjfv i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); 620266423Sjfv hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; 621266423Sjfv 622266423Sjfv if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { 623266423Sjfv ret_code = I40E_ERR_FIRMWARE_API_VERSION; 624266423Sjfv goto init_adminq_free_arq; 625266423Sjfv } 626266423Sjfv 627266423Sjfv /* pre-emptive resource lock release */ 628266423Sjfv i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); 629277082Sjfv hw->aq.nvm_release_on_done = FALSE; 630277082Sjfv hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 631266423Sjfv 632266423Sjfv ret_code = i40e_aq_set_hmc_resource_profile(hw, 633266423Sjfv I40E_HMC_PROFILE_DEFAULT, 634266423Sjfv 0, 635266423Sjfv NULL); 636266423Sjfv ret_code = I40E_SUCCESS; 637266423Sjfv 638266423Sjfv /* success! 
*/ 639266423Sjfv goto init_adminq_exit; 640266423Sjfv 641266423Sjfvinit_adminq_free_arq: 642266423Sjfv i40e_shutdown_arq(hw); 643266423Sjfvinit_adminq_free_asq: 644266423Sjfv i40e_shutdown_asq(hw); 645266423Sjfvinit_adminq_destroy_spinlocks: 646266423Sjfv i40e_destroy_spinlock(&hw->aq.asq_spinlock); 647266423Sjfv i40e_destroy_spinlock(&hw->aq.arq_spinlock); 648266423Sjfv 649266423Sjfvinit_adminq_exit: 650266423Sjfv return ret_code; 651266423Sjfv} 652266423Sjfv 653266423Sjfv/** 654266423Sjfv * i40e_shutdown_adminq - shutdown routine for the Admin Queue 655266423Sjfv * @hw: pointer to the hardware structure 656266423Sjfv **/ 657266423Sjfvenum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw) 658266423Sjfv{ 659266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 660266423Sjfv 661266423Sjfv if (i40e_check_asq_alive(hw)) 662266423Sjfv i40e_aq_queue_shutdown(hw, TRUE); 663266423Sjfv 664266423Sjfv i40e_shutdown_asq(hw); 665266423Sjfv i40e_shutdown_arq(hw); 666266423Sjfv 667266423Sjfv /* destroy the spinlocks */ 668266423Sjfv i40e_destroy_spinlock(&hw->aq.asq_spinlock); 669266423Sjfv i40e_destroy_spinlock(&hw->aq.arq_spinlock); 670266423Sjfv 671266423Sjfv return ret_code; 672266423Sjfv} 673266423Sjfv 674266423Sjfv/** 675266423Sjfv * i40e_clean_asq - cleans Admin send queue 676266423Sjfv * @hw: pointer to the hardware structure 677266423Sjfv * 678266423Sjfv * returns the number of free desc 679266423Sjfv **/ 680266423Sjfvu16 i40e_clean_asq(struct i40e_hw *hw) 681266423Sjfv{ 682266423Sjfv struct i40e_adminq_ring *asq = &(hw->aq.asq); 683266423Sjfv struct i40e_asq_cmd_details *details; 684266423Sjfv u16 ntc = asq->next_to_clean; 685266423Sjfv struct i40e_aq_desc desc_cb; 686266423Sjfv struct i40e_aq_desc *desc; 687266423Sjfv 688266423Sjfv desc = I40E_ADMINQ_DESC(*asq, ntc); 689266423Sjfv details = I40E_ADMINQ_DETAILS(*asq, ntc); 690266423Sjfv while (rd32(hw, hw->aq.asq.head) != ntc) { 691266423Sjfv i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 692266423Sjfv "%s: ntc %d 
head %d.\n", __FUNCTION__, ntc, 693266423Sjfv rd32(hw, hw->aq.asq.head)); 694266423Sjfv 695266423Sjfv if (details->callback) { 696266423Sjfv I40E_ADMINQ_CALLBACK cb_func = 697266423Sjfv (I40E_ADMINQ_CALLBACK)details->callback; 698266423Sjfv i40e_memcpy(&desc_cb, desc, 699266423Sjfv sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA); 700266423Sjfv cb_func(hw, &desc_cb); 701266423Sjfv } 702266423Sjfv i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM); 703266423Sjfv i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM); 704266423Sjfv ntc++; 705266423Sjfv if (ntc == asq->count) 706266423Sjfv ntc = 0; 707266423Sjfv desc = I40E_ADMINQ_DESC(*asq, ntc); 708266423Sjfv details = I40E_ADMINQ_DETAILS(*asq, ntc); 709266423Sjfv } 710266423Sjfv 711266423Sjfv asq->next_to_clean = ntc; 712266423Sjfv 713266423Sjfv return I40E_DESC_UNUSED(asq); 714266423Sjfv} 715266423Sjfv 716266423Sjfv/** 717266423Sjfv * i40e_asq_done - check if FW has processed the Admin Send Queue 718266423Sjfv * @hw: pointer to the hw struct 719266423Sjfv * 720266423Sjfv * Returns TRUE if the firmware has processed all descriptors on the 721266423Sjfv * admin send queue. Returns FALSE if there are still requests pending. 
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;

}

/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue.  It runs the queue, cleans the queue, etc
 *
 * Copies @desc (and @buff, for indirect commands) onto the ASQ ring,
 * bumps the hardware tail, then (unless async/postpone was requested)
 * polls the head register until the firmware consumes the descriptor or
 * hw->aq.asq_cmd_timeout milliseconds elapse.  On completion the
 * descriptor (and buffer) written back by firmware are copied back into
 * the caller's @desc/@buff, and the firmware return code is latched in
 * hw->aq.asq_last_status.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	/* A head index at or beyond the ring size means the hardware state
	 * is inconsistent (e.g. after a reset); bail before touching the ring.
	 */
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	/* count == 0 means i40e_init_asq has not run (or shutdown ran) */
	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	/* stash per-command details alongside the ring slot we are about
	 * to use; they drive the flag adjustment and wait policy below
	 */
	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	/* postpone only makes sense for async commands; reject the combo */
	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	/* writing the tail register hands the descriptor to firmware;
	 * a postponed command is left on the ring for a later kick
	 */
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
asq_send_command_exit:
	return status;
}

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values: zeroed, opcode set, and only the
 * SI (silence on error) flag raised.
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}

/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e.
It can also return how many events are
 * left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	/* an ERR flag from firmware is reported to the caller via
	 * ret_code/arq_last_status, but the event is still delivered
	 */
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	/* copy the descriptor, and as much of the message as fits in the
	 * caller's buffer (msg_len is clamped to e->buf_len)
	 */
	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	/* An NVM erase/update completion event means firmware is done with
	 * the NVM resource; release it here if the update path asked us to
	 * (outside the spinlock, since i40e_release_nvm issues an AQ command)
	 */
	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = FALSE;
		}
	}

	return ret_code;
}

/**
 * i40e_resume_aq - resume AQ processing after a PF reset
 * @hw: pointer to the hardware structure
 *
 * Registers are reset after PF reset, so rewind the driver's ring
 * indices and reprogram the ASQ/ARQ base/length registers.
 **/
void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

/* the config routine relies on the PF and VF enable masks matching */
#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
#endif
	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}