/******************************************************************************

  Copyright (c) 2013-2019, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.
31266423Sjfv 32266423Sjfv******************************************************************************/ 33266423Sjfv/*$FreeBSD: stable/11/sys/dev/ixl/i40e_adminq.c 349163 2019-06-18 00:08:02Z erj $*/ 34266423Sjfv 35266423Sjfv#include "i40e_status.h" 36266423Sjfv#include "i40e_type.h" 37266423Sjfv#include "i40e_register.h" 38266423Sjfv#include "i40e_adminq.h" 39266423Sjfv#include "i40e_prototype.h" 40266423Sjfv 41266423Sjfv/** 42266423Sjfv * i40e_adminq_init_regs - Initialize AdminQ registers 43266423Sjfv * @hw: pointer to the hardware structure 44266423Sjfv * 45266423Sjfv * This assumes the alloc_asq and alloc_arq functions have already been called 46266423Sjfv **/ 47266423Sjfvstatic void i40e_adminq_init_regs(struct i40e_hw *hw) 48266423Sjfv{ 49266423Sjfv /* set head and tail registers in our local struct */ 50270346Sjfv if (i40e_is_vf(hw)) { 51266423Sjfv hw->aq.asq.tail = I40E_VF_ATQT1; 52266423Sjfv hw->aq.asq.head = I40E_VF_ATQH1; 53266423Sjfv hw->aq.asq.len = I40E_VF_ATQLEN1; 54269198Sjfv hw->aq.asq.bal = I40E_VF_ATQBAL1; 55269198Sjfv hw->aq.asq.bah = I40E_VF_ATQBAH1; 56266423Sjfv hw->aq.arq.tail = I40E_VF_ARQT1; 57266423Sjfv hw->aq.arq.head = I40E_VF_ARQH1; 58266423Sjfv hw->aq.arq.len = I40E_VF_ARQLEN1; 59269198Sjfv hw->aq.arq.bal = I40E_VF_ARQBAL1; 60269198Sjfv hw->aq.arq.bah = I40E_VF_ARQBAH1; 61266423Sjfv } else { 62266423Sjfv hw->aq.asq.tail = I40E_PF_ATQT; 63266423Sjfv hw->aq.asq.head = I40E_PF_ATQH; 64266423Sjfv hw->aq.asq.len = I40E_PF_ATQLEN; 65269198Sjfv hw->aq.asq.bal = I40E_PF_ATQBAL; 66269198Sjfv hw->aq.asq.bah = I40E_PF_ATQBAH; 67266423Sjfv hw->aq.arq.tail = I40E_PF_ARQT; 68266423Sjfv hw->aq.arq.head = I40E_PF_ARQH; 69266423Sjfv hw->aq.arq.len = I40E_PF_ARQLEN; 70269198Sjfv hw->aq.arq.bal = I40E_PF_ARQBAL; 71269198Sjfv hw->aq.arq.bah = I40E_PF_ARQBAH; 72266423Sjfv } 73266423Sjfv} 74266423Sjfv 75266423Sjfv/** 76266423Sjfv * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings 77266423Sjfv * @hw: pointer to the hardware structure 
78266423Sjfv **/ 79266423Sjfvenum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) 80266423Sjfv{ 81266423Sjfv enum i40e_status_code ret_code; 82266423Sjfv 83266423Sjfv ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, 84266423Sjfv i40e_mem_atq_ring, 85266423Sjfv (hw->aq.num_asq_entries * 86266423Sjfv sizeof(struct i40e_aq_desc)), 87266423Sjfv I40E_ADMINQ_DESC_ALIGNMENT); 88266423Sjfv if (ret_code) 89266423Sjfv return ret_code; 90266423Sjfv 91266423Sjfv ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, 92266423Sjfv (hw->aq.num_asq_entries * 93266423Sjfv sizeof(struct i40e_asq_cmd_details))); 94266423Sjfv if (ret_code) { 95266423Sjfv i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 96266423Sjfv return ret_code; 97266423Sjfv } 98266423Sjfv 99266423Sjfv return ret_code; 100266423Sjfv} 101266423Sjfv 102266423Sjfv/** 103266423Sjfv * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings 104266423Sjfv * @hw: pointer to the hardware structure 105266423Sjfv **/ 106266423Sjfvenum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) 107266423Sjfv{ 108266423Sjfv enum i40e_status_code ret_code; 109266423Sjfv 110266423Sjfv ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, 111266423Sjfv i40e_mem_arq_ring, 112266423Sjfv (hw->aq.num_arq_entries * 113266423Sjfv sizeof(struct i40e_aq_desc)), 114266423Sjfv I40E_ADMINQ_DESC_ALIGNMENT); 115266423Sjfv 116266423Sjfv return ret_code; 117266423Sjfv} 118266423Sjfv 119266423Sjfv/** 120266423Sjfv * i40e_free_adminq_asq - Free Admin Queue send rings 121266423Sjfv * @hw: pointer to the hardware structure 122266423Sjfv * 123266423Sjfv * This assumes the posted send buffers have already been cleaned 124266423Sjfv * and de-allocated 125266423Sjfv **/ 126266423Sjfvvoid i40e_free_adminq_asq(struct i40e_hw *hw) 127266423Sjfv{ 128349163Serj i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); 129266423Sjfv i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 130266423Sjfv} 131266423Sjfv 132266423Sjfv/** 
133266423Sjfv * i40e_free_adminq_arq - Free Admin Queue receive rings 134266423Sjfv * @hw: pointer to the hardware structure 135266423Sjfv * 136266423Sjfv * This assumes the posted receive buffers have already been cleaned 137266423Sjfv * and de-allocated 138266423Sjfv **/ 139266423Sjfvvoid i40e_free_adminq_arq(struct i40e_hw *hw) 140266423Sjfv{ 141266423Sjfv i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); 142266423Sjfv} 143266423Sjfv 144266423Sjfv/** 145266423Sjfv * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue 146266423Sjfv * @hw: pointer to the hardware structure 147266423Sjfv **/ 148266423Sjfvstatic enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw) 149266423Sjfv{ 150266423Sjfv enum i40e_status_code ret_code; 151266423Sjfv struct i40e_aq_desc *desc; 152266423Sjfv struct i40e_dma_mem *bi; 153266423Sjfv int i; 154266423Sjfv 155266423Sjfv /* We'll be allocating the buffer info memory first, then we can 156266423Sjfv * allocate the mapped buffers for the event processing 157266423Sjfv */ 158266423Sjfv 159266423Sjfv /* buffer_info structures do not need alignment */ 160266423Sjfv ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, 161266423Sjfv (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); 162266423Sjfv if (ret_code) 163266423Sjfv goto alloc_arq_bufs; 164266423Sjfv hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; 165266423Sjfv 166266423Sjfv /* allocate the mapped buffers */ 167266423Sjfv for (i = 0; i < hw->aq.num_arq_entries; i++) { 168266423Sjfv bi = &hw->aq.arq.r.arq_bi[i]; 169266423Sjfv ret_code = i40e_allocate_dma_mem(hw, bi, 170266423Sjfv i40e_mem_arq_buf, 171266423Sjfv hw->aq.arq_buf_size, 172266423Sjfv I40E_ADMINQ_DESC_ALIGNMENT); 173266423Sjfv if (ret_code) 174266423Sjfv goto unwind_alloc_arq_bufs; 175266423Sjfv 176266423Sjfv /* now configure the descriptors for use */ 177266423Sjfv desc = I40E_ADMINQ_DESC(hw->aq.arq, i); 178266423Sjfv 179266423Sjfv desc->flags = 
CPU_TO_LE16(I40E_AQ_FLAG_BUF); 180266423Sjfv if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) 181266423Sjfv desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB); 182266423Sjfv desc->opcode = 0; 183266423Sjfv /* This is in accordance with Admin queue design, there is no 184266423Sjfv * register for buffer size configuration 185266423Sjfv */ 186266423Sjfv desc->datalen = CPU_TO_LE16((u16)bi->size); 187266423Sjfv desc->retval = 0; 188266423Sjfv desc->cookie_high = 0; 189266423Sjfv desc->cookie_low = 0; 190266423Sjfv desc->params.external.addr_high = 191266423Sjfv CPU_TO_LE32(I40E_HI_DWORD(bi->pa)); 192266423Sjfv desc->params.external.addr_low = 193266423Sjfv CPU_TO_LE32(I40E_LO_DWORD(bi->pa)); 194266423Sjfv desc->params.external.param0 = 0; 195266423Sjfv desc->params.external.param1 = 0; 196266423Sjfv } 197266423Sjfv 198266423Sjfvalloc_arq_bufs: 199266423Sjfv return ret_code; 200266423Sjfv 201266423Sjfvunwind_alloc_arq_bufs: 202266423Sjfv /* don't try to free the one that failed... */ 203266423Sjfv i--; 204266423Sjfv for (; i >= 0; i--) 205266423Sjfv i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); 206266423Sjfv i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); 207266423Sjfv 208266423Sjfv return ret_code; 209266423Sjfv} 210266423Sjfv 211266423Sjfv/** 212266423Sjfv * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue 213266423Sjfv * @hw: pointer to the hardware structure 214266423Sjfv **/ 215266423Sjfvstatic enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw) 216266423Sjfv{ 217266423Sjfv enum i40e_status_code ret_code; 218266423Sjfv struct i40e_dma_mem *bi; 219266423Sjfv int i; 220266423Sjfv 221266423Sjfv /* No mapped memory needed yet, just the buffer info structures */ 222266423Sjfv ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, 223266423Sjfv (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); 224266423Sjfv if (ret_code) 225266423Sjfv goto alloc_asq_bufs; 226266423Sjfv hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; 
227266423Sjfv 228266423Sjfv /* allocate the mapped buffers */ 229266423Sjfv for (i = 0; i < hw->aq.num_asq_entries; i++) { 230266423Sjfv bi = &hw->aq.asq.r.asq_bi[i]; 231266423Sjfv ret_code = i40e_allocate_dma_mem(hw, bi, 232266423Sjfv i40e_mem_asq_buf, 233266423Sjfv hw->aq.asq_buf_size, 234266423Sjfv I40E_ADMINQ_DESC_ALIGNMENT); 235266423Sjfv if (ret_code) 236266423Sjfv goto unwind_alloc_asq_bufs; 237266423Sjfv } 238266423Sjfvalloc_asq_bufs: 239266423Sjfv return ret_code; 240266423Sjfv 241266423Sjfvunwind_alloc_asq_bufs: 242266423Sjfv /* don't try to free the one that failed... */ 243266423Sjfv i--; 244266423Sjfv for (; i >= 0; i--) 245266423Sjfv i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); 246266423Sjfv i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); 247266423Sjfv 248266423Sjfv return ret_code; 249266423Sjfv} 250266423Sjfv 251266423Sjfv/** 252266423Sjfv * i40e_free_arq_bufs - Free receive queue buffer info elements 253266423Sjfv * @hw: pointer to the hardware structure 254266423Sjfv **/ 255266423Sjfvstatic void i40e_free_arq_bufs(struct i40e_hw *hw) 256266423Sjfv{ 257266423Sjfv int i; 258266423Sjfv 259266423Sjfv /* free descriptors */ 260266423Sjfv for (i = 0; i < hw->aq.num_arq_entries; i++) 261266423Sjfv i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); 262266423Sjfv 263266423Sjfv /* free the descriptor memory */ 264266423Sjfv i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); 265266423Sjfv 266266423Sjfv /* free the dma header */ 267266423Sjfv i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); 268266423Sjfv} 269266423Sjfv 270266423Sjfv/** 271266423Sjfv * i40e_free_asq_bufs - Free send queue buffer info elements 272266423Sjfv * @hw: pointer to the hardware structure 273266423Sjfv **/ 274266423Sjfvstatic void i40e_free_asq_bufs(struct i40e_hw *hw) 275266423Sjfv{ 276266423Sjfv int i; 277266423Sjfv 278266423Sjfv /* only unmap if the address is non-NULL */ 279266423Sjfv for (i = 0; i < hw->aq.num_asq_entries; i++) 280266423Sjfv if (hw->aq.asq.r.asq_bi[i].pa) 281266423Sjfv 
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); 282266423Sjfv 283266423Sjfv /* free the buffer info list */ 284266423Sjfv i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); 285266423Sjfv 286266423Sjfv /* free the descriptor memory */ 287266423Sjfv i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); 288266423Sjfv 289266423Sjfv /* free the dma header */ 290266423Sjfv i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); 291266423Sjfv} 292266423Sjfv 293266423Sjfv/** 294266423Sjfv * i40e_config_asq_regs - configure ASQ registers 295266423Sjfv * @hw: pointer to the hardware structure 296266423Sjfv * 297266423Sjfv * Configure base address and length registers for the transmit queue 298266423Sjfv **/ 299266423Sjfvstatic enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw) 300266423Sjfv{ 301266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 302266423Sjfv u32 reg = 0; 303266423Sjfv 304266423Sjfv /* Clear Head and Tail */ 305266423Sjfv wr32(hw, hw->aq.asq.head, 0); 306266423Sjfv wr32(hw, hw->aq.asq.tail, 0); 307266423Sjfv 308269198Sjfv /* set starting point */ 309284049Sjfv if (!i40e_is_vf(hw)) 310284049Sjfv wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | 311284049Sjfv I40E_PF_ATQLEN_ATQENABLE_MASK)); 312284049Sjfv if (i40e_is_vf(hw)) 313284049Sjfv wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | 314284049Sjfv I40E_VF_ATQLEN1_ATQENABLE_MASK)); 315269198Sjfv wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa)); 316269198Sjfv wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa)); 317266423Sjfv 318266423Sjfv /* Check one register to verify that config was applied */ 319269198Sjfv reg = rd32(hw, hw->aq.asq.bal); 320266423Sjfv if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa)) 321266423Sjfv ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; 322266423Sjfv 323266423Sjfv return ret_code; 324266423Sjfv} 325266423Sjfv 326266423Sjfv/** 327266423Sjfv * i40e_config_arq_regs - ARQ register configuration 328266423Sjfv * @hw: pointer to the hardware structure 329266423Sjfv * 
330266423Sjfv * Configure base address and length registers for the receive (event queue) 331266423Sjfv **/ 332266423Sjfvstatic enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw) 333266423Sjfv{ 334266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 335266423Sjfv u32 reg = 0; 336266423Sjfv 337266423Sjfv /* Clear Head and Tail */ 338266423Sjfv wr32(hw, hw->aq.arq.head, 0); 339266423Sjfv wr32(hw, hw->aq.arq.tail, 0); 340266423Sjfv 341269198Sjfv /* set starting point */ 342284049Sjfv if (!i40e_is_vf(hw)) 343284049Sjfv wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | 344284049Sjfv I40E_PF_ARQLEN_ARQENABLE_MASK)); 345284049Sjfv if (i40e_is_vf(hw)) 346284049Sjfv wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | 347284049Sjfv I40E_VF_ARQLEN1_ARQENABLE_MASK)); 348269198Sjfv wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa)); 349269198Sjfv wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa)); 350266423Sjfv 351266423Sjfv /* Update tail in the HW to post pre-allocated buffers */ 352266423Sjfv wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); 353266423Sjfv 354266423Sjfv /* Check one register to verify that config was applied */ 355269198Sjfv reg = rd32(hw, hw->aq.arq.bal); 356266423Sjfv if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa)) 357266423Sjfv ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; 358266423Sjfv 359266423Sjfv return ret_code; 360266423Sjfv} 361266423Sjfv 362266423Sjfv/** 363266423Sjfv * i40e_init_asq - main initialization routine for ASQ 364266423Sjfv * @hw: pointer to the hardware structure 365266423Sjfv * 366266423Sjfv * This is the main initialization routine for the Admin Send Queue 367266423Sjfv * Prior to calling this function, drivers *MUST* set the following fields 368266423Sjfv * in the hw->aq structure: 369266423Sjfv * - hw->aq.num_asq_entries 370266423Sjfv * - hw->aq.arq_buf_size 371266423Sjfv * 372266423Sjfv * Do *NOT* hold the lock when calling this as the memory allocation routines 373266423Sjfv * called are 
not going to be atomic context safe 374266423Sjfv **/ 375266423Sjfvenum i40e_status_code i40e_init_asq(struct i40e_hw *hw) 376266423Sjfv{ 377266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 378266423Sjfv 379266423Sjfv if (hw->aq.asq.count > 0) { 380266423Sjfv /* queue already initialized */ 381266423Sjfv ret_code = I40E_ERR_NOT_READY; 382266423Sjfv goto init_adminq_exit; 383266423Sjfv } 384266423Sjfv 385266423Sjfv /* verify input for valid configuration */ 386266423Sjfv if ((hw->aq.num_asq_entries == 0) || 387266423Sjfv (hw->aq.asq_buf_size == 0)) { 388266423Sjfv ret_code = I40E_ERR_CONFIG; 389266423Sjfv goto init_adminq_exit; 390266423Sjfv } 391266423Sjfv 392266423Sjfv hw->aq.asq.next_to_use = 0; 393266423Sjfv hw->aq.asq.next_to_clean = 0; 394266423Sjfv 395266423Sjfv /* allocate the ring memory */ 396266423Sjfv ret_code = i40e_alloc_adminq_asq_ring(hw); 397266423Sjfv if (ret_code != I40E_SUCCESS) 398266423Sjfv goto init_adminq_exit; 399266423Sjfv 400266423Sjfv /* allocate buffers in the rings */ 401266423Sjfv ret_code = i40e_alloc_asq_bufs(hw); 402266423Sjfv if (ret_code != I40E_SUCCESS) 403266423Sjfv goto init_adminq_free_rings; 404266423Sjfv 405266423Sjfv /* initialize base registers */ 406266423Sjfv ret_code = i40e_config_asq_regs(hw); 407266423Sjfv if (ret_code != I40E_SUCCESS) 408349163Serj goto init_config_regs; 409266423Sjfv 410266423Sjfv /* success! 
*/ 411299548Serj hw->aq.asq.count = hw->aq.num_asq_entries; 412266423Sjfv goto init_adminq_exit; 413266423Sjfv 414266423Sjfvinit_adminq_free_rings: 415266423Sjfv i40e_free_adminq_asq(hw); 416349163Serj return ret_code; 417266423Sjfv 418349163Serjinit_config_regs: 419349163Serj i40e_free_asq_bufs(hw); 420349163Serj 421266423Sjfvinit_adminq_exit: 422266423Sjfv return ret_code; 423266423Sjfv} 424266423Sjfv 425266423Sjfv/** 426266423Sjfv * i40e_init_arq - initialize ARQ 427266423Sjfv * @hw: pointer to the hardware structure 428266423Sjfv * 429266423Sjfv * The main initialization routine for the Admin Receive (Event) Queue. 430266423Sjfv * Prior to calling this function, drivers *MUST* set the following fields 431266423Sjfv * in the hw->aq structure: 432266423Sjfv * - hw->aq.num_asq_entries 433266423Sjfv * - hw->aq.arq_buf_size 434266423Sjfv * 435266423Sjfv * Do *NOT* hold the lock when calling this as the memory allocation routines 436266423Sjfv * called are not going to be atomic context safe 437266423Sjfv **/ 438266423Sjfvenum i40e_status_code i40e_init_arq(struct i40e_hw *hw) 439266423Sjfv{ 440266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 441266423Sjfv 442266423Sjfv if (hw->aq.arq.count > 0) { 443266423Sjfv /* queue already initialized */ 444266423Sjfv ret_code = I40E_ERR_NOT_READY; 445266423Sjfv goto init_adminq_exit; 446266423Sjfv } 447266423Sjfv 448266423Sjfv /* verify input for valid configuration */ 449266423Sjfv if ((hw->aq.num_arq_entries == 0) || 450266423Sjfv (hw->aq.arq_buf_size == 0)) { 451266423Sjfv ret_code = I40E_ERR_CONFIG; 452266423Sjfv goto init_adminq_exit; 453266423Sjfv } 454266423Sjfv 455266423Sjfv hw->aq.arq.next_to_use = 0; 456266423Sjfv hw->aq.arq.next_to_clean = 0; 457266423Sjfv 458266423Sjfv /* allocate the ring memory */ 459266423Sjfv ret_code = i40e_alloc_adminq_arq_ring(hw); 460266423Sjfv if (ret_code != I40E_SUCCESS) 461266423Sjfv goto init_adminq_exit; 462266423Sjfv 463266423Sjfv /* allocate buffers in the rings */ 
464266423Sjfv ret_code = i40e_alloc_arq_bufs(hw); 465266423Sjfv if (ret_code != I40E_SUCCESS) 466266423Sjfv goto init_adminq_free_rings; 467266423Sjfv 468266423Sjfv /* initialize base registers */ 469266423Sjfv ret_code = i40e_config_arq_regs(hw); 470266423Sjfv if (ret_code != I40E_SUCCESS) 471266423Sjfv goto init_adminq_free_rings; 472266423Sjfv 473266423Sjfv /* success! */ 474299548Serj hw->aq.arq.count = hw->aq.num_arq_entries; 475266423Sjfv goto init_adminq_exit; 476266423Sjfv 477266423Sjfvinit_adminq_free_rings: 478266423Sjfv i40e_free_adminq_arq(hw); 479266423Sjfv 480266423Sjfvinit_adminq_exit: 481266423Sjfv return ret_code; 482266423Sjfv} 483266423Sjfv 484266423Sjfv/** 485266423Sjfv * i40e_shutdown_asq - shutdown the ASQ 486266423Sjfv * @hw: pointer to the hardware structure 487266423Sjfv * 488266423Sjfv * The main shutdown routine for the Admin Send Queue 489266423Sjfv **/ 490266423Sjfvenum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw) 491266423Sjfv{ 492266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 493266423Sjfv 494299545Serj i40e_acquire_spinlock(&hw->aq.asq_spinlock); 495266423Sjfv 496299545Serj if (hw->aq.asq.count == 0) { 497299545Serj ret_code = I40E_ERR_NOT_READY; 498299545Serj goto shutdown_asq_out; 499299545Serj } 500299545Serj 501266423Sjfv /* Stop firmware AdminQ processing */ 502266423Sjfv wr32(hw, hw->aq.asq.head, 0); 503266423Sjfv wr32(hw, hw->aq.asq.tail, 0); 504266423Sjfv wr32(hw, hw->aq.asq.len, 0); 505269198Sjfv wr32(hw, hw->aq.asq.bal, 0); 506269198Sjfv wr32(hw, hw->aq.asq.bah, 0); 507266423Sjfv 508266423Sjfv hw->aq.asq.count = 0; /* to indicate uninitialized queue */ 509266423Sjfv 510266423Sjfv /* free ring buffers */ 511266423Sjfv i40e_free_asq_bufs(hw); 512266423Sjfv 513299545Serjshutdown_asq_out: 514266423Sjfv i40e_release_spinlock(&hw->aq.asq_spinlock); 515266423Sjfv return ret_code; 516266423Sjfv} 517266423Sjfv 518266423Sjfv/** 519266423Sjfv * i40e_shutdown_arq - shutdown ARQ 520266423Sjfv * @hw: pointer to the 
hardware structure 521266423Sjfv * 522266423Sjfv * The main shutdown routine for the Admin Receive Queue 523266423Sjfv **/ 524266423Sjfvenum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw) 525266423Sjfv{ 526266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 527266423Sjfv 528299545Serj i40e_acquire_spinlock(&hw->aq.arq_spinlock); 529266423Sjfv 530299545Serj if (hw->aq.arq.count == 0) { 531299545Serj ret_code = I40E_ERR_NOT_READY; 532299545Serj goto shutdown_arq_out; 533299545Serj } 534299545Serj 535266423Sjfv /* Stop firmware AdminQ processing */ 536266423Sjfv wr32(hw, hw->aq.arq.head, 0); 537266423Sjfv wr32(hw, hw->aq.arq.tail, 0); 538266423Sjfv wr32(hw, hw->aq.arq.len, 0); 539269198Sjfv wr32(hw, hw->aq.arq.bal, 0); 540269198Sjfv wr32(hw, hw->aq.arq.bah, 0); 541266423Sjfv 542266423Sjfv hw->aq.arq.count = 0; /* to indicate uninitialized queue */ 543266423Sjfv 544266423Sjfv /* free ring buffers */ 545266423Sjfv i40e_free_arq_bufs(hw); 546266423Sjfv 547299545Serjshutdown_arq_out: 548266423Sjfv i40e_release_spinlock(&hw->aq.arq_spinlock); 549266423Sjfv return ret_code; 550266423Sjfv} 551266423Sjfv 552266423Sjfv/** 553299555Serj * i40e_resume_aq - resume AQ processing from 0 554299555Serj * @hw: pointer to the hardware structure 555299555Serj **/ 556299555Serjstatic void i40e_resume_aq(struct i40e_hw *hw) 557299555Serj{ 558299555Serj /* Registers are reset after PF reset */ 559299555Serj hw->aq.asq.next_to_use = 0; 560299555Serj hw->aq.asq.next_to_clean = 0; 561299555Serj 562299555Serj i40e_config_asq_regs(hw); 563299555Serj 564299555Serj hw->aq.arq.next_to_use = 0; 565299555Serj hw->aq.arq.next_to_clean = 0; 566299555Serj 567299555Serj i40e_config_arq_regs(hw); 568299555Serj} 569299555Serj 570299555Serj/** 571266423Sjfv * i40e_init_adminq - main initialization routine for Admin Queue 572266423Sjfv * @hw: pointer to the hardware structure 573266423Sjfv * 574266423Sjfv * Prior to calling this function, drivers *MUST* set the following fields 575266423Sjfv * 
in the hw->aq structure: 576266423Sjfv * - hw->aq.num_asq_entries 577266423Sjfv * - hw->aq.num_arq_entries 578266423Sjfv * - hw->aq.arq_buf_size 579266423Sjfv * - hw->aq.asq_buf_size 580266423Sjfv **/ 581266423Sjfvenum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) 582266423Sjfv{ 583349163Serj struct i40e_adminq_info *aq = &hw->aq; 584349163Serj enum i40e_status_code ret_code; 585299555Serj u16 cfg_ptr, oem_hi, oem_lo; 586299555Serj u16 eetrack_lo, eetrack_hi; 587266423Sjfv int retry = 0; 588299555Serj 589266423Sjfv /* verify input for valid configuration */ 590349163Serj if (aq->num_arq_entries == 0 || 591349163Serj aq->num_asq_entries == 0 || 592349163Serj aq->arq_buf_size == 0 || 593349163Serj aq->asq_buf_size == 0) { 594266423Sjfv ret_code = I40E_ERR_CONFIG; 595266423Sjfv goto init_adminq_exit; 596266423Sjfv } 597349163Serj i40e_init_spinlock(&aq->asq_spinlock); 598349163Serj i40e_init_spinlock(&aq->arq_spinlock); 599266423Sjfv 600266423Sjfv /* Set up register offsets */ 601266423Sjfv i40e_adminq_init_regs(hw); 602266423Sjfv 603269198Sjfv /* setup ASQ command write back timeout */ 604269198Sjfv hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; 605269198Sjfv 606266423Sjfv /* allocate the ASQ */ 607266423Sjfv ret_code = i40e_init_asq(hw); 608266423Sjfv if (ret_code != I40E_SUCCESS) 609266423Sjfv goto init_adminq_destroy_spinlocks; 610266423Sjfv 611266423Sjfv /* allocate the ARQ */ 612266423Sjfv ret_code = i40e_init_arq(hw); 613266423Sjfv if (ret_code != I40E_SUCCESS) 614266423Sjfv goto init_adminq_free_asq; 615266423Sjfv 616277082Sjfv /* VF has no need of firmware */ 617277082Sjfv if (i40e_is_vf(hw)) 618277082Sjfv goto init_adminq_exit; 619277082Sjfv /* There are some cases where the firmware may not be quite ready 620266423Sjfv * for AdminQ operations, so we retry the AdminQ setup a few times 621266423Sjfv * if we see timeouts in this first AQ call. 
622266423Sjfv */ 623266423Sjfv do { 624266423Sjfv ret_code = i40e_aq_get_firmware_version(hw, 625349163Serj &aq->fw_maj_ver, 626349163Serj &aq->fw_min_ver, 627349163Serj &aq->fw_build, 628349163Serj &aq->api_maj_ver, 629349163Serj &aq->api_min_ver, 630266423Sjfv NULL); 631266423Sjfv if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT) 632266423Sjfv break; 633266423Sjfv retry++; 634266423Sjfv i40e_msec_delay(100); 635266423Sjfv i40e_resume_aq(hw); 636266423Sjfv } while (retry < 10); 637266423Sjfv if (ret_code != I40E_SUCCESS) 638266423Sjfv goto init_adminq_free_arq; 639266423Sjfv 640266423Sjfv /* get the NVM version info */ 641279033Sjfv i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, 642279033Sjfv &hw->nvm.version); 643266423Sjfv i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); 644266423Sjfv i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); 645266423Sjfv hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; 646284049Sjfv i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr); 647284049Sjfv i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF), 648284049Sjfv &oem_hi); 649284049Sjfv i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)), 650284049Sjfv &oem_lo); 651284049Sjfv hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; 652349163Serj /* 653349163Serj * Some features were introduced in different FW API version 654349163Serj * for different MAC type. 
655349163Serj */ 656349163Serj switch (hw->mac.type) { 657349163Serj case I40E_MAC_XL710: 658349163Serj if (aq->api_maj_ver > 1 || 659349163Serj (aq->api_maj_ver == 1 && 660349163Serj aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) { 661349163Serj hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; 662349163Serj hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; 663349163Serj /* The ability to RX (not drop) 802.1ad frames */ 664349163Serj hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; 665349163Serj } 666349163Serj break; 667349163Serj case I40E_MAC_X722: 668349163Serj if (aq->api_maj_ver > 1 || 669349163Serj (aq->api_maj_ver == 1 && 670349163Serj aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)) 671349163Serj hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; 672349163Serj /* fall through */ 673349163Serj default: 674349163Serj break; 675333343Serj } 676333343Serj 677333343Serj /* Newer versions of firmware require lock when reading the NVM */ 678349163Serj if (aq->api_maj_ver > 1 || 679349163Serj (aq->api_maj_ver == 1 && 680349163Serj aq->api_min_ver >= 5)) 681333343Serj hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; 682333343Serj 683349163Serj if (aq->api_maj_ver > 1 || 684349163Serj (aq->api_maj_ver == 1 && 685349163Serj aq->api_min_ver >= 8)) 686349163Serj hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT; 687349163Serj 688349163Serj if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) { 689266423Sjfv ret_code = I40E_ERR_FIRMWARE_API_VERSION; 690266423Sjfv goto init_adminq_free_arq; 691266423Sjfv } 692266423Sjfv 693266423Sjfv /* pre-emptive resource lock release */ 694266423Sjfv i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); 695303967Ssbruno hw->nvm_release_on_done = FALSE; 696277082Sjfv hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; 697266423Sjfv 698266423Sjfv ret_code = I40E_SUCCESS; 699266423Sjfv 700266423Sjfv /* success! 
*/ 701266423Sjfv goto init_adminq_exit; 702266423Sjfv 703266423Sjfvinit_adminq_free_arq: 704266423Sjfv i40e_shutdown_arq(hw); 705266423Sjfvinit_adminq_free_asq: 706266423Sjfv i40e_shutdown_asq(hw); 707266423Sjfvinit_adminq_destroy_spinlocks: 708349163Serj i40e_destroy_spinlock(&aq->asq_spinlock); 709349163Serj i40e_destroy_spinlock(&aq->arq_spinlock); 710266423Sjfv 711266423Sjfvinit_adminq_exit: 712266423Sjfv return ret_code; 713266423Sjfv} 714266423Sjfv 715266423Sjfv/** 716266423Sjfv * i40e_shutdown_adminq - shutdown routine for the Admin Queue 717266423Sjfv * @hw: pointer to the hardware structure 718266423Sjfv **/ 719266423Sjfvenum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw) 720266423Sjfv{ 721266423Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 722266423Sjfv 723266423Sjfv if (i40e_check_asq_alive(hw)) 724266423Sjfv i40e_aq_queue_shutdown(hw, TRUE); 725266423Sjfv 726266423Sjfv i40e_shutdown_asq(hw); 727266423Sjfv i40e_shutdown_arq(hw); 728266423Sjfv i40e_destroy_spinlock(&hw->aq.asq_spinlock); 729266423Sjfv i40e_destroy_spinlock(&hw->aq.arq_spinlock); 730266423Sjfv 731284049Sjfv if (hw->nvm_buff.va) 732284049Sjfv i40e_free_virt_mem(hw, &hw->nvm_buff); 733284049Sjfv 734266423Sjfv return ret_code; 735266423Sjfv} 736266423Sjfv 737266423Sjfv/** 738266423Sjfv * i40e_clean_asq - cleans Admin send queue 739266423Sjfv * @hw: pointer to the hardware structure 740266423Sjfv * 741266423Sjfv * returns the number of free desc 742266423Sjfv **/ 743266423Sjfvu16 i40e_clean_asq(struct i40e_hw *hw) 744266423Sjfv{ 745266423Sjfv struct i40e_adminq_ring *asq = &(hw->aq.asq); 746266423Sjfv struct i40e_asq_cmd_details *details; 747266423Sjfv u16 ntc = asq->next_to_clean; 748266423Sjfv struct i40e_aq_desc desc_cb; 749266423Sjfv struct i40e_aq_desc *desc; 750266423Sjfv 751266423Sjfv desc = I40E_ADMINQ_DESC(*asq, ntc); 752266423Sjfv details = I40E_ADMINQ_DETAILS(*asq, ntc); 753266423Sjfv while (rd32(hw, hw->aq.asq.head) != ntc) { 754349163Serj i40e_debug(hw, 
I40E_DEBUG_AQ_COMMAND, 755284049Sjfv "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); 756266423Sjfv 757266423Sjfv if (details->callback) { 758266423Sjfv I40E_ADMINQ_CALLBACK cb_func = 759266423Sjfv (I40E_ADMINQ_CALLBACK)details->callback; 760284049Sjfv i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc), 761284049Sjfv I40E_DMA_TO_DMA); 762266423Sjfv cb_func(hw, &desc_cb); 763266423Sjfv } 764266423Sjfv i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM); 765266423Sjfv i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM); 766266423Sjfv ntc++; 767266423Sjfv if (ntc == asq->count) 768266423Sjfv ntc = 0; 769266423Sjfv desc = I40E_ADMINQ_DESC(*asq, ntc); 770266423Sjfv details = I40E_ADMINQ_DETAILS(*asq, ntc); 771266423Sjfv } 772266423Sjfv 773266423Sjfv asq->next_to_clean = ntc; 774266423Sjfv 775266423Sjfv return I40E_DESC_UNUSED(asq); 776266423Sjfv} 777266423Sjfv 778266423Sjfv/** 779266423Sjfv * i40e_asq_done - check if FW has processed the Admin Send Queue 780266423Sjfv * @hw: pointer to the hw struct 781266423Sjfv * 782266423Sjfv * Returns TRUE if the firmware has processed all descriptors on the 783266423Sjfv * admin send queue. Returns FALSE if there are still requests pending. 
784266423Sjfv **/ 785266423Sjfvbool i40e_asq_done(struct i40e_hw *hw) 786266423Sjfv{ 787266423Sjfv /* AQ designers suggest use of head for better 788266423Sjfv * timing reliability than DD bit 789266423Sjfv */ 790266423Sjfv return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; 791266423Sjfv 792266423Sjfv} 793266423Sjfv 794266423Sjfv/** 795266423Sjfv * i40e_asq_send_command - send command to Admin Queue 796266423Sjfv * @hw: pointer to the hw struct 797266423Sjfv * @desc: prefilled descriptor describing the command (non DMA mem) 798266423Sjfv * @buff: buffer to use for indirect commands 799266423Sjfv * @buff_size: size of buffer for indirect commands 800266423Sjfv * @cmd_details: pointer to command details structure 801266423Sjfv * 802266423Sjfv * This is the main send command driver routine for the Admin Queue send 803266423Sjfv * queue. It runs the queue, cleans the queue, etc 804266423Sjfv **/ 805266423Sjfvenum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, 806266423Sjfv struct i40e_aq_desc *desc, 807266423Sjfv void *buff, /* can be NULL */ 808266423Sjfv u16 buff_size, 809266423Sjfv struct i40e_asq_cmd_details *cmd_details) 810266423Sjfv{ 811266423Sjfv enum i40e_status_code status = I40E_SUCCESS; 812266423Sjfv struct i40e_dma_mem *dma_buff = NULL; 813266423Sjfv struct i40e_asq_cmd_details *details; 814266423Sjfv struct i40e_aq_desc *desc_on_ring; 815266423Sjfv bool cmd_completed = FALSE; 816266423Sjfv u16 retval = 0; 817266423Sjfv u32 val = 0; 818266423Sjfv 819299545Serj i40e_acquire_spinlock(&hw->aq.asq_spinlock); 820299545Serj 821284049Sjfv hw->aq.asq_last_status = I40E_AQ_RC_OK; 822284049Sjfv 823299545Serj if (hw->aq.asq.count == 0) { 824266423Sjfv i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 825299545Serj "AQTX: Admin queue not initialized.\n"); 826266423Sjfv status = I40E_ERR_QUEUE_EMPTY; 827299545Serj goto asq_send_command_error; 828266423Sjfv } 829266423Sjfv 830299545Serj val = rd32(hw, hw->aq.asq.head); 831299545Serj if (val >= 
hw->aq.num_asq_entries) { 832266423Sjfv i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 833299545Serj "AQTX: head overrun at %d\n", val); 834349163Serj status = I40E_ERR_ADMIN_QUEUE_FULL; 835299545Serj goto asq_send_command_error; 836266423Sjfv } 837266423Sjfv 838266423Sjfv details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); 839266423Sjfv if (cmd_details) { 840266423Sjfv i40e_memcpy(details, 841266423Sjfv cmd_details, 842266423Sjfv sizeof(struct i40e_asq_cmd_details), 843266423Sjfv I40E_NONDMA_TO_NONDMA); 844266423Sjfv 845266423Sjfv /* If the cmd_details are defined copy the cookie. The 846266423Sjfv * CPU_TO_LE32 is not needed here because the data is ignored 847266423Sjfv * by the FW, only used by the driver 848266423Sjfv */ 849266423Sjfv if (details->cookie) { 850266423Sjfv desc->cookie_high = 851266423Sjfv CPU_TO_LE32(I40E_HI_DWORD(details->cookie)); 852266423Sjfv desc->cookie_low = 853266423Sjfv CPU_TO_LE32(I40E_LO_DWORD(details->cookie)); 854266423Sjfv } 855266423Sjfv } else { 856266423Sjfv i40e_memset(details, 0, 857266423Sjfv sizeof(struct i40e_asq_cmd_details), 858266423Sjfv I40E_NONDMA_MEM); 859266423Sjfv } 860266423Sjfv 861266423Sjfv /* clear requested flags and then set additional flags if defined */ 862266423Sjfv desc->flags &= ~CPU_TO_LE16(details->flags_dis); 863266423Sjfv desc->flags |= CPU_TO_LE16(details->flags_ena); 864266423Sjfv 865266423Sjfv if (buff_size > hw->aq.asq_buf_size) { 866266423Sjfv i40e_debug(hw, 867266423Sjfv I40E_DEBUG_AQ_MESSAGE, 868266423Sjfv "AQTX: Invalid buffer size: %d.\n", 869266423Sjfv buff_size); 870266423Sjfv status = I40E_ERR_INVALID_SIZE; 871266423Sjfv goto asq_send_command_error; 872266423Sjfv } 873266423Sjfv 874266423Sjfv if (details->postpone && !details->async) { 875266423Sjfv i40e_debug(hw, 876266423Sjfv I40E_DEBUG_AQ_MESSAGE, 877266423Sjfv "AQTX: Async flag not set along with postpone flag"); 878266423Sjfv status = I40E_ERR_PARAM; 879266423Sjfv goto asq_send_command_error; 880266423Sjfv } 881266423Sjfv 
882266423Sjfv /* call clean and check queue available function to reclaim the 883266423Sjfv * descriptors that were processed by FW, the function returns the 884266423Sjfv * number of desc available 885266423Sjfv */ 886266423Sjfv /* the clean function called here could be called in a separate thread 887266423Sjfv * in case of asynchronous completions 888266423Sjfv */ 889266423Sjfv if (i40e_clean_asq(hw) == 0) { 890266423Sjfv i40e_debug(hw, 891266423Sjfv I40E_DEBUG_AQ_MESSAGE, 892266423Sjfv "AQTX: Error queue is full.\n"); 893266423Sjfv status = I40E_ERR_ADMIN_QUEUE_FULL; 894266423Sjfv goto asq_send_command_error; 895266423Sjfv } 896266423Sjfv 897266423Sjfv /* initialize the temp desc pointer with the right desc */ 898266423Sjfv desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); 899266423Sjfv 900266423Sjfv /* if the desc is available copy the temp desc to the right place */ 901266423Sjfv i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc), 902266423Sjfv I40E_NONDMA_TO_DMA); 903266423Sjfv 904266423Sjfv /* if buff is not NULL assume indirect command */ 905266423Sjfv if (buff != NULL) { 906266423Sjfv dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); 907266423Sjfv /* copy the user buff into the respective DMA buff */ 908266423Sjfv i40e_memcpy(dma_buff->va, buff, buff_size, 909266423Sjfv I40E_NONDMA_TO_DMA); 910266423Sjfv desc_on_ring->datalen = CPU_TO_LE16(buff_size); 911266423Sjfv 912266423Sjfv /* Update the address values in the desc with the pa value 913266423Sjfv * for respective buffer 914266423Sjfv */ 915266423Sjfv desc_on_ring->params.external.addr_high = 916266423Sjfv CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa)); 917266423Sjfv desc_on_ring->params.external.addr_low = 918266423Sjfv CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa)); 919266423Sjfv } 920266423Sjfv 921266423Sjfv /* bump the tail */ 922349163Serj i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n"); 923269198Sjfv i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void 
*)desc_on_ring, 924269198Sjfv buff, buff_size); 925266423Sjfv (hw->aq.asq.next_to_use)++; 926266423Sjfv if (hw->aq.asq.next_to_use == hw->aq.asq.count) 927266423Sjfv hw->aq.asq.next_to_use = 0; 928266423Sjfv if (!details->postpone) 929266423Sjfv wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); 930266423Sjfv 931266423Sjfv /* if cmd_details are not defined or async flag is not set, 932266423Sjfv * we need to wait for desc write back 933266423Sjfv */ 934266423Sjfv if (!details->async && !details->postpone) { 935266423Sjfv u32 total_delay = 0; 936266423Sjfv 937266423Sjfv do { 938266423Sjfv /* AQ designers suggest use of head for better 939266423Sjfv * timing reliability than DD bit 940266423Sjfv */ 941266423Sjfv if (i40e_asq_done(hw)) 942266423Sjfv break; 943333343Serj i40e_usec_delay(50); 944333343Serj total_delay += 50; 945269198Sjfv } while (total_delay < hw->aq.asq_cmd_timeout); 946266423Sjfv } 947266423Sjfv 948266423Sjfv /* if ready, copy the desc back to temp */ 949266423Sjfv if (i40e_asq_done(hw)) { 950266423Sjfv i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc), 951266423Sjfv I40E_DMA_TO_NONDMA); 952266423Sjfv if (buff != NULL) 953266423Sjfv i40e_memcpy(buff, dma_buff->va, buff_size, 954266423Sjfv I40E_DMA_TO_NONDMA); 955266423Sjfv retval = LE16_TO_CPU(desc->retval); 956266423Sjfv if (retval != 0) { 957266423Sjfv i40e_debug(hw, 958266423Sjfv I40E_DEBUG_AQ_MESSAGE, 959266423Sjfv "AQTX: Command completed with error 0x%X.\n", 960266423Sjfv retval); 961266423Sjfv 962266423Sjfv /* strip off FW internal code */ 963266423Sjfv retval &= 0xff; 964266423Sjfv } 965266423Sjfv cmd_completed = TRUE; 966266423Sjfv if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) 967266423Sjfv status = I40E_SUCCESS; 968349163Serj else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY) 969349163Serj status = I40E_ERR_NOT_READY; 970266423Sjfv else 971266423Sjfv status = I40E_ERR_ADMIN_QUEUE_ERROR; 972266423Sjfv hw->aq.asq_last_status = (enum 
i40e_admin_queue_err)retval; 973266423Sjfv } 974266423Sjfv 975349163Serj i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, 976269198Sjfv "AQTX: desc and buffer writeback:\n"); 977269198Sjfv i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); 978266423Sjfv 979284049Sjfv /* save writeback aq if requested */ 980284049Sjfv if (details->wb_desc) 981284049Sjfv i40e_memcpy(details->wb_desc, desc_on_ring, 982284049Sjfv sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA); 983284049Sjfv 984266423Sjfv /* update the error if time out occurred */ 985266423Sjfv if ((!cmd_completed) && 986266423Sjfv (!details->async && !details->postpone)) { 987333343Serj if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) { 988333343Serj i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 989333343Serj "AQTX: AQ Critical error.\n"); 990333343Serj status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; 991333343Serj } else { 992333343Serj i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 993333343Serj "AQTX: Writeback timeout.\n"); 994333343Serj status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; 995333343Serj } 996266423Sjfv } 997266423Sjfv 998266423Sjfvasq_send_command_error: 999266423Sjfv i40e_release_spinlock(&hw->aq.asq_spinlock); 1000266423Sjfv return status; 1001266423Sjfv} 1002266423Sjfv 1003266423Sjfv/** 1004266423Sjfv * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function 1005266423Sjfv * @desc: pointer to the temp descriptor (non DMA mem) 1006266423Sjfv * @opcode: the opcode can be used to decide which flags to turn off or on 1007266423Sjfv * 1008266423Sjfv * Fill the desc with default values 1009266423Sjfv **/ 1010266423Sjfvvoid i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, 1011266423Sjfv u16 opcode) 1012266423Sjfv{ 1013266423Sjfv /* zero out the desc */ 1014266423Sjfv i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), 1015266423Sjfv I40E_NONDMA_MEM); 1016266423Sjfv desc->opcode = CPU_TO_LE16(opcode); 1017266423Sjfv desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI); 1018266423Sjfv} 

/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e.  It can also return how many events are
 * left to process through 'pending'
 *
 * Runs under arq_spinlock.  After copying the event out, the descriptor
 * is reset to its receive-ready state (buffer address and size restored)
 * and handed back to firmware by advancing the ARQ tail register.
 * Returns I40E_ERR_ADMIN_QUEUE_NO_WORK when the ring is empty; on that
 * path and on success *pending (if non-NULL) is set to the number of
 * events still outstanding.  On the "queue not initialized" error path
 * *pending is left untouched.
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	/* a zero count means the receive queue was never initialized */
	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	/* PF and VF expose the ARQ head through different registers/masks */
	if (!i40e_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	/* record the firmware return code before checking the error flag */
	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		/* the event is still delivered to the caller; only the
		 * return code reflects the error
		 */
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	/* clamp the copied message to the caller's buffer size */
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	/* the tail write returns this descriptor to firmware, so it must
	 * follow the descriptor re-initialization above
	 */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	/* let the NVM-update state machine observe this event, if relevant */
	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	/* the ternary handles ring wrap-around between ntc and ntu */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}