/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.
31270631Sjfv 32270631Sjfv******************************************************************************/ 33270631Sjfv/*$FreeBSD: releng/10.3/sys/dev/ixl/i40e_hmc.c 292100 2015-12-11 13:08:38Z smh $*/ 34270631Sjfv 35270631Sjfv#include "i40e_osdep.h" 36270631Sjfv#include "i40e_register.h" 37270631Sjfv#include "i40e_status.h" 38270631Sjfv#include "i40e_alloc.h" 39270631Sjfv#include "i40e_hmc.h" 40270631Sjfv#ifndef I40E_NO_TYPE_HEADER 41270631Sjfv#include "i40e_type.h" 42270631Sjfv#endif 43270631Sjfv 44270631Sjfv/** 45270631Sjfv * i40e_add_sd_table_entry - Adds a segment descriptor to the table 46270631Sjfv * @hw: pointer to our hw struct 47270631Sjfv * @hmc_info: pointer to the HMC configuration information struct 48270631Sjfv * @sd_index: segment descriptor index to manipulate 49270631Sjfv * @type: what type of segment descriptor we're manipulating 50270631Sjfv * @direct_mode_sz: size to alloc in direct mode 51270631Sjfv **/ 52270631Sjfvenum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw, 53270631Sjfv struct i40e_hmc_info *hmc_info, 54270631Sjfv u32 sd_index, 55270631Sjfv enum i40e_sd_entry_type type, 56270631Sjfv u64 direct_mode_sz) 57270631Sjfv{ 58270631Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 59270631Sjfv struct i40e_hmc_sd_entry *sd_entry; 60270631Sjfv enum i40e_memory_type mem_type; 61270631Sjfv bool dma_mem_alloc_done = FALSE; 62270631Sjfv struct i40e_dma_mem mem; 63270631Sjfv u64 alloc_len; 64270631Sjfv 65270631Sjfv if (NULL == hmc_info->sd_table.sd_entry) { 66270631Sjfv ret_code = I40E_ERR_BAD_PTR; 67270631Sjfv DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n"); 68270631Sjfv goto exit; 69270631Sjfv } 70270631Sjfv 71270631Sjfv if (sd_index >= hmc_info->sd_table.sd_cnt) { 72270631Sjfv ret_code = I40E_ERR_INVALID_SD_INDEX; 73270631Sjfv DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n"); 74270631Sjfv goto exit; 75270631Sjfv } 76270631Sjfv 77270631Sjfv sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; 78270631Sjfv if (!sd_entry->valid) { 
79270631Sjfv if (I40E_SD_TYPE_PAGED == type) { 80270631Sjfv mem_type = i40e_mem_pd; 81270631Sjfv alloc_len = I40E_HMC_PAGED_BP_SIZE; 82270631Sjfv } else { 83270631Sjfv mem_type = i40e_mem_bp_jumbo; 84270631Sjfv alloc_len = direct_mode_sz; 85270631Sjfv } 86270631Sjfv 87270631Sjfv /* allocate a 4K pd page or 2M backing page */ 88270631Sjfv ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len, 89270631Sjfv I40E_HMC_PD_BP_BUF_ALIGNMENT); 90270631Sjfv if (ret_code) 91270631Sjfv goto exit; 92270631Sjfv dma_mem_alloc_done = TRUE; 93270631Sjfv if (I40E_SD_TYPE_PAGED == type) { 94270631Sjfv ret_code = i40e_allocate_virt_mem(hw, 95270631Sjfv &sd_entry->u.pd_table.pd_entry_virt_mem, 96270631Sjfv sizeof(struct i40e_hmc_pd_entry) * 512); 97270631Sjfv if (ret_code) 98270631Sjfv goto exit; 99270631Sjfv sd_entry->u.pd_table.pd_entry = 100270631Sjfv (struct i40e_hmc_pd_entry *) 101270631Sjfv sd_entry->u.pd_table.pd_entry_virt_mem.va; 102270631Sjfv i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr, 103270631Sjfv &mem, sizeof(struct i40e_dma_mem), 104270631Sjfv I40E_NONDMA_TO_NONDMA); 105270631Sjfv } else { 106270631Sjfv i40e_memcpy(&sd_entry->u.bp.addr, 107270631Sjfv &mem, sizeof(struct i40e_dma_mem), 108270631Sjfv I40E_NONDMA_TO_NONDMA); 109270631Sjfv sd_entry->u.bp.sd_pd_index = sd_index; 110270631Sjfv } 111270631Sjfv /* initialize the sd entry */ 112270631Sjfv hmc_info->sd_table.sd_entry[sd_index].entry_type = type; 113270631Sjfv 114270631Sjfv /* increment the ref count */ 115270631Sjfv I40E_INC_SD_REFCNT(&hmc_info->sd_table); 116270631Sjfv } 117270631Sjfv /* Increment backing page reference count */ 118270631Sjfv if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type) 119270631Sjfv I40E_INC_BP_REFCNT(&sd_entry->u.bp); 120270631Sjfvexit: 121270631Sjfv if (I40E_SUCCESS != ret_code) 122270631Sjfv if (dma_mem_alloc_done) 123270631Sjfv i40e_free_dma_mem(hw, &mem); 124270631Sjfv 125270631Sjfv return ret_code; 126270631Sjfv} 127270631Sjfv 128270631Sjfv/** 129270631Sjfv * 
i40e_add_pd_table_entry - Adds page descriptor to the specified table 130270631Sjfv * @hw: pointer to our HW structure 131270631Sjfv * @hmc_info: pointer to the HMC configuration information structure 132270631Sjfv * @pd_index: which page descriptor index to manipulate 133292100Ssmh * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one. 134270631Sjfv * 135270631Sjfv * This function: 136270631Sjfv * 1. Initializes the pd entry 137270631Sjfv * 2. Adds pd_entry in the pd_table 138270631Sjfv * 3. Mark the entry valid in i40e_hmc_pd_entry structure 139270631Sjfv * 4. Initializes the pd_entry's ref count to 1 140270631Sjfv * assumptions: 141270631Sjfv * 1. The memory for pd should be pinned down, physically contiguous and 142270631Sjfv * aligned on 4K boundary and zeroed memory. 143270631Sjfv * 2. It should be 4K in size. 144270631Sjfv **/ 145270631Sjfvenum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw, 146270631Sjfv struct i40e_hmc_info *hmc_info, 147292100Ssmh u32 pd_index, 148292100Ssmh struct i40e_dma_mem *rsrc_pg) 149270631Sjfv{ 150270631Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 151270631Sjfv struct i40e_hmc_pd_table *pd_table; 152270631Sjfv struct i40e_hmc_pd_entry *pd_entry; 153270631Sjfv struct i40e_dma_mem mem; 154292100Ssmh struct i40e_dma_mem *page = &mem; 155270631Sjfv u32 sd_idx, rel_pd_idx; 156270631Sjfv u64 *pd_addr; 157270631Sjfv u64 page_desc; 158270631Sjfv 159270631Sjfv if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) { 160270631Sjfv ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; 161270631Sjfv DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n"); 162270631Sjfv goto exit; 163270631Sjfv } 164270631Sjfv 165270631Sjfv /* find corresponding sd */ 166270631Sjfv sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD); 167270631Sjfv if (I40E_SD_TYPE_PAGED != 168270631Sjfv hmc_info->sd_table.sd_entry[sd_idx].entry_type) 169270631Sjfv goto exit; 170270631Sjfv 171270631Sjfv rel_pd_idx = (pd_index % 
I40E_HMC_PD_CNT_IN_SD); 172270631Sjfv pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; 173270631Sjfv pd_entry = &pd_table->pd_entry[rel_pd_idx]; 174270631Sjfv if (!pd_entry->valid) { 175292100Ssmh if (rsrc_pg) { 176292100Ssmh pd_entry->rsrc_pg = TRUE; 177292100Ssmh page = rsrc_pg; 178292100Ssmh } else { 179292100Ssmh /* allocate a 4K backing page */ 180292100Ssmh ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp, 181292100Ssmh I40E_HMC_PAGED_BP_SIZE, 182292100Ssmh I40E_HMC_PD_BP_BUF_ALIGNMENT); 183292100Ssmh if (ret_code) 184292100Ssmh goto exit; 185292100Ssmh pd_entry->rsrc_pg = FALSE; 186292100Ssmh } 187270631Sjfv 188292100Ssmh i40e_memcpy(&pd_entry->bp.addr, page, 189270631Sjfv sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA); 190270631Sjfv pd_entry->bp.sd_pd_index = pd_index; 191270631Sjfv pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; 192270631Sjfv /* Set page address and valid bit */ 193292100Ssmh page_desc = page->pa | 0x1; 194270631Sjfv 195270631Sjfv pd_addr = (u64 *)pd_table->pd_page_addr.va; 196270631Sjfv pd_addr += rel_pd_idx; 197270631Sjfv 198270631Sjfv /* Add the backing page physical address in the pd entry */ 199270631Sjfv i40e_memcpy(pd_addr, &page_desc, sizeof(u64), 200270631Sjfv I40E_NONDMA_TO_DMA); 201270631Sjfv 202270631Sjfv pd_entry->sd_index = sd_idx; 203270631Sjfv pd_entry->valid = TRUE; 204270631Sjfv I40E_INC_PD_REFCNT(pd_table); 205270631Sjfv } 206270631Sjfv I40E_INC_BP_REFCNT(&pd_entry->bp); 207270631Sjfvexit: 208270631Sjfv return ret_code; 209270631Sjfv} 210270631Sjfv 211270631Sjfv/** 212270631Sjfv * i40e_remove_pd_bp - remove a backing page from a page descriptor 213270631Sjfv * @hw: pointer to our HW structure 214270631Sjfv * @hmc_info: pointer to the HMC configuration information structure 215270631Sjfv * @idx: the page index 216270631Sjfv * @is_pf: distinguishes a VF from a PF 217270631Sjfv * 218270631Sjfv * This function: 219270631Sjfv * 1. 
Marks the entry in pd tabe (for paged address mode) or in sd table 220270631Sjfv * (for direct address mode) invalid. 221270631Sjfv * 2. Write to register PMPDINV to invalidate the backing page in FV cache 222270631Sjfv * 3. Decrement the ref count for the pd _entry 223270631Sjfv * assumptions: 224270631Sjfv * 1. Caller can deallocate the memory used by backing storage after this 225270631Sjfv * function returns. 226270631Sjfv **/ 227270631Sjfvenum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw, 228270631Sjfv struct i40e_hmc_info *hmc_info, 229270631Sjfv u32 idx) 230270631Sjfv{ 231270631Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 232270631Sjfv struct i40e_hmc_pd_entry *pd_entry; 233270631Sjfv struct i40e_hmc_pd_table *pd_table; 234270631Sjfv struct i40e_hmc_sd_entry *sd_entry; 235270631Sjfv u32 sd_idx, rel_pd_idx; 236270631Sjfv u64 *pd_addr; 237270631Sjfv 238270631Sjfv /* calculate index */ 239270631Sjfv sd_idx = idx / I40E_HMC_PD_CNT_IN_SD; 240270631Sjfv rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD; 241270631Sjfv if (sd_idx >= hmc_info->sd_table.sd_cnt) { 242270631Sjfv ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; 243270631Sjfv DEBUGOUT("i40e_remove_pd_bp: bad idx\n"); 244270631Sjfv goto exit; 245270631Sjfv } 246270631Sjfv sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; 247270631Sjfv if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) { 248270631Sjfv ret_code = I40E_ERR_INVALID_SD_TYPE; 249270631Sjfv DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n"); 250270631Sjfv goto exit; 251270631Sjfv } 252270631Sjfv /* get the entry and decrease its ref counter */ 253270631Sjfv pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; 254270631Sjfv pd_entry = &pd_table->pd_entry[rel_pd_idx]; 255270631Sjfv I40E_DEC_BP_REFCNT(&pd_entry->bp); 256270631Sjfv if (pd_entry->bp.ref_cnt) 257270631Sjfv goto exit; 258270631Sjfv 259270631Sjfv /* mark the entry invalid */ 260270631Sjfv pd_entry->valid = FALSE; 261270631Sjfv I40E_DEC_PD_REFCNT(pd_table); 262270631Sjfv pd_addr = 
(u64 *)pd_table->pd_page_addr.va; 263270631Sjfv pd_addr += rel_pd_idx; 264270631Sjfv i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM); 265270631Sjfv I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); 266270631Sjfv 267270631Sjfv /* free memory here */ 268292100Ssmh if (!pd_entry->rsrc_pg) 269292100Ssmh ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); 270270631Sjfv if (I40E_SUCCESS != ret_code) 271270631Sjfv goto exit; 272270631Sjfv if (!pd_table->ref_cnt) 273270631Sjfv i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem); 274270631Sjfvexit: 275270631Sjfv return ret_code; 276270631Sjfv} 277270631Sjfv 278270631Sjfv/** 279270631Sjfv * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry 280270631Sjfv * @hmc_info: pointer to the HMC configuration information structure 281270631Sjfv * @idx: the page index 282270631Sjfv **/ 283270631Sjfvenum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, 284270631Sjfv u32 idx) 285270631Sjfv{ 286270631Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 287270631Sjfv struct i40e_hmc_sd_entry *sd_entry; 288270631Sjfv 289270631Sjfv /* get the entry and decrease its ref counter */ 290270631Sjfv sd_entry = &hmc_info->sd_table.sd_entry[idx]; 291270631Sjfv I40E_DEC_BP_REFCNT(&sd_entry->u.bp); 292270631Sjfv if (sd_entry->u.bp.ref_cnt) { 293270631Sjfv ret_code = I40E_ERR_NOT_READY; 294270631Sjfv goto exit; 295270631Sjfv } 296270631Sjfv I40E_DEC_SD_REFCNT(&hmc_info->sd_table); 297270631Sjfv 298270631Sjfv /* mark the entry invalid */ 299270631Sjfv sd_entry->valid = FALSE; 300270631Sjfvexit: 301270631Sjfv return ret_code; 302270631Sjfv} 303270631Sjfv 304270631Sjfv/** 305270631Sjfv * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor 306270631Sjfv * @hw: pointer to our hw struct 307270631Sjfv * @hmc_info: pointer to the HMC configuration information structure 308270631Sjfv * @idx: the page index 309270631Sjfv * @is_pf: used to distinguish between VF and PF 310270631Sjfv **/ 
311270631Sjfvenum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw, 312270631Sjfv struct i40e_hmc_info *hmc_info, 313270631Sjfv u32 idx, bool is_pf) 314270631Sjfv{ 315270631Sjfv struct i40e_hmc_sd_entry *sd_entry; 316270631Sjfv 317292100Ssmh if (!is_pf) 318292100Ssmh return I40E_NOT_SUPPORTED; 319292100Ssmh 320270631Sjfv /* get the entry and decrease its ref counter */ 321270631Sjfv sd_entry = &hmc_info->sd_table.sd_entry[idx]; 322292100Ssmh I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); 323292100Ssmh 324292100Ssmh return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr)); 325270631Sjfv} 326270631Sjfv 327270631Sjfv/** 328270631Sjfv * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry. 329270631Sjfv * @hmc_info: pointer to the HMC configuration information structure 330270631Sjfv * @idx: segment descriptor index to find the relevant page descriptor 331270631Sjfv **/ 332270631Sjfvenum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, 333270631Sjfv u32 idx) 334270631Sjfv{ 335270631Sjfv enum i40e_status_code ret_code = I40E_SUCCESS; 336270631Sjfv struct i40e_hmc_sd_entry *sd_entry; 337270631Sjfv 338270631Sjfv sd_entry = &hmc_info->sd_table.sd_entry[idx]; 339270631Sjfv 340270631Sjfv if (sd_entry->u.pd_table.ref_cnt) { 341270631Sjfv ret_code = I40E_ERR_NOT_READY; 342270631Sjfv goto exit; 343270631Sjfv } 344270631Sjfv 345270631Sjfv /* mark the entry invalid */ 346270631Sjfv sd_entry->valid = FALSE; 347270631Sjfv 348270631Sjfv I40E_DEC_SD_REFCNT(&hmc_info->sd_table); 349270631Sjfvexit: 350270631Sjfv return ret_code; 351270631Sjfv} 352270631Sjfv 353270631Sjfv/** 354270631Sjfv * i40e_remove_pd_page_new - Removes a PD page from sd entry. 
355270631Sjfv * @hw: pointer to our hw struct 356270631Sjfv * @hmc_info: pointer to the HMC configuration information structure 357270631Sjfv * @idx: segment descriptor index to find the relevant page descriptor 358270631Sjfv * @is_pf: used to distinguish between VF and PF 359270631Sjfv **/ 360270631Sjfvenum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw, 361270631Sjfv struct i40e_hmc_info *hmc_info, 362270631Sjfv u32 idx, bool is_pf) 363270631Sjfv{ 364270631Sjfv struct i40e_hmc_sd_entry *sd_entry; 365270631Sjfv 366292100Ssmh if (!is_pf) 367292100Ssmh return I40E_NOT_SUPPORTED; 368292100Ssmh 369270631Sjfv sd_entry = &hmc_info->sd_table.sd_entry[idx]; 370292100Ssmh I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); 371292100Ssmh 372292100Ssmh return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr)); 373270631Sjfv} 374