1270631Sjfv/******************************************************************************
2270631Sjfv
3292095Ssmh  Copyright (c) 2013-2015, Intel Corporation
4270631Sjfv  All rights reserved.
5270631Sjfv
6270631Sjfv  Redistribution and use in source and binary forms, with or without
7270631Sjfv  modification, are permitted provided that the following conditions are met:
8270631Sjfv
9270631Sjfv   1. Redistributions of source code must retain the above copyright notice,
10270631Sjfv      this list of conditions and the following disclaimer.
11270631Sjfv
12270631Sjfv   2. Redistributions in binary form must reproduce the above copyright
13270631Sjfv      notice, this list of conditions and the following disclaimer in the
14270631Sjfv      documentation and/or other materials provided with the distribution.
15270631Sjfv
16270631Sjfv   3. Neither the name of the Intel Corporation nor the names of its
17270631Sjfv      contributors may be used to endorse or promote products derived from
18270631Sjfv      this software without specific prior written permission.
19270631Sjfv
20270631Sjfv  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21270631Sjfv  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22270631Sjfv  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23270631Sjfv  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24270631Sjfv  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25270631Sjfv  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26270631Sjfv  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27270631Sjfv  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28270631Sjfv  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29270631Sjfv  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30270631Sjfv  POSSIBILITY OF SUCH DAMAGE.
31270631Sjfv
32270631Sjfv******************************************************************************/
33270631Sjfv/*$FreeBSD: releng/10.3/sys/dev/ixl/i40e_lan_hmc.c 292100 2015-12-11 13:08:38Z smh $*/
34270631Sjfv
35270631Sjfv#include "i40e_osdep.h"
36270631Sjfv#include "i40e_register.h"
37270631Sjfv#include "i40e_type.h"
38270631Sjfv#include "i40e_hmc.h"
39270631Sjfv#include "i40e_lan_hmc.h"
40270631Sjfv#include "i40e_prototype.h"
41270631Sjfv
42270631Sjfv/* lan specific interface functions */
43270631Sjfv
44270631Sjfv/**
45270631Sjfv * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
46270631Sjfv * @offset: base address offset needing alignment
47270631Sjfv *
48270631Sjfv * Aligns the layer 2 function private memory so it's 512-byte aligned.
49270631Sjfv **/
50270631Sjfvstatic u64 i40e_align_l2obj_base(u64 offset)
51270631Sjfv{
52270631Sjfv	u64 aligned_offset = offset;
53270631Sjfv
54270631Sjfv	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
55270631Sjfv		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
56270631Sjfv				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
57270631Sjfv
58270631Sjfv	return aligned_offset;
59270631Sjfv}
60270631Sjfv
61270631Sjfv/**
62270631Sjfv * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
63270631Sjfv * @txq_num: number of Tx queues needing backing context
64270631Sjfv * @rxq_num: number of Rx queues needing backing context
65270631Sjfv * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
66270631Sjfv * @fcoe_filt_num: number of FCoE filters needing backing context
67270631Sjfv *
68270631Sjfv * Calculates the maximum amount of memory for the function required, based
69270631Sjfv * on the number of resources it must provide context for.
70270631Sjfv **/
71270631Sjfvu64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
72270631Sjfv			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
73270631Sjfv{
74270631Sjfv	u64 fpm_size = 0;
75270631Sjfv
76270631Sjfv	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
77270631Sjfv	fpm_size = i40e_align_l2obj_base(fpm_size);
78270631Sjfv
79270631Sjfv	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
80270631Sjfv	fpm_size = i40e_align_l2obj_base(fpm_size);
81270631Sjfv
82270631Sjfv	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
83270631Sjfv	fpm_size = i40e_align_l2obj_base(fpm_size);
84270631Sjfv
85270631Sjfv	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
86270631Sjfv	fpm_size = i40e_align_l2obj_base(fpm_size);
87270631Sjfv
88270631Sjfv	return fpm_size;
89270631Sjfv}
90270631Sjfv
91270631Sjfv/**
92270631Sjfv * i40e_init_lan_hmc - initialize i40e_hmc_info struct
93270631Sjfv * @hw: pointer to the HW structure
94270631Sjfv * @txq_num: number of Tx queues needing backing context
95270631Sjfv * @rxq_num: number of Rx queues needing backing context
96270631Sjfv * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
97270631Sjfv * @fcoe_filt_num: number of FCoE filters needing backing context
98270631Sjfv *
99270631Sjfv * This function will be called once per physical function initialization.
100270631Sjfv * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
101270631Sjfv * the driver's provided input, as well as information from the HMC itself
102270631Sjfv * loaded from NVRAM.
103270631Sjfv *
104270631Sjfv * Assumptions:
105270631Sjfv *   - HMC Resource Profile has been selected before calling this function.
106270631Sjfv **/
107270631Sjfvenum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
108270631Sjfv					u32 rxq_num, u32 fcoe_cntx_num,
109270631Sjfv					u32 fcoe_filt_num)
110270631Sjfv{
111270631Sjfv	struct i40e_hmc_obj_info *obj, *full_obj;
112270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
113270631Sjfv	u64 l2fpm_size;
114270631Sjfv	u32 size_exp;
115270631Sjfv
116270631Sjfv	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
117270631Sjfv	hw->hmc.hmc_fn_id = hw->pf_id;
118270631Sjfv
119270631Sjfv	/* allocate memory for hmc_obj */
120270631Sjfv	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
121270631Sjfv			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
122270631Sjfv	if (ret_code)
123270631Sjfv		goto init_lan_hmc_out;
124270631Sjfv	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
125270631Sjfv			  hw->hmc.hmc_obj_virt_mem.va;
126270631Sjfv
127270631Sjfv	/* The full object will be used to create the LAN HMC SD */
128270631Sjfv	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
129270631Sjfv	full_obj->max_cnt = 0;
130270631Sjfv	full_obj->cnt = 0;
131270631Sjfv	full_obj->base = 0;
132270631Sjfv	full_obj->size = 0;
133270631Sjfv
134270631Sjfv	/* Tx queue context information */
135270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
136270631Sjfv	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
137270631Sjfv	obj->cnt = txq_num;
138270631Sjfv	obj->base = 0;
139270631Sjfv	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
140292100Ssmh	obj->size = BIT_ULL(size_exp);
141270631Sjfv
142270631Sjfv	/* validate values requested by driver don't exceed HMC capacity */
143270631Sjfv	if (txq_num > obj->max_cnt) {
144270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
145270631Sjfv		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
146270631Sjfv			  txq_num, obj->max_cnt, ret_code);
147270631Sjfv		goto init_lan_hmc_out;
148270631Sjfv	}
149270631Sjfv
150270631Sjfv	/* aggregate values into the full LAN object for later */
151270631Sjfv	full_obj->max_cnt += obj->max_cnt;
152270631Sjfv	full_obj->cnt += obj->cnt;
153270631Sjfv
154270631Sjfv	/* Rx queue context information */
155270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
156270631Sjfv	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
157270631Sjfv	obj->cnt = rxq_num;
158270631Sjfv	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
159270631Sjfv		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
160270631Sjfv		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
161270631Sjfv	obj->base = i40e_align_l2obj_base(obj->base);
162270631Sjfv	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
163292100Ssmh	obj->size = BIT_ULL(size_exp);
164270631Sjfv
165270631Sjfv	/* validate values requested by driver don't exceed HMC capacity */
166270631Sjfv	if (rxq_num > obj->max_cnt) {
167270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
168270631Sjfv		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
169270631Sjfv			  rxq_num, obj->max_cnt, ret_code);
170270631Sjfv		goto init_lan_hmc_out;
171270631Sjfv	}
172270631Sjfv
173270631Sjfv	/* aggregate values into the full LAN object for later */
174270631Sjfv	full_obj->max_cnt += obj->max_cnt;
175270631Sjfv	full_obj->cnt += obj->cnt;
176270631Sjfv
177270631Sjfv	/* FCoE context information */
178270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
179270631Sjfv	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
180270631Sjfv	obj->cnt = fcoe_cntx_num;
181270631Sjfv	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
182270631Sjfv		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
183270631Sjfv		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
184270631Sjfv	obj->base = i40e_align_l2obj_base(obj->base);
185270631Sjfv	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
186292100Ssmh	obj->size = BIT_ULL(size_exp);
187270631Sjfv
188270631Sjfv	/* validate values requested by driver don't exceed HMC capacity */
189270631Sjfv	if (fcoe_cntx_num > obj->max_cnt) {
190270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
191270631Sjfv		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
192270631Sjfv			  fcoe_cntx_num, obj->max_cnt, ret_code);
193270631Sjfv		goto init_lan_hmc_out;
194270631Sjfv	}
195270631Sjfv
196270631Sjfv	/* aggregate values into the full LAN object for later */
197270631Sjfv	full_obj->max_cnt += obj->max_cnt;
198270631Sjfv	full_obj->cnt += obj->cnt;
199270631Sjfv
200270631Sjfv	/* FCoE filter information */
201270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
202270631Sjfv	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
203270631Sjfv	obj->cnt = fcoe_filt_num;
204270631Sjfv	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
205270631Sjfv		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
206270631Sjfv		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
207270631Sjfv	obj->base = i40e_align_l2obj_base(obj->base);
208270631Sjfv	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
209292100Ssmh	obj->size = BIT_ULL(size_exp);
210270631Sjfv
211270631Sjfv	/* validate values requested by driver don't exceed HMC capacity */
212270631Sjfv	if (fcoe_filt_num > obj->max_cnt) {
213270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
214270631Sjfv		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
215270631Sjfv			  fcoe_filt_num, obj->max_cnt, ret_code);
216270631Sjfv		goto init_lan_hmc_out;
217270631Sjfv	}
218270631Sjfv
219270631Sjfv	/* aggregate values into the full LAN object for later */
220270631Sjfv	full_obj->max_cnt += obj->max_cnt;
221270631Sjfv	full_obj->cnt += obj->cnt;
222270631Sjfv
223270631Sjfv	hw->hmc.first_sd_index = 0;
224270631Sjfv	hw->hmc.sd_table.ref_cnt = 0;
225270631Sjfv	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
226270631Sjfv					       fcoe_filt_num);
227270631Sjfv	if (NULL == hw->hmc.sd_table.sd_entry) {
228270631Sjfv		hw->hmc.sd_table.sd_cnt = (u32)
229270631Sjfv				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
230270631Sjfv				   I40E_HMC_DIRECT_BP_SIZE;
231270631Sjfv
232270631Sjfv		/* allocate the sd_entry members in the sd_table */
233270631Sjfv		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
234270631Sjfv					  (sizeof(struct i40e_hmc_sd_entry) *
235270631Sjfv					  hw->hmc.sd_table.sd_cnt));
236270631Sjfv		if (ret_code)
237270631Sjfv			goto init_lan_hmc_out;
238270631Sjfv		hw->hmc.sd_table.sd_entry =
239270631Sjfv			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
240270631Sjfv	}
241270631Sjfv	/* store in the LAN full object for later */
242270631Sjfv	full_obj->size = l2fpm_size;
243270631Sjfv
244270631Sjfvinit_lan_hmc_out:
245270631Sjfv	return ret_code;
246270631Sjfv}
247270631Sjfv
248270631Sjfv/**
249270631Sjfv * i40e_remove_pd_page - Remove a page from the page descriptor table
250270631Sjfv * @hw: pointer to the HW structure
251270631Sjfv * @hmc_info: pointer to the HMC configuration information structure
252270631Sjfv * @idx: segment descriptor index to find the relevant page descriptor
253270631Sjfv *
254270631Sjfv * This function:
255270631Sjfv *	1. Marks the entry in pd table (for paged address mode) invalid
256270631Sjfv *	2. write to register PMPDINV to invalidate the backing page in FV cache
257270631Sjfv *	3. Decrement the ref count for  pd_entry
258270631Sjfv * assumptions:
259270631Sjfv *	1. caller can deallocate the memory used by pd after this function
260270631Sjfv *	   returns.
261270631Sjfv **/
262270631Sjfvstatic enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
263270631Sjfv						 struct i40e_hmc_info *hmc_info,
264270631Sjfv						 u32 idx)
265270631Sjfv{
266270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
267270631Sjfv
268270631Sjfv	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
269270631Sjfv		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);
270270631Sjfv
271270631Sjfv	return ret_code;
272270631Sjfv}
273270631Sjfv
274270631Sjfv/**
275270631Sjfv * i40e_remove_sd_bp - remove a backing page from a segment descriptor
276270631Sjfv * @hw: pointer to our HW structure
277270631Sjfv * @hmc_info: pointer to the HMC configuration information structure
278270631Sjfv * @idx: the page index
279270631Sjfv *
280270631Sjfv * This function:
281270631Sjfv *	1. Marks the entry in sd table (for direct address mode) invalid
282270631Sjfv *	2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set
283270631Sjfv *	   to 0) and PMSDDATAHIGH to invalidate the sd page
284270631Sjfv *	3. Decrement the ref count for the sd_entry
285270631Sjfv * assumptions:
286270631Sjfv *	1. caller can deallocate the memory used by backing storage after this
287270631Sjfv *	   function returns.
288270631Sjfv **/
289270631Sjfvstatic enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
290270631Sjfv					       struct i40e_hmc_info *hmc_info,
291270631Sjfv					       u32 idx)
292270631Sjfv{
293270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
294270631Sjfv
295270631Sjfv	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
296270631Sjfv		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);
297270631Sjfv
298270631Sjfv	return ret_code;
299270631Sjfv}
300270631Sjfv
301270631Sjfv/**
302270631Sjfv * i40e_create_lan_hmc_object - allocate backing store for hmc objects
303270631Sjfv * @hw: pointer to the HW structure
304270631Sjfv * @info: pointer to i40e_hmc_create_obj_info struct
305270631Sjfv *
306270631Sjfv * This will allocate memory for PDs and backing pages and populate
307270631Sjfv * the sd and pd entries.
308270631Sjfv **/
309270631Sjfvenum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
310270631Sjfv				struct i40e_hmc_lan_create_obj_info *info)
311270631Sjfv{
312270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
313270631Sjfv	struct i40e_hmc_sd_entry *sd_entry;
314270631Sjfv	u32 pd_idx1 = 0, pd_lmt1 = 0;
315270631Sjfv	u32 pd_idx = 0, pd_lmt = 0;
316270631Sjfv	bool pd_error = FALSE;
317270631Sjfv	u32 sd_idx, sd_lmt;
318270631Sjfv	u64 sd_size;
319270631Sjfv	u32 i, j;
320270631Sjfv
321270631Sjfv	if (NULL == info) {
322270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
323270631Sjfv		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
324270631Sjfv		goto exit;
325270631Sjfv	}
326270631Sjfv	if (NULL == info->hmc_info) {
327270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
328270631Sjfv		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
329270631Sjfv		goto exit;
330270631Sjfv	}
331270631Sjfv	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
332270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
333270631Sjfv		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
334270631Sjfv		goto exit;
335270631Sjfv	}
336270631Sjfv
337270631Sjfv	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
338270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
339270631Sjfv		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
340270631Sjfv			  ret_code);
341270631Sjfv		goto exit;
342270631Sjfv	}
343270631Sjfv	if ((info->start_idx + info->count) >
344270631Sjfv	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
345270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
346270631Sjfv		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
347270631Sjfv			  ret_code);
348270631Sjfv		goto exit;
349270631Sjfv	}
350270631Sjfv
351270631Sjfv	/* find sd index and limit */
352270631Sjfv	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
353270631Sjfv				 info->start_idx, info->count,
354270631Sjfv				 &sd_idx, &sd_lmt);
355270631Sjfv	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
356270631Sjfv	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
357270631Sjfv			ret_code = I40E_ERR_INVALID_SD_INDEX;
358270631Sjfv			goto exit;
359270631Sjfv	}
360270631Sjfv	/* find pd index */
361270631Sjfv	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
362270631Sjfv				 info->start_idx, info->count, &pd_idx,
363270631Sjfv				 &pd_lmt);
364270631Sjfv
365270631Sjfv	/* This is to cover for cases where you may not want to have an SD with
366270631Sjfv	 * the full 2M memory but something smaller. By not filling out any
367270631Sjfv	 * size, the function will default the SD size to be 2M.
368270631Sjfv	 */
369270631Sjfv	if (info->direct_mode_sz == 0)
370270631Sjfv		sd_size = I40E_HMC_DIRECT_BP_SIZE;
371270631Sjfv	else
372270631Sjfv		sd_size = info->direct_mode_sz;
373270631Sjfv
374270631Sjfv	/* check if all the sds are valid. If not, allocate a page and
375270631Sjfv	 * initialize it.
376270631Sjfv	 */
377270631Sjfv	for (j = sd_idx; j < sd_lmt; j++) {
378270631Sjfv		/* update the sd table entry */
379270631Sjfv		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
380270631Sjfv						   info->entry_type,
381270631Sjfv						   sd_size);
382270631Sjfv		if (I40E_SUCCESS != ret_code)
383270631Sjfv			goto exit_sd_error;
384270631Sjfv		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
385270631Sjfv		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
386270631Sjfv			/* check if all the pds in this sd are valid. If not,
387270631Sjfv			 * allocate a page and initialize it.
388270631Sjfv			 */
389270631Sjfv
390270631Sjfv			/* find pd_idx and pd_lmt in this sd */
391270631Sjfv			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
392270631Sjfv			pd_lmt1 = min(pd_lmt,
393270631Sjfv				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
394270631Sjfv			for (i = pd_idx1; i < pd_lmt1; i++) {
395270631Sjfv				/* update the pd table entry */
396270631Sjfv				ret_code = i40e_add_pd_table_entry(hw,
397270631Sjfv								info->hmc_info,
398292100Ssmh								i, NULL);
399270631Sjfv				if (I40E_SUCCESS != ret_code) {
400270631Sjfv					pd_error = TRUE;
401270631Sjfv					break;
402270631Sjfv				}
403270631Sjfv			}
404270631Sjfv			if (pd_error) {
405270631Sjfv				/* remove the backing pages from pd_idx1 to i */
406270631Sjfv				while (i && (i > pd_idx1)) {
407270631Sjfv					i40e_remove_pd_bp(hw, info->hmc_info,
408270631Sjfv							  (i - 1));
409270631Sjfv					i--;
410270631Sjfv				}
411270631Sjfv			}
412270631Sjfv		}
413270631Sjfv		if (!sd_entry->valid) {
414270631Sjfv			sd_entry->valid = TRUE;
415270631Sjfv			switch (sd_entry->entry_type) {
416270631Sjfv			case I40E_SD_TYPE_PAGED:
417270631Sjfv				I40E_SET_PF_SD_ENTRY(hw,
418270631Sjfv					sd_entry->u.pd_table.pd_page_addr.pa,
419270631Sjfv					j, sd_entry->entry_type);
420270631Sjfv				break;
421270631Sjfv			case I40E_SD_TYPE_DIRECT:
422270631Sjfv				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
423270631Sjfv						     j, sd_entry->entry_type);
424270631Sjfv				break;
425270631Sjfv			default:
426270631Sjfv				ret_code = I40E_ERR_INVALID_SD_TYPE;
427270631Sjfv				goto exit;
428270631Sjfv			}
429270631Sjfv		}
430270631Sjfv	}
431270631Sjfv	goto exit;
432270631Sjfv
433270631Sjfvexit_sd_error:
434270631Sjfv	/* cleanup for sd entries from j to sd_idx */
435270631Sjfv	while (j && (j > sd_idx)) {
436270631Sjfv		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
437270631Sjfv		switch (sd_entry->entry_type) {
438270631Sjfv		case I40E_SD_TYPE_PAGED:
439270631Sjfv			pd_idx1 = max(pd_idx,
440270631Sjfv				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
441270631Sjfv			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
442292100Ssmh			for (i = pd_idx1; i < pd_lmt1; i++)
443270631Sjfv				i40e_remove_pd_bp(hw, info->hmc_info, i);
444270631Sjfv			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
445270631Sjfv			break;
446270631Sjfv		case I40E_SD_TYPE_DIRECT:
447270631Sjfv			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
448270631Sjfv			break;
449270631Sjfv		default:
450270631Sjfv			ret_code = I40E_ERR_INVALID_SD_TYPE;
451270631Sjfv			break;
452270631Sjfv		}
453270631Sjfv		j--;
454270631Sjfv	}
455270631Sjfvexit:
456270631Sjfv	return ret_code;
457270631Sjfv}
458270631Sjfv
459270631Sjfv/**
460270631Sjfv * i40e_configure_lan_hmc - prepare the HMC backing store
461270631Sjfv * @hw: pointer to the hw structure
462270631Sjfv * @model: the model for the layout of the SD/PD tables
463270631Sjfv *
464270631Sjfv * - This function will be called once per physical function initialization.
465270631Sjfv * - This function will be called after i40e_init_lan_hmc() and before
466270631Sjfv *   any LAN/FCoE HMC objects can be created.
467270631Sjfv **/
468270631Sjfvenum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
469270631Sjfv					     enum i40e_hmc_model model)
470270631Sjfv{
471270631Sjfv	struct i40e_hmc_lan_create_obj_info info;
472270631Sjfv	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
473270631Sjfv	struct i40e_hmc_obj_info *obj;
474270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
475270631Sjfv
476270631Sjfv	/* Initialize part of the create object info struct */
477270631Sjfv	info.hmc_info = &hw->hmc;
478270631Sjfv	info.rsrc_type = I40E_HMC_LAN_FULL;
479270631Sjfv	info.start_idx = 0;
480270631Sjfv	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
481270631Sjfv
482270631Sjfv	/* Build the SD entry for the LAN objects */
483270631Sjfv	switch (model) {
484270631Sjfv	case I40E_HMC_MODEL_DIRECT_PREFERRED:
485270631Sjfv	case I40E_HMC_MODEL_DIRECT_ONLY:
486270631Sjfv		info.entry_type = I40E_SD_TYPE_DIRECT;
487270631Sjfv		/* Make one big object, a single SD */
488270631Sjfv		info.count = 1;
489270631Sjfv		ret_code = i40e_create_lan_hmc_object(hw, &info);
490270631Sjfv		if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
491270631Sjfv			goto try_type_paged;
492270631Sjfv		else if (ret_code != I40E_SUCCESS)
493270631Sjfv			goto configure_lan_hmc_out;
494270631Sjfv		/* else clause falls through the break */
495270631Sjfv		break;
496270631Sjfv	case I40E_HMC_MODEL_PAGED_ONLY:
497270631Sjfvtry_type_paged:
498270631Sjfv		info.entry_type = I40E_SD_TYPE_PAGED;
499270631Sjfv		/* Make one big object in the PD table */
500270631Sjfv		info.count = 1;
501270631Sjfv		ret_code = i40e_create_lan_hmc_object(hw, &info);
502270631Sjfv		if (ret_code != I40E_SUCCESS)
503270631Sjfv			goto configure_lan_hmc_out;
504270631Sjfv		break;
505270631Sjfv	default:
506270631Sjfv		/* unsupported type */
507270631Sjfv		ret_code = I40E_ERR_INVALID_SD_TYPE;
508270631Sjfv		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
509270631Sjfv			  ret_code);
510270631Sjfv		goto configure_lan_hmc_out;
511270631Sjfv	}
512270631Sjfv
513270631Sjfv	/* Configure and program the FPM registers so objects can be created */
514270631Sjfv
515270631Sjfv	/* Tx contexts */
516270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
517270631Sjfv	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
518270631Sjfv	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
519270631Sjfv	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
520270631Sjfv
521270631Sjfv	/* Rx contexts */
522270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
523270631Sjfv	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
524270631Sjfv	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
525270631Sjfv	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
526270631Sjfv
527270631Sjfv	/* FCoE contexts */
528270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
529270631Sjfv	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
530270631Sjfv	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
531270631Sjfv	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
532270631Sjfv
533270631Sjfv	/* FCoE filters */
534270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
535270631Sjfv	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
536270631Sjfv	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
537270631Sjfv	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
538270631Sjfv
539270631Sjfvconfigure_lan_hmc_out:
540270631Sjfv	return ret_code;
541270631Sjfv}
542270631Sjfv
543270631Sjfv/**
544270631Sjfv * i40e_delete_hmc_object - remove hmc objects
545270631Sjfv * @hw: pointer to the HW structure
546270631Sjfv * @info: pointer to i40e_hmc_delete_obj_info struct
547270631Sjfv *
548270631Sjfv * This will de-populate the SDs and PDs.  It frees
549270631Sjfv * the memory for PDS and backing storage.  After this function is returned,
550270631Sjfv * caller should deallocate memory allocated previously for
551270631Sjfv * book-keeping information about PDs and backing storage.
552270631Sjfv **/
553270631Sjfvenum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
554270631Sjfv				struct i40e_hmc_lan_delete_obj_info *info)
555270631Sjfv{
556270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
557270631Sjfv	struct i40e_hmc_pd_table *pd_table;
558270631Sjfv	u32 pd_idx, pd_lmt, rel_pd_idx;
559270631Sjfv	u32 sd_idx, sd_lmt;
560270631Sjfv	u32 i, j;
561270631Sjfv
562270631Sjfv	if (NULL == info) {
563270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
564270631Sjfv		DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
565270631Sjfv		goto exit;
566270631Sjfv	}
567270631Sjfv	if (NULL == info->hmc_info) {
568270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
569270631Sjfv		DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
570270631Sjfv		goto exit;
571270631Sjfv	}
572270631Sjfv	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
573270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
574270631Sjfv		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
575270631Sjfv		goto exit;
576270631Sjfv	}
577270631Sjfv
578270631Sjfv	if (NULL == info->hmc_info->sd_table.sd_entry) {
579270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
580270631Sjfv		DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
581270631Sjfv		goto exit;
582270631Sjfv	}
583270631Sjfv
584270631Sjfv	if (NULL == info->hmc_info->hmc_obj) {
585270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
586270631Sjfv		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
587270631Sjfv		goto exit;
588270631Sjfv	}
589270631Sjfv	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
590270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
591270631Sjfv		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
592270631Sjfv			  ret_code);
593270631Sjfv		goto exit;
594270631Sjfv	}
595270631Sjfv
596270631Sjfv	if ((info->start_idx + info->count) >
597270631Sjfv	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
598270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
599270631Sjfv		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
600270631Sjfv			  ret_code);
601270631Sjfv		goto exit;
602270631Sjfv	}
603270631Sjfv
604270631Sjfv	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
605270631Sjfv				 info->start_idx, info->count, &pd_idx,
606270631Sjfv				 &pd_lmt);
607270631Sjfv
608270631Sjfv	for (j = pd_idx; j < pd_lmt; j++) {
609270631Sjfv		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
610270631Sjfv
611270631Sjfv		if (I40E_SD_TYPE_PAGED !=
612270631Sjfv		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
613270631Sjfv			continue;
614270631Sjfv
615270631Sjfv		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
616270631Sjfv
617270631Sjfv		pd_table =
618270631Sjfv			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
619270631Sjfv		if (pd_table->pd_entry[rel_pd_idx].valid) {
620270631Sjfv			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
621270631Sjfv			if (I40E_SUCCESS != ret_code)
622270631Sjfv				goto exit;
623270631Sjfv		}
624270631Sjfv	}
625270631Sjfv
626270631Sjfv	/* find sd index and limit */
627270631Sjfv	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
628270631Sjfv				 info->start_idx, info->count,
629270631Sjfv				 &sd_idx, &sd_lmt);
630270631Sjfv	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
631270631Sjfv	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
632270631Sjfv		ret_code = I40E_ERR_INVALID_SD_INDEX;
633270631Sjfv		goto exit;
634270631Sjfv	}
635270631Sjfv
636270631Sjfv	for (i = sd_idx; i < sd_lmt; i++) {
637270631Sjfv		if (!info->hmc_info->sd_table.sd_entry[i].valid)
638270631Sjfv			continue;
639270631Sjfv		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
640270631Sjfv		case I40E_SD_TYPE_DIRECT:
641270631Sjfv			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
642270631Sjfv			if (I40E_SUCCESS != ret_code)
643270631Sjfv				goto exit;
644270631Sjfv			break;
645270631Sjfv		case I40E_SD_TYPE_PAGED:
646270631Sjfv			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
647270631Sjfv			if (I40E_SUCCESS != ret_code)
648270631Sjfv				goto exit;
649270631Sjfv			break;
650270631Sjfv		default:
651270631Sjfv			break;
652270631Sjfv		}
653270631Sjfv	}
654270631Sjfvexit:
655270631Sjfv	return ret_code;
656270631Sjfv}
657270631Sjfv
658270631Sjfv/**
659270631Sjfv * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
660270631Sjfv * @hw: pointer to the hw structure
661270631Sjfv *
662270631Sjfv * This must be called by drivers as they are shutting down and being
663270631Sjfv * removed from the OS.
664270631Sjfv **/
665270631Sjfvenum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
666270631Sjfv{
667270631Sjfv	struct i40e_hmc_lan_delete_obj_info info;
668270631Sjfv	enum i40e_status_code ret_code;
669270631Sjfv
670270631Sjfv	info.hmc_info = &hw->hmc;
671270631Sjfv	info.rsrc_type = I40E_HMC_LAN_FULL;
672270631Sjfv	info.start_idx = 0;
673270631Sjfv	info.count = 1;
674270631Sjfv
675270631Sjfv	/* delete the object */
676270631Sjfv	ret_code = i40e_delete_lan_hmc_object(hw, &info);
677270631Sjfv
678270631Sjfv	/* free the SD table entry for LAN */
679270631Sjfv	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
680270631Sjfv	hw->hmc.sd_table.sd_cnt = 0;
681270631Sjfv	hw->hmc.sd_table.sd_entry = NULL;
682270631Sjfv
683270631Sjfv	/* free memory used for hmc_obj */
684270631Sjfv	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
685270631Sjfv	hw->hmc.hmc_obj = NULL;
686270631Sjfv
687270631Sjfv	return ret_code;
688270631Sjfv}
689270631Sjfv
690270631Sjfv#define I40E_HMC_STORE(_struct, _ele)		\
691270631Sjfv	offsetof(struct _struct, _ele),		\
692270631Sjfv	FIELD_SIZEOF(struct _struct, _ele)
693270631Sjfv
694270631Sjfvstruct i40e_context_ele {
695270631Sjfv	u16 offset;
696270631Sjfv	u16 size_of;
697270631Sjfv	u16 width;
698270631Sjfv	u16 lsb;
699270631Sjfv};
700270631Sjfv
701270631Sjfv/* LAN Tx Queue Context */
702270631Sjfvstatic struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
703270631Sjfv					     /* Field      Width    LSB */
704270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
705270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
706270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
707270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
708270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
709270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
710270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
711270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
712270631Sjfv/* line 1 */
713270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
714270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
715270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
716270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
717270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
718270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
719270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
720270631Sjfv/* line 7 */
721270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
722270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
723270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
724270631Sjfv	{ 0 }
725270631Sjfv};
726270631Sjfv
/* LAN Rx Queue Context; widths and LSB positions mirror the hardware
 * context layout.  The all-zero entry is the terminator tested via
 * width != 0 by the set/get loops below.
 */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
	{ 0 }	/* terminator (width == 0) */
};
753270631Sjfv
754270631Sjfv/**
755270631Sjfv * i40e_write_byte - replace HMC context byte
756270631Sjfv * @hmc_bits: pointer to the HMC memory
757270631Sjfv * @ce_info: a description of the struct to be read from
758270631Sjfv * @src: the struct to be read from
759270631Sjfv **/
760270631Sjfvstatic void i40e_write_byte(u8 *hmc_bits,
761270631Sjfv			    struct i40e_context_ele *ce_info,
762270631Sjfv			    u8 *src)
763270631Sjfv{
764270631Sjfv	u8 src_byte, dest_byte, mask;
765270631Sjfv	u8 *from, *dest;
766270631Sjfv	u16 shift_width;
767270631Sjfv
768270631Sjfv	/* copy from the next struct field */
769270631Sjfv	from = src + ce_info->offset;
770270631Sjfv
771270631Sjfv	/* prepare the bits and mask */
772270631Sjfv	shift_width = ce_info->lsb % 8;
773292100Ssmh	mask = BIT(ce_info->width) - 1;
774270631Sjfv
775270631Sjfv	src_byte = *from;
776270631Sjfv	src_byte &= mask;
777270631Sjfv
778270631Sjfv	/* shift to correct alignment */
779270631Sjfv	mask <<= shift_width;
780270631Sjfv	src_byte <<= shift_width;
781270631Sjfv
782270631Sjfv	/* get the current bits from the target bit string */
783270631Sjfv	dest = hmc_bits + (ce_info->lsb / 8);
784270631Sjfv
785270631Sjfv	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
786270631Sjfv
787270631Sjfv	dest_byte &= ~mask;	/* get the bits not changing */
788270631Sjfv	dest_byte |= src_byte;	/* add in the new bits */
789270631Sjfv
790270631Sjfv	/* put it all back */
791270631Sjfv	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
792270631Sjfv}
793270631Sjfv
794270631Sjfv/**
795270631Sjfv * i40e_write_word - replace HMC context word
796270631Sjfv * @hmc_bits: pointer to the HMC memory
797270631Sjfv * @ce_info: a description of the struct to be read from
798270631Sjfv * @src: the struct to be read from
799270631Sjfv **/
800270631Sjfvstatic void i40e_write_word(u8 *hmc_bits,
801270631Sjfv			    struct i40e_context_ele *ce_info,
802270631Sjfv			    u8 *src)
803270631Sjfv{
804270631Sjfv	u16 src_word, mask;
805270631Sjfv	u8 *from, *dest;
806270631Sjfv	u16 shift_width;
807270631Sjfv	__le16 dest_word;
808270631Sjfv
809270631Sjfv	/* copy from the next struct field */
810270631Sjfv	from = src + ce_info->offset;
811270631Sjfv
812270631Sjfv	/* prepare the bits and mask */
813270631Sjfv	shift_width = ce_info->lsb % 8;
814292100Ssmh	mask = BIT(ce_info->width) - 1;
815270631Sjfv
816270631Sjfv	/* don't swizzle the bits until after the mask because the mask bits
817270631Sjfv	 * will be in a different bit position on big endian machines
818270631Sjfv	 */
819270631Sjfv	src_word = *(u16 *)from;
820270631Sjfv	src_word &= mask;
821270631Sjfv
822270631Sjfv	/* shift to correct alignment */
823270631Sjfv	mask <<= shift_width;
824270631Sjfv	src_word <<= shift_width;
825270631Sjfv
826270631Sjfv	/* get the current bits from the target bit string */
827270631Sjfv	dest = hmc_bits + (ce_info->lsb / 8);
828270631Sjfv
829270631Sjfv	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
830270631Sjfv
831270631Sjfv	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
832270631Sjfv	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
833270631Sjfv
834270631Sjfv	/* put it all back */
835270631Sjfv	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
836270631Sjfv}
837270631Sjfv
838270631Sjfv/**
839270631Sjfv * i40e_write_dword - replace HMC context dword
840270631Sjfv * @hmc_bits: pointer to the HMC memory
841270631Sjfv * @ce_info: a description of the struct to be read from
842270631Sjfv * @src: the struct to be read from
843270631Sjfv **/
844270631Sjfvstatic void i40e_write_dword(u8 *hmc_bits,
845270631Sjfv			     struct i40e_context_ele *ce_info,
846270631Sjfv			     u8 *src)
847270631Sjfv{
848270631Sjfv	u32 src_dword, mask;
849270631Sjfv	u8 *from, *dest;
850270631Sjfv	u16 shift_width;
851270631Sjfv	__le32 dest_dword;
852270631Sjfv
853270631Sjfv	/* copy from the next struct field */
854270631Sjfv	from = src + ce_info->offset;
855270631Sjfv
856270631Sjfv	/* prepare the bits and mask */
857270631Sjfv	shift_width = ce_info->lsb % 8;
858270631Sjfv
859270631Sjfv	/* if the field width is exactly 32 on an x86 machine, then the shift
860270631Sjfv	 * operation will not work because the SHL instructions count is masked
861270631Sjfv	 * to 5 bits so the shift will do nothing
862270631Sjfv	 */
863270631Sjfv	if (ce_info->width < 32)
864292100Ssmh		mask = BIT(ce_info->width) - 1;
865270631Sjfv	else
866291248Ssmh		mask = ~(u32)0;
867270631Sjfv
868270631Sjfv	/* don't swizzle the bits until after the mask because the mask bits
869270631Sjfv	 * will be in a different bit position on big endian machines
870270631Sjfv	 */
871270631Sjfv	src_dword = *(u32 *)from;
872270631Sjfv	src_dword &= mask;
873270631Sjfv
874270631Sjfv	/* shift to correct alignment */
875270631Sjfv	mask <<= shift_width;
876270631Sjfv	src_dword <<= shift_width;
877270631Sjfv
878270631Sjfv	/* get the current bits from the target bit string */
879270631Sjfv	dest = hmc_bits + (ce_info->lsb / 8);
880270631Sjfv
881270631Sjfv	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
882270631Sjfv
883270631Sjfv	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
884270631Sjfv	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
885270631Sjfv
886270631Sjfv	/* put it all back */
887270631Sjfv	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
888270631Sjfv}
889270631Sjfv
890270631Sjfv/**
891270631Sjfv * i40e_write_qword - replace HMC context qword
892270631Sjfv * @hmc_bits: pointer to the HMC memory
893270631Sjfv * @ce_info: a description of the struct to be read from
894270631Sjfv * @src: the struct to be read from
895270631Sjfv **/
896270631Sjfvstatic void i40e_write_qword(u8 *hmc_bits,
897270631Sjfv			     struct i40e_context_ele *ce_info,
898270631Sjfv			     u8 *src)
899270631Sjfv{
900270631Sjfv	u64 src_qword, mask;
901270631Sjfv	u8 *from, *dest;
902270631Sjfv	u16 shift_width;
903270631Sjfv	__le64 dest_qword;
904270631Sjfv
905270631Sjfv	/* copy from the next struct field */
906270631Sjfv	from = src + ce_info->offset;
907270631Sjfv
908270631Sjfv	/* prepare the bits and mask */
909270631Sjfv	shift_width = ce_info->lsb % 8;
910270631Sjfv
911270631Sjfv	/* if the field width is exactly 64 on an x86 machine, then the shift
912270631Sjfv	 * operation will not work because the SHL instructions count is masked
913270631Sjfv	 * to 6 bits so the shift will do nothing
914270631Sjfv	 */
915270631Sjfv	if (ce_info->width < 64)
916292100Ssmh		mask = BIT_ULL(ce_info->width) - 1;
917270631Sjfv	else
918291248Ssmh		mask = ~(u64)0;
919270631Sjfv
920270631Sjfv	/* don't swizzle the bits until after the mask because the mask bits
921270631Sjfv	 * will be in a different bit position on big endian machines
922270631Sjfv	 */
923270631Sjfv	src_qword = *(u64 *)from;
924270631Sjfv	src_qword &= mask;
925270631Sjfv
926270631Sjfv	/* shift to correct alignment */
927270631Sjfv	mask <<= shift_width;
928270631Sjfv	src_qword <<= shift_width;
929270631Sjfv
930270631Sjfv	/* get the current bits from the target bit string */
931270631Sjfv	dest = hmc_bits + (ce_info->lsb / 8);
932270631Sjfv
933270631Sjfv	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
934270631Sjfv
935270631Sjfv	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
936270631Sjfv	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
937270631Sjfv
938270631Sjfv	/* put it all back */
939270631Sjfv	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
940270631Sjfv}
941270631Sjfv
942270631Sjfv/**
943270631Sjfv * i40e_read_byte - read HMC context byte into struct
944270631Sjfv * @hmc_bits: pointer to the HMC memory
945270631Sjfv * @ce_info: a description of the struct to be filled
946270631Sjfv * @dest: the struct to be filled
947270631Sjfv **/
948270631Sjfvstatic void i40e_read_byte(u8 *hmc_bits,
949270631Sjfv			   struct i40e_context_ele *ce_info,
950270631Sjfv			   u8 *dest)
951270631Sjfv{
952270631Sjfv	u8 dest_byte, mask;
953270631Sjfv	u8 *src, *target;
954270631Sjfv	u16 shift_width;
955270631Sjfv
956270631Sjfv	/* prepare the bits and mask */
957270631Sjfv	shift_width = ce_info->lsb % 8;
958292100Ssmh	mask = BIT(ce_info->width) - 1;
959270631Sjfv
960270631Sjfv	/* shift to correct alignment */
961270631Sjfv	mask <<= shift_width;
962270631Sjfv
963270631Sjfv	/* get the current bits from the src bit string */
964270631Sjfv	src = hmc_bits + (ce_info->lsb / 8);
965270631Sjfv
966270631Sjfv	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
967270631Sjfv
968270631Sjfv	dest_byte &= ~(mask);
969270631Sjfv
970270631Sjfv	dest_byte >>= shift_width;
971270631Sjfv
972270631Sjfv	/* get the address from the struct field */
973270631Sjfv	target = dest + ce_info->offset;
974270631Sjfv
975270631Sjfv	/* put it back in the struct */
976270631Sjfv	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
977270631Sjfv}
978270631Sjfv
979270631Sjfv/**
980270631Sjfv * i40e_read_word - read HMC context word into struct
981270631Sjfv * @hmc_bits: pointer to the HMC memory
982270631Sjfv * @ce_info: a description of the struct to be filled
983270631Sjfv * @dest: the struct to be filled
984270631Sjfv **/
985270631Sjfvstatic void i40e_read_word(u8 *hmc_bits,
986270631Sjfv			   struct i40e_context_ele *ce_info,
987270631Sjfv			   u8 *dest)
988270631Sjfv{
989270631Sjfv	u16 dest_word, mask;
990270631Sjfv	u8 *src, *target;
991270631Sjfv	u16 shift_width;
992270631Sjfv	__le16 src_word;
993270631Sjfv
994270631Sjfv	/* prepare the bits and mask */
995270631Sjfv	shift_width = ce_info->lsb % 8;
996292100Ssmh	mask = BIT(ce_info->width) - 1;
997270631Sjfv
998270631Sjfv	/* shift to correct alignment */
999270631Sjfv	mask <<= shift_width;
1000270631Sjfv
1001270631Sjfv	/* get the current bits from the src bit string */
1002270631Sjfv	src = hmc_bits + (ce_info->lsb / 8);
1003270631Sjfv
1004270631Sjfv	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
1005270631Sjfv
1006270631Sjfv	/* the data in the memory is stored as little endian so mask it
1007270631Sjfv	 * correctly
1008270631Sjfv	 */
1009270631Sjfv	src_word &= ~(CPU_TO_LE16(mask));
1010270631Sjfv
1011270631Sjfv	/* get the data back into host order before shifting */
1012270631Sjfv	dest_word = LE16_TO_CPU(src_word);
1013270631Sjfv
1014270631Sjfv	dest_word >>= shift_width;
1015270631Sjfv
1016270631Sjfv	/* get the address from the struct field */
1017270631Sjfv	target = dest + ce_info->offset;
1018270631Sjfv
1019270631Sjfv	/* put it back in the struct */
1020270631Sjfv	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
1021270631Sjfv}
1022270631Sjfv
1023270631Sjfv/**
1024270631Sjfv * i40e_read_dword - read HMC context dword into struct
1025270631Sjfv * @hmc_bits: pointer to the HMC memory
1026270631Sjfv * @ce_info: a description of the struct to be filled
1027270631Sjfv * @dest: the struct to be filled
1028270631Sjfv **/
1029270631Sjfvstatic void i40e_read_dword(u8 *hmc_bits,
1030270631Sjfv			    struct i40e_context_ele *ce_info,
1031270631Sjfv			    u8 *dest)
1032270631Sjfv{
1033270631Sjfv	u32 dest_dword, mask;
1034270631Sjfv	u8 *src, *target;
1035270631Sjfv	u16 shift_width;
1036270631Sjfv	__le32 src_dword;
1037270631Sjfv
1038270631Sjfv	/* prepare the bits and mask */
1039270631Sjfv	shift_width = ce_info->lsb % 8;
1040270631Sjfv
1041270631Sjfv	/* if the field width is exactly 32 on an x86 machine, then the shift
1042270631Sjfv	 * operation will not work because the SHL instructions count is masked
1043270631Sjfv	 * to 5 bits so the shift will do nothing
1044270631Sjfv	 */
1045270631Sjfv	if (ce_info->width < 32)
1046292100Ssmh		mask = BIT(ce_info->width) - 1;
1047270631Sjfv	else
1048291248Ssmh		mask = ~(u32)0;
1049270631Sjfv
1050270631Sjfv	/* shift to correct alignment */
1051270631Sjfv	mask <<= shift_width;
1052270631Sjfv
1053270631Sjfv	/* get the current bits from the src bit string */
1054270631Sjfv	src = hmc_bits + (ce_info->lsb / 8);
1055270631Sjfv
1056270631Sjfv	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
1057270631Sjfv
1058270631Sjfv	/* the data in the memory is stored as little endian so mask it
1059270631Sjfv	 * correctly
1060270631Sjfv	 */
1061270631Sjfv	src_dword &= ~(CPU_TO_LE32(mask));
1062270631Sjfv
1063270631Sjfv	/* get the data back into host order before shifting */
1064270631Sjfv	dest_dword = LE32_TO_CPU(src_dword);
1065270631Sjfv
1066270631Sjfv	dest_dword >>= shift_width;
1067270631Sjfv
1068270631Sjfv	/* get the address from the struct field */
1069270631Sjfv	target = dest + ce_info->offset;
1070270631Sjfv
1071270631Sjfv	/* put it back in the struct */
1072270631Sjfv	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
1073270631Sjfv		    I40E_NONDMA_TO_DMA);
1074270631Sjfv}
1075270631Sjfv
1076270631Sjfv/**
1077270631Sjfv * i40e_read_qword - read HMC context qword into struct
1078270631Sjfv * @hmc_bits: pointer to the HMC memory
1079270631Sjfv * @ce_info: a description of the struct to be filled
1080270631Sjfv * @dest: the struct to be filled
1081270631Sjfv **/
1082270631Sjfvstatic void i40e_read_qword(u8 *hmc_bits,
1083270631Sjfv			    struct i40e_context_ele *ce_info,
1084270631Sjfv			    u8 *dest)
1085270631Sjfv{
1086270631Sjfv	u64 dest_qword, mask;
1087270631Sjfv	u8 *src, *target;
1088270631Sjfv	u16 shift_width;
1089270631Sjfv	__le64 src_qword;
1090270631Sjfv
1091270631Sjfv	/* prepare the bits and mask */
1092270631Sjfv	shift_width = ce_info->lsb % 8;
1093270631Sjfv
1094270631Sjfv	/* if the field width is exactly 64 on an x86 machine, then the shift
1095270631Sjfv	 * operation will not work because the SHL instructions count is masked
1096270631Sjfv	 * to 6 bits so the shift will do nothing
1097270631Sjfv	 */
1098270631Sjfv	if (ce_info->width < 64)
1099292100Ssmh		mask = BIT_ULL(ce_info->width) - 1;
1100270631Sjfv	else
1101291248Ssmh		mask = ~(u64)0;
1102270631Sjfv
1103270631Sjfv	/* shift to correct alignment */
1104270631Sjfv	mask <<= shift_width;
1105270631Sjfv
1106270631Sjfv	/* get the current bits from the src bit string */
1107270631Sjfv	src = hmc_bits + (ce_info->lsb / 8);
1108270631Sjfv
1109270631Sjfv	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
1110270631Sjfv
1111270631Sjfv	/* the data in the memory is stored as little endian so mask it
1112270631Sjfv	 * correctly
1113270631Sjfv	 */
1114270631Sjfv	src_qword &= ~(CPU_TO_LE64(mask));
1115270631Sjfv
1116270631Sjfv	/* get the data back into host order before shifting */
1117270631Sjfv	dest_qword = LE64_TO_CPU(src_qword);
1118270631Sjfv
1119270631Sjfv	dest_qword >>= shift_width;
1120270631Sjfv
1121270631Sjfv	/* get the address from the struct field */
1122270631Sjfv	target = dest + ce_info->offset;
1123270631Sjfv
1124270631Sjfv	/* put it back in the struct */
1125270631Sjfv	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
1126270631Sjfv		    I40E_NONDMA_TO_DMA);
1127270631Sjfv}
1128270631Sjfv
1129270631Sjfv/**
1130270631Sjfv * i40e_get_hmc_context - extract HMC context bits
1131270631Sjfv * @context_bytes: pointer to the context bit array
1132270631Sjfv * @ce_info: a description of the struct to be filled
1133270631Sjfv * @dest: the struct to be filled
1134270631Sjfv **/
1135270631Sjfvstatic enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
1136270631Sjfv					struct i40e_context_ele *ce_info,
1137270631Sjfv					u8 *dest)
1138270631Sjfv{
1139270631Sjfv	int f;
1140270631Sjfv
1141270631Sjfv	for (f = 0; ce_info[f].width != 0; f++) {
1142270631Sjfv		switch (ce_info[f].size_of) {
1143270631Sjfv		case 1:
1144270631Sjfv			i40e_read_byte(context_bytes, &ce_info[f], dest);
1145270631Sjfv			break;
1146270631Sjfv		case 2:
1147270631Sjfv			i40e_read_word(context_bytes, &ce_info[f], dest);
1148270631Sjfv			break;
1149270631Sjfv		case 4:
1150270631Sjfv			i40e_read_dword(context_bytes, &ce_info[f], dest);
1151270631Sjfv			break;
1152270631Sjfv		case 8:
1153270631Sjfv			i40e_read_qword(context_bytes, &ce_info[f], dest);
1154270631Sjfv			break;
1155270631Sjfv		default:
1156270631Sjfv			/* nothing to do, just keep going */
1157270631Sjfv			break;
1158270631Sjfv		}
1159270631Sjfv	}
1160270631Sjfv
1161270631Sjfv	return I40E_SUCCESS;
1162270631Sjfv}
1163270631Sjfv
1164270631Sjfv/**
1165270631Sjfv * i40e_clear_hmc_context - zero out the HMC context bits
1166270631Sjfv * @hw:       the hardware struct
1167270631Sjfv * @context_bytes: pointer to the context bit array (DMA memory)
1168270631Sjfv * @hmc_type: the type of HMC resource
1169270631Sjfv **/
1170270631Sjfvstatic enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
1171270631Sjfv					u8 *context_bytes,
1172270631Sjfv					enum i40e_hmc_lan_rsrc_type hmc_type)
1173270631Sjfv{
1174270631Sjfv	/* clean the bit array */
1175270631Sjfv	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
1176270631Sjfv		    I40E_DMA_MEM);
1177270631Sjfv
1178270631Sjfv	return I40E_SUCCESS;
1179270631Sjfv}
1180270631Sjfv
1181270631Sjfv/**
1182270631Sjfv * i40e_set_hmc_context - replace HMC context bits
1183270631Sjfv * @context_bytes: pointer to the context bit array
1184270631Sjfv * @ce_info:  a description of the struct to be filled
1185270631Sjfv * @dest:     the struct to be filled
1186270631Sjfv **/
1187270631Sjfvstatic enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
1188270631Sjfv					struct i40e_context_ele *ce_info,
1189270631Sjfv					u8 *dest)
1190270631Sjfv{
1191270631Sjfv	int f;
1192270631Sjfv
1193270631Sjfv	for (f = 0; ce_info[f].width != 0; f++) {
1194270631Sjfv
1195270631Sjfv		/* we have to deal with each element of the HMC using the
1196270631Sjfv		 * correct size so that we are correct regardless of the
1197270631Sjfv		 * endianness of the machine
1198270631Sjfv		 */
1199270631Sjfv		switch (ce_info[f].size_of) {
1200270631Sjfv		case 1:
1201270631Sjfv			i40e_write_byte(context_bytes, &ce_info[f], dest);
1202270631Sjfv			break;
1203270631Sjfv		case 2:
1204270631Sjfv			i40e_write_word(context_bytes, &ce_info[f], dest);
1205270631Sjfv			break;
1206270631Sjfv		case 4:
1207270631Sjfv			i40e_write_dword(context_bytes, &ce_info[f], dest);
1208270631Sjfv			break;
1209270631Sjfv		case 8:
1210270631Sjfv			i40e_write_qword(context_bytes, &ce_info[f], dest);
1211270631Sjfv			break;
1212270631Sjfv		}
1213270631Sjfv	}
1214270631Sjfv
1215270631Sjfv	return I40E_SUCCESS;
1216270631Sjfv}
1217270631Sjfv
/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: pointer to the hw structure
 * @object_base: where the object's virtual address (u8 *) is stored
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer.  This function is used for LAN Queue contexts.
 **/
static
enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
					u8 **object_base,
					enum i40e_hmc_lan_rsrc_type rsrc_type,
					u32 obj_idx)
{
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_info     *hmc_info = &hw->hmc;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	/* defensive: hmc_info is &hw->hmc, so this can only trip if hw
	 * itself is bogus
	 */
	if (NULL == hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
		goto exit;
	}
	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	/* signature guards against an uninitialized/torn-down HMC */
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	/* NOTE(review): this logs ret_code before it is assigned, so the
	 * message always reports 0 rather than the index error
	 */
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
			  ret_code);
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	/* byte offset of the object within the function-private memory */
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		/* paged SD: resolve through the page descriptor table */
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		/* direct SD: the backing page is one contiguous buffer */
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}
1293270631Sjfv
1294270631Sjfv/**
1295270631Sjfv * i40e_get_lan_tx_queue_context - return the HMC context for the queue
1296270631Sjfv * @hw:    the hardware struct
1297270631Sjfv * @queue: the queue we care about
1298270631Sjfv * @s:     the struct to be filled
1299270631Sjfv **/
1300270631Sjfvenum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
1301270631Sjfv						    u16 queue,
1302270631Sjfv						    struct i40e_hmc_obj_txq *s)
1303270631Sjfv{
1304270631Sjfv	enum i40e_status_code err;
1305270631Sjfv	u8 *context_bytes;
1306270631Sjfv
1307292095Ssmh	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
1308270631Sjfv	if (err < 0)
1309270631Sjfv		return err;
1310270631Sjfv
1311270631Sjfv	return i40e_get_hmc_context(context_bytes,
1312270631Sjfv				    i40e_hmc_txq_ce_info, (u8 *)s);
1313270631Sjfv}
1314270631Sjfv
1315270631Sjfv/**
1316270631Sjfv * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
1317270631Sjfv * @hw:    the hardware struct
1318270631Sjfv * @queue: the queue we care about
1319270631Sjfv **/
1320270631Sjfvenum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
1321270631Sjfv						      u16 queue)
1322270631Sjfv{
1323270631Sjfv	enum i40e_status_code err;
1324270631Sjfv	u8 *context_bytes;
1325270631Sjfv
1326292095Ssmh	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
1327270631Sjfv	if (err < 0)
1328270631Sjfv		return err;
1329270631Sjfv
1330270631Sjfv	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
1331270631Sjfv}
1332270631Sjfv
1333270631Sjfv/**
1334270631Sjfv * i40e_set_lan_tx_queue_context - set the HMC context for the queue
1335270631Sjfv * @hw:    the hardware struct
1336270631Sjfv * @queue: the queue we care about
1337270631Sjfv * @s:     the struct to be filled
1338270631Sjfv **/
1339270631Sjfvenum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
1340270631Sjfv						    u16 queue,
1341270631Sjfv						    struct i40e_hmc_obj_txq *s)
1342270631Sjfv{
1343270631Sjfv	enum i40e_status_code err;
1344270631Sjfv	u8 *context_bytes;
1345270631Sjfv
1346292095Ssmh	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
1347270631Sjfv	if (err < 0)
1348270631Sjfv		return err;
1349270631Sjfv
1350270631Sjfv	return i40e_set_hmc_context(context_bytes,
1351270631Sjfv				    i40e_hmc_txq_ce_info, (u8 *)s);
1352270631Sjfv}
1353270631Sjfv
1354270631Sjfv/**
1355270631Sjfv * i40e_get_lan_rx_queue_context - return the HMC context for the queue
1356270631Sjfv * @hw:    the hardware struct
1357270631Sjfv * @queue: the queue we care about
1358270631Sjfv * @s:     the struct to be filled
1359270631Sjfv **/
1360270631Sjfvenum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
1361270631Sjfv						    u16 queue,
1362270631Sjfv						    struct i40e_hmc_obj_rxq *s)
1363270631Sjfv{
1364270631Sjfv	enum i40e_status_code err;
1365270631Sjfv	u8 *context_bytes;
1366270631Sjfv
1367292095Ssmh	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
1368270631Sjfv	if (err < 0)
1369270631Sjfv		return err;
1370270631Sjfv
1371270631Sjfv	return i40e_get_hmc_context(context_bytes,
1372270631Sjfv				    i40e_hmc_rxq_ce_info, (u8 *)s);
1373270631Sjfv}
1374270631Sjfv
1375270631Sjfv/**
1376270631Sjfv * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
1377270631Sjfv * @hw:    the hardware struct
1378270631Sjfv * @queue: the queue we care about
1379270631Sjfv **/
1380270631Sjfvenum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
1381270631Sjfv						      u16 queue)
1382270631Sjfv{
1383270631Sjfv	enum i40e_status_code err;
1384270631Sjfv	u8 *context_bytes;
1385270631Sjfv
1386292095Ssmh	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
1387270631Sjfv	if (err < 0)
1388270631Sjfv		return err;
1389270631Sjfv
1390270631Sjfv	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
1391270631Sjfv}
1392270631Sjfv
1393270631Sjfv/**
1394270631Sjfv * i40e_set_lan_rx_queue_context - set the HMC context for the queue
1395270631Sjfv * @hw:    the hardware struct
1396270631Sjfv * @queue: the queue we care about
1397270631Sjfv * @s:     the struct to be filled
1398270631Sjfv **/
1399270631Sjfvenum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
1400270631Sjfv						    u16 queue,
1401270631Sjfv						    struct i40e_hmc_obj_rxq *s)
1402270631Sjfv{
1403270631Sjfv	enum i40e_status_code err;
1404270631Sjfv	u8 *context_bytes;
1405270631Sjfv
1406292095Ssmh	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
1407270631Sjfv	if (err < 0)
1408270631Sjfv		return err;
1409270631Sjfv
1410270631Sjfv	return i40e_set_hmc_context(context_bytes,
1411270631Sjfv				    i40e_hmc_rxq_ce_info, (u8 *)s);
1412270631Sjfv}
1413