1270631Sjfv/******************************************************************************
2270631Sjfv
3270631Sjfv  Copyright (c) 2013-2014, Intel Corporation
4270631Sjfv  All rights reserved.
5270631Sjfv
6270631Sjfv  Redistribution and use in source and binary forms, with or without
7270631Sjfv  modification, are permitted provided that the following conditions are met:
8270631Sjfv
9270631Sjfv   1. Redistributions of source code must retain the above copyright notice,
10270631Sjfv      this list of conditions and the following disclaimer.
11270631Sjfv
12270631Sjfv   2. Redistributions in binary form must reproduce the above copyright
13270631Sjfv      notice, this list of conditions and the following disclaimer in the
14270631Sjfv      documentation and/or other materials provided with the distribution.
15270631Sjfv
16270631Sjfv   3. Neither the name of the Intel Corporation nor the names of its
17270631Sjfv      contributors may be used to endorse or promote products derived from
18270631Sjfv      this software without specific prior written permission.
19270631Sjfv
20270631Sjfv  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21270631Sjfv  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22270631Sjfv  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23270631Sjfv  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24270631Sjfv  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25270631Sjfv  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26270631Sjfv  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27270631Sjfv  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28270631Sjfv  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29270631Sjfv  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30270631Sjfv  POSSIBILITY OF SUCH DAMAGE.
31270631Sjfv
32270631Sjfv******************************************************************************/
33270631Sjfv/*$FreeBSD$*/
34270631Sjfv
35270631Sjfv#include "i40e_osdep.h"
36270631Sjfv#include "i40e_register.h"
37270631Sjfv#include "i40e_type.h"
38270631Sjfv#include "i40e_hmc.h"
39270631Sjfv#include "i40e_lan_hmc.h"
40270631Sjfv#include "i40e_prototype.h"
41270631Sjfv
42270631Sjfv/* lan specific interface functions */
43270631Sjfv
44270631Sjfv/**
45270631Sjfv * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
46270631Sjfv * @offset: base address offset needing alignment
47270631Sjfv *
48270631Sjfv * Aligns the layer 2 function private memory so it's 512-byte aligned.
49270631Sjfv **/
50270631Sjfvstatic u64 i40e_align_l2obj_base(u64 offset)
51270631Sjfv{
52270631Sjfv	u64 aligned_offset = offset;
53270631Sjfv
54270631Sjfv	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
55270631Sjfv		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
56270631Sjfv				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
57270631Sjfv
58270631Sjfv	return aligned_offset;
59270631Sjfv}
60270631Sjfv
61270631Sjfv/**
62270631Sjfv * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
63270631Sjfv * @txq_num: number of Tx queues needing backing context
64270631Sjfv * @rxq_num: number of Rx queues needing backing context
65270631Sjfv * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
66270631Sjfv * @fcoe_filt_num: number of FCoE filters needing backing context
67270631Sjfv *
68270631Sjfv * Calculates the maximum amount of memory for the function required, based
69270631Sjfv * on the number of resources it must provide context for.
70270631Sjfv **/
71270631Sjfvu64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
72270631Sjfv			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
73270631Sjfv{
74270631Sjfv	u64 fpm_size = 0;
75270631Sjfv
76270631Sjfv	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
77270631Sjfv	fpm_size = i40e_align_l2obj_base(fpm_size);
78270631Sjfv
79270631Sjfv	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
80270631Sjfv	fpm_size = i40e_align_l2obj_base(fpm_size);
81270631Sjfv
82270631Sjfv	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
83270631Sjfv	fpm_size = i40e_align_l2obj_base(fpm_size);
84270631Sjfv
85270631Sjfv	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
86270631Sjfv	fpm_size = i40e_align_l2obj_base(fpm_size);
87270631Sjfv
88270631Sjfv	return fpm_size;
89270631Sjfv}
90270631Sjfv
91270631Sjfv/**
92270631Sjfv * i40e_init_lan_hmc - initialize i40e_hmc_info struct
93270631Sjfv * @hw: pointer to the HW structure
94270631Sjfv * @txq_num: number of Tx queues needing backing context
95270631Sjfv * @rxq_num: number of Rx queues needing backing context
96270631Sjfv * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
97270631Sjfv * @fcoe_filt_num: number of FCoE filters needing backing context
98270631Sjfv *
99270631Sjfv * This function will be called once per physical function initialization.
100270631Sjfv * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
101270631Sjfv * the driver's provided input, as well as information from the HMC itself
102270631Sjfv * loaded from NVRAM.
103270631Sjfv *
104270631Sjfv * Assumptions:
105270631Sjfv *   - HMC Resource Profile has been selected before calling this function.
106270631Sjfv **/
107270631Sjfvenum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
108270631Sjfv					u32 rxq_num, u32 fcoe_cntx_num,
109270631Sjfv					u32 fcoe_filt_num)
110270631Sjfv{
111270631Sjfv	struct i40e_hmc_obj_info *obj, *full_obj;
112270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
113270631Sjfv	u64 l2fpm_size;
114270631Sjfv	u32 size_exp;
115270631Sjfv
116270631Sjfv	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
117270631Sjfv	hw->hmc.hmc_fn_id = hw->pf_id;
118270631Sjfv
119270631Sjfv	/* allocate memory for hmc_obj */
120270631Sjfv	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
121270631Sjfv			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
122270631Sjfv	if (ret_code)
123270631Sjfv		goto init_lan_hmc_out;
124270631Sjfv	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
125270631Sjfv			  hw->hmc.hmc_obj_virt_mem.va;
126270631Sjfv
127270631Sjfv	/* The full object will be used to create the LAN HMC SD */
128270631Sjfv	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
129270631Sjfv	full_obj->max_cnt = 0;
130270631Sjfv	full_obj->cnt = 0;
131270631Sjfv	full_obj->base = 0;
132270631Sjfv	full_obj->size = 0;
133270631Sjfv
134270631Sjfv	/* Tx queue context information */
135270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
136270631Sjfv	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
137270631Sjfv	obj->cnt = txq_num;
138270631Sjfv	obj->base = 0;
139270631Sjfv	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
140270631Sjfv	obj->size = (u64)1 << size_exp;
141270631Sjfv
142270631Sjfv	/* validate values requested by driver don't exceed HMC capacity */
143270631Sjfv	if (txq_num > obj->max_cnt) {
144270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
145270631Sjfv		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
146270631Sjfv			  txq_num, obj->max_cnt, ret_code);
147270631Sjfv		goto init_lan_hmc_out;
148270631Sjfv	}
149270631Sjfv
150270631Sjfv	/* aggregate values into the full LAN object for later */
151270631Sjfv	full_obj->max_cnt += obj->max_cnt;
152270631Sjfv	full_obj->cnt += obj->cnt;
153270631Sjfv
154270631Sjfv	/* Rx queue context information */
155270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
156270631Sjfv	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
157270631Sjfv	obj->cnt = rxq_num;
158270631Sjfv	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
159270631Sjfv		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
160270631Sjfv		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
161270631Sjfv	obj->base = i40e_align_l2obj_base(obj->base);
162270631Sjfv	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
163270631Sjfv	obj->size = (u64)1 << size_exp;
164270631Sjfv
165270631Sjfv	/* validate values requested by driver don't exceed HMC capacity */
166270631Sjfv	if (rxq_num > obj->max_cnt) {
167270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
168270631Sjfv		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
169270631Sjfv			  rxq_num, obj->max_cnt, ret_code);
170270631Sjfv		goto init_lan_hmc_out;
171270631Sjfv	}
172270631Sjfv
173270631Sjfv	/* aggregate values into the full LAN object for later */
174270631Sjfv	full_obj->max_cnt += obj->max_cnt;
175270631Sjfv	full_obj->cnt += obj->cnt;
176270631Sjfv
177270631Sjfv	/* FCoE context information */
178270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
179270631Sjfv	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
180270631Sjfv	obj->cnt = fcoe_cntx_num;
181270631Sjfv	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
182270631Sjfv		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
183270631Sjfv		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
184270631Sjfv	obj->base = i40e_align_l2obj_base(obj->base);
185270631Sjfv	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
186270631Sjfv	obj->size = (u64)1 << size_exp;
187270631Sjfv
188270631Sjfv	/* validate values requested by driver don't exceed HMC capacity */
189270631Sjfv	if (fcoe_cntx_num > obj->max_cnt) {
190270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
191270631Sjfv		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
192270631Sjfv			  fcoe_cntx_num, obj->max_cnt, ret_code);
193270631Sjfv		goto init_lan_hmc_out;
194270631Sjfv	}
195270631Sjfv
196270631Sjfv	/* aggregate values into the full LAN object for later */
197270631Sjfv	full_obj->max_cnt += obj->max_cnt;
198270631Sjfv	full_obj->cnt += obj->cnt;
199270631Sjfv
200270631Sjfv	/* FCoE filter information */
201270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
202270631Sjfv	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
203270631Sjfv	obj->cnt = fcoe_filt_num;
204270631Sjfv	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
205270631Sjfv		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
206270631Sjfv		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
207270631Sjfv	obj->base = i40e_align_l2obj_base(obj->base);
208270631Sjfv	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
209270631Sjfv	obj->size = (u64)1 << size_exp;
210270631Sjfv
211270631Sjfv	/* validate values requested by driver don't exceed HMC capacity */
212270631Sjfv	if (fcoe_filt_num > obj->max_cnt) {
213270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
214270631Sjfv		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
215270631Sjfv			  fcoe_filt_num, obj->max_cnt, ret_code);
216270631Sjfv		goto init_lan_hmc_out;
217270631Sjfv	}
218270631Sjfv
219270631Sjfv	/* aggregate values into the full LAN object for later */
220270631Sjfv	full_obj->max_cnt += obj->max_cnt;
221270631Sjfv	full_obj->cnt += obj->cnt;
222270631Sjfv
223270631Sjfv	hw->hmc.first_sd_index = 0;
224270631Sjfv	hw->hmc.sd_table.ref_cnt = 0;
225270631Sjfv	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
226270631Sjfv					       fcoe_filt_num);
227270631Sjfv	if (NULL == hw->hmc.sd_table.sd_entry) {
228270631Sjfv		hw->hmc.sd_table.sd_cnt = (u32)
229270631Sjfv				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
230270631Sjfv				   I40E_HMC_DIRECT_BP_SIZE;
231270631Sjfv
232270631Sjfv		/* allocate the sd_entry members in the sd_table */
233270631Sjfv		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
234270631Sjfv					  (sizeof(struct i40e_hmc_sd_entry) *
235270631Sjfv					  hw->hmc.sd_table.sd_cnt));
236270631Sjfv		if (ret_code)
237270631Sjfv			goto init_lan_hmc_out;
238270631Sjfv		hw->hmc.sd_table.sd_entry =
239270631Sjfv			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
240270631Sjfv	}
241270631Sjfv	/* store in the LAN full object for later */
242270631Sjfv	full_obj->size = l2fpm_size;
243270631Sjfv
244270631Sjfvinit_lan_hmc_out:
245270631Sjfv	return ret_code;
246270631Sjfv}
247270631Sjfv
248270631Sjfv/**
249270631Sjfv * i40e_remove_pd_page - Remove a page from the page descriptor table
250270631Sjfv * @hw: pointer to the HW structure
251270631Sjfv * @hmc_info: pointer to the HMC configuration information structure
252270631Sjfv * @idx: segment descriptor index to find the relevant page descriptor
253270631Sjfv *
254270631Sjfv * This function:
255270631Sjfv *	1. Marks the entry in pd table (for paged address mode) invalid
256270631Sjfv *	2. write to register PMPDINV to invalidate the backing page in FV cache
257270631Sjfv *	3. Decrement the ref count for  pd_entry
258270631Sjfv * assumptions:
259270631Sjfv *	1. caller can deallocate the memory used by pd after this function
260270631Sjfv *	   returns.
261270631Sjfv **/
262270631Sjfvstatic enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
263270631Sjfv						 struct i40e_hmc_info *hmc_info,
264270631Sjfv						 u32 idx)
265270631Sjfv{
266270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
267270631Sjfv
268270631Sjfv	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
269270631Sjfv		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);
270270631Sjfv
271270631Sjfv	return ret_code;
272270631Sjfv}
273270631Sjfv
274270631Sjfv/**
275270631Sjfv * i40e_remove_sd_bp - remove a backing page from a segment descriptor
276270631Sjfv * @hw: pointer to our HW structure
277270631Sjfv * @hmc_info: pointer to the HMC configuration information structure
278270631Sjfv * @idx: the page index
279270631Sjfv *
280270631Sjfv * This function:
281270631Sjfv *	1. Marks the entry in sd table (for direct address mode) invalid
282270631Sjfv *	2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set
283270631Sjfv *	   to 0) and PMSDDATAHIGH to invalidate the sd page
284270631Sjfv *	3. Decrement the ref count for the sd_entry
285270631Sjfv * assumptions:
286270631Sjfv *	1. caller can deallocate the memory used by backing storage after this
287270631Sjfv *	   function returns.
288270631Sjfv **/
289270631Sjfvstatic enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
290270631Sjfv					       struct i40e_hmc_info *hmc_info,
291270631Sjfv					       u32 idx)
292270631Sjfv{
293270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
294270631Sjfv
295270631Sjfv	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
296270631Sjfv		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);
297270631Sjfv
298270631Sjfv	return ret_code;
299270631Sjfv}
300270631Sjfv
301270631Sjfv/**
302270631Sjfv * i40e_create_lan_hmc_object - allocate backing store for hmc objects
303270631Sjfv * @hw: pointer to the HW structure
304270631Sjfv * @info: pointer to i40e_hmc_create_obj_info struct
305270631Sjfv *
306270631Sjfv * This will allocate memory for PDs and backing pages and populate
307270631Sjfv * the sd and pd entries.
308270631Sjfv **/
309270631Sjfvenum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
310270631Sjfv				struct i40e_hmc_lan_create_obj_info *info)
311270631Sjfv{
312270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
313270631Sjfv	struct i40e_hmc_sd_entry *sd_entry;
314270631Sjfv	u32 pd_idx1 = 0, pd_lmt1 = 0;
315270631Sjfv	u32 pd_idx = 0, pd_lmt = 0;
316270631Sjfv	bool pd_error = FALSE;
317270631Sjfv	u32 sd_idx, sd_lmt;
318270631Sjfv	u64 sd_size;
319270631Sjfv	u32 i, j;
320270631Sjfv
321270631Sjfv	if (NULL == info) {
322270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
323270631Sjfv		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
324270631Sjfv		goto exit;
325270631Sjfv	}
326270631Sjfv	if (NULL == info->hmc_info) {
327270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
328270631Sjfv		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
329270631Sjfv		goto exit;
330270631Sjfv	}
331270631Sjfv	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
332270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
333270631Sjfv		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
334270631Sjfv		goto exit;
335270631Sjfv	}
336270631Sjfv
337270631Sjfv	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
338270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
339270631Sjfv		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
340270631Sjfv			  ret_code);
341270631Sjfv		goto exit;
342270631Sjfv	}
343270631Sjfv	if ((info->start_idx + info->count) >
344270631Sjfv	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
345270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
346270631Sjfv		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
347270631Sjfv			  ret_code);
348270631Sjfv		goto exit;
349270631Sjfv	}
350270631Sjfv
351270631Sjfv	/* find sd index and limit */
352270631Sjfv	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
353270631Sjfv				 info->start_idx, info->count,
354270631Sjfv				 &sd_idx, &sd_lmt);
355270631Sjfv	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
356270631Sjfv	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
357270631Sjfv			ret_code = I40E_ERR_INVALID_SD_INDEX;
358270631Sjfv			goto exit;
359270631Sjfv	}
360270631Sjfv	/* find pd index */
361270631Sjfv	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
362270631Sjfv				 info->start_idx, info->count, &pd_idx,
363270631Sjfv				 &pd_lmt);
364270631Sjfv
365270631Sjfv	/* This is to cover for cases where you may not want to have an SD with
366270631Sjfv	 * the full 2M memory but something smaller. By not filling out any
367270631Sjfv	 * size, the function will default the SD size to be 2M.
368270631Sjfv	 */
369270631Sjfv	if (info->direct_mode_sz == 0)
370270631Sjfv		sd_size = I40E_HMC_DIRECT_BP_SIZE;
371270631Sjfv	else
372270631Sjfv		sd_size = info->direct_mode_sz;
373270631Sjfv
374270631Sjfv	/* check if all the sds are valid. If not, allocate a page and
375270631Sjfv	 * initialize it.
376270631Sjfv	 */
377270631Sjfv	for (j = sd_idx; j < sd_lmt; j++) {
378270631Sjfv		/* update the sd table entry */
379270631Sjfv		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
380270631Sjfv						   info->entry_type,
381270631Sjfv						   sd_size);
382270631Sjfv		if (I40E_SUCCESS != ret_code)
383270631Sjfv			goto exit_sd_error;
384270631Sjfv		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
385270631Sjfv		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
386270631Sjfv			/* check if all the pds in this sd are valid. If not,
387270631Sjfv			 * allocate a page and initialize it.
388270631Sjfv			 */
389270631Sjfv
390270631Sjfv			/* find pd_idx and pd_lmt in this sd */
391270631Sjfv			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
392270631Sjfv			pd_lmt1 = min(pd_lmt,
393270631Sjfv				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
394270631Sjfv			for (i = pd_idx1; i < pd_lmt1; i++) {
395270631Sjfv				/* update the pd table entry */
396270631Sjfv				ret_code = i40e_add_pd_table_entry(hw,
397270631Sjfv								info->hmc_info,
398270631Sjfv								i);
399270631Sjfv				if (I40E_SUCCESS != ret_code) {
400270631Sjfv					pd_error = TRUE;
401270631Sjfv					break;
402270631Sjfv				}
403270631Sjfv			}
404270631Sjfv			if (pd_error) {
405270631Sjfv				/* remove the backing pages from pd_idx1 to i */
406270631Sjfv				while (i && (i > pd_idx1)) {
407270631Sjfv					i40e_remove_pd_bp(hw, info->hmc_info,
408270631Sjfv							  (i - 1));
409270631Sjfv					i--;
410270631Sjfv				}
411270631Sjfv			}
412270631Sjfv		}
413270631Sjfv		if (!sd_entry->valid) {
414270631Sjfv			sd_entry->valid = TRUE;
415270631Sjfv			switch (sd_entry->entry_type) {
416270631Sjfv			case I40E_SD_TYPE_PAGED:
417270631Sjfv				I40E_SET_PF_SD_ENTRY(hw,
418270631Sjfv					sd_entry->u.pd_table.pd_page_addr.pa,
419270631Sjfv					j, sd_entry->entry_type);
420270631Sjfv				break;
421270631Sjfv			case I40E_SD_TYPE_DIRECT:
422270631Sjfv				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
423270631Sjfv						     j, sd_entry->entry_type);
424270631Sjfv				break;
425270631Sjfv			default:
426270631Sjfv				ret_code = I40E_ERR_INVALID_SD_TYPE;
427270631Sjfv				goto exit;
428270631Sjfv			}
429270631Sjfv		}
430270631Sjfv	}
431270631Sjfv	goto exit;
432270631Sjfv
433270631Sjfvexit_sd_error:
434270631Sjfv	/* cleanup for sd entries from j to sd_idx */
435270631Sjfv	while (j && (j > sd_idx)) {
436270631Sjfv		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
437270631Sjfv		switch (sd_entry->entry_type) {
438270631Sjfv		case I40E_SD_TYPE_PAGED:
439270631Sjfv			pd_idx1 = max(pd_idx,
440270631Sjfv				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
441270631Sjfv			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
442270631Sjfv			for (i = pd_idx1; i < pd_lmt1; i++) {
443270631Sjfv				i40e_remove_pd_bp(hw, info->hmc_info, i);
444270631Sjfv			}
445270631Sjfv			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
446270631Sjfv			break;
447270631Sjfv		case I40E_SD_TYPE_DIRECT:
448270631Sjfv			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
449270631Sjfv			break;
450270631Sjfv		default:
451270631Sjfv			ret_code = I40E_ERR_INVALID_SD_TYPE;
452270631Sjfv			break;
453270631Sjfv		}
454270631Sjfv		j--;
455270631Sjfv	}
456270631Sjfvexit:
457270631Sjfv	return ret_code;
458270631Sjfv}
459270631Sjfv
460270631Sjfv/**
461270631Sjfv * i40e_configure_lan_hmc - prepare the HMC backing store
462270631Sjfv * @hw: pointer to the hw structure
463270631Sjfv * @model: the model for the layout of the SD/PD tables
464270631Sjfv *
465270631Sjfv * - This function will be called once per physical function initialization.
466270631Sjfv * - This function will be called after i40e_init_lan_hmc() and before
467270631Sjfv *   any LAN/FCoE HMC objects can be created.
468270631Sjfv **/
469270631Sjfvenum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
470270631Sjfv					     enum i40e_hmc_model model)
471270631Sjfv{
472270631Sjfv	struct i40e_hmc_lan_create_obj_info info;
473270631Sjfv	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
474270631Sjfv	struct i40e_hmc_obj_info *obj;
475270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
476270631Sjfv
477270631Sjfv	/* Initialize part of the create object info struct */
478270631Sjfv	info.hmc_info = &hw->hmc;
479270631Sjfv	info.rsrc_type = I40E_HMC_LAN_FULL;
480270631Sjfv	info.start_idx = 0;
481270631Sjfv	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
482270631Sjfv
483270631Sjfv	/* Build the SD entry for the LAN objects */
484270631Sjfv	switch (model) {
485270631Sjfv	case I40E_HMC_MODEL_DIRECT_PREFERRED:
486270631Sjfv	case I40E_HMC_MODEL_DIRECT_ONLY:
487270631Sjfv		info.entry_type = I40E_SD_TYPE_DIRECT;
488270631Sjfv		/* Make one big object, a single SD */
489270631Sjfv		info.count = 1;
490270631Sjfv		ret_code = i40e_create_lan_hmc_object(hw, &info);
491270631Sjfv		if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
492270631Sjfv			goto try_type_paged;
493270631Sjfv		else if (ret_code != I40E_SUCCESS)
494270631Sjfv			goto configure_lan_hmc_out;
495270631Sjfv		/* else clause falls through the break */
496270631Sjfv		break;
497270631Sjfv	case I40E_HMC_MODEL_PAGED_ONLY:
498270631Sjfvtry_type_paged:
499270631Sjfv		info.entry_type = I40E_SD_TYPE_PAGED;
500270631Sjfv		/* Make one big object in the PD table */
501270631Sjfv		info.count = 1;
502270631Sjfv		ret_code = i40e_create_lan_hmc_object(hw, &info);
503270631Sjfv		if (ret_code != I40E_SUCCESS)
504270631Sjfv			goto configure_lan_hmc_out;
505270631Sjfv		break;
506270631Sjfv	default:
507270631Sjfv		/* unsupported type */
508270631Sjfv		ret_code = I40E_ERR_INVALID_SD_TYPE;
509270631Sjfv		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
510270631Sjfv			  ret_code);
511270631Sjfv		goto configure_lan_hmc_out;
512270631Sjfv	}
513270631Sjfv
514270631Sjfv	/* Configure and program the FPM registers so objects can be created */
515270631Sjfv
516270631Sjfv	/* Tx contexts */
517270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
518270631Sjfv	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
519270631Sjfv	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
520270631Sjfv	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
521270631Sjfv
522270631Sjfv	/* Rx contexts */
523270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
524270631Sjfv	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
525270631Sjfv	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
526270631Sjfv	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
527270631Sjfv
528270631Sjfv	/* FCoE contexts */
529270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
530270631Sjfv	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
531270631Sjfv	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
532270631Sjfv	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
533270631Sjfv
534270631Sjfv	/* FCoE filters */
535270631Sjfv	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
536270631Sjfv	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
537270631Sjfv	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
538270631Sjfv	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
539270631Sjfv
540270631Sjfvconfigure_lan_hmc_out:
541270631Sjfv	return ret_code;
542270631Sjfv}
543270631Sjfv
544270631Sjfv/**
545270631Sjfv * i40e_delete_hmc_object - remove hmc objects
546270631Sjfv * @hw: pointer to the HW structure
547270631Sjfv * @info: pointer to i40e_hmc_delete_obj_info struct
548270631Sjfv *
549270631Sjfv * This will de-populate the SDs and PDs.  It frees
550270631Sjfv * the memory for PDS and backing storage.  After this function is returned,
551270631Sjfv * caller should deallocate memory allocated previously for
552270631Sjfv * book-keeping information about PDs and backing storage.
553270631Sjfv **/
554270631Sjfvenum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
555270631Sjfv				struct i40e_hmc_lan_delete_obj_info *info)
556270631Sjfv{
557270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
558270631Sjfv	struct i40e_hmc_pd_table *pd_table;
559270631Sjfv	u32 pd_idx, pd_lmt, rel_pd_idx;
560270631Sjfv	u32 sd_idx, sd_lmt;
561270631Sjfv	u32 i, j;
562270631Sjfv
563270631Sjfv	if (NULL == info) {
564270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
565270631Sjfv		DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
566270631Sjfv		goto exit;
567270631Sjfv	}
568270631Sjfv	if (NULL == info->hmc_info) {
569270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
570270631Sjfv		DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
571270631Sjfv		goto exit;
572270631Sjfv	}
573270631Sjfv	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
574270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
575270631Sjfv		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
576270631Sjfv		goto exit;
577270631Sjfv	}
578270631Sjfv
579270631Sjfv	if (NULL == info->hmc_info->sd_table.sd_entry) {
580270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
581270631Sjfv		DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
582270631Sjfv		goto exit;
583270631Sjfv	}
584270631Sjfv
585270631Sjfv	if (NULL == info->hmc_info->hmc_obj) {
586270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
587270631Sjfv		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
588270631Sjfv		goto exit;
589270631Sjfv	}
590270631Sjfv	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
591270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
592270631Sjfv		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
593270631Sjfv			  ret_code);
594270631Sjfv		goto exit;
595270631Sjfv	}
596270631Sjfv
597270631Sjfv	if ((info->start_idx + info->count) >
598270631Sjfv	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
599270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
600270631Sjfv		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
601270631Sjfv			  ret_code);
602270631Sjfv		goto exit;
603270631Sjfv	}
604270631Sjfv
605270631Sjfv	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
606270631Sjfv				 info->start_idx, info->count, &pd_idx,
607270631Sjfv				 &pd_lmt);
608270631Sjfv
609270631Sjfv	for (j = pd_idx; j < pd_lmt; j++) {
610270631Sjfv		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
611270631Sjfv
612270631Sjfv		if (I40E_SD_TYPE_PAGED !=
613270631Sjfv		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
614270631Sjfv			continue;
615270631Sjfv
616270631Sjfv		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
617270631Sjfv
618270631Sjfv		pd_table =
619270631Sjfv			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
620270631Sjfv		if (pd_table->pd_entry[rel_pd_idx].valid) {
621270631Sjfv			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
622270631Sjfv			if (I40E_SUCCESS != ret_code)
623270631Sjfv				goto exit;
624270631Sjfv		}
625270631Sjfv	}
626270631Sjfv
627270631Sjfv	/* find sd index and limit */
628270631Sjfv	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
629270631Sjfv				 info->start_idx, info->count,
630270631Sjfv				 &sd_idx, &sd_lmt);
631270631Sjfv	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
632270631Sjfv	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
633270631Sjfv		ret_code = I40E_ERR_INVALID_SD_INDEX;
634270631Sjfv		goto exit;
635270631Sjfv	}
636270631Sjfv
637270631Sjfv	for (i = sd_idx; i < sd_lmt; i++) {
638270631Sjfv		if (!info->hmc_info->sd_table.sd_entry[i].valid)
639270631Sjfv			continue;
640270631Sjfv		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
641270631Sjfv		case I40E_SD_TYPE_DIRECT:
642270631Sjfv			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
643270631Sjfv			if (I40E_SUCCESS != ret_code)
644270631Sjfv				goto exit;
645270631Sjfv			break;
646270631Sjfv		case I40E_SD_TYPE_PAGED:
647270631Sjfv			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
648270631Sjfv			if (I40E_SUCCESS != ret_code)
649270631Sjfv				goto exit;
650270631Sjfv			break;
651270631Sjfv		default:
652270631Sjfv			break;
653270631Sjfv		}
654270631Sjfv	}
655270631Sjfvexit:
656270631Sjfv	return ret_code;
657270631Sjfv}
658270631Sjfv
659270631Sjfv/**
660270631Sjfv * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
661270631Sjfv * @hw: pointer to the hw structure
662270631Sjfv *
663270631Sjfv * This must be called by drivers as they are shutting down and being
664270631Sjfv * removed from the OS.
665270631Sjfv **/
666270631Sjfvenum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
667270631Sjfv{
668270631Sjfv	struct i40e_hmc_lan_delete_obj_info info;
669270631Sjfv	enum i40e_status_code ret_code;
670270631Sjfv
671270631Sjfv	info.hmc_info = &hw->hmc;
672270631Sjfv	info.rsrc_type = I40E_HMC_LAN_FULL;
673270631Sjfv	info.start_idx = 0;
674270631Sjfv	info.count = 1;
675270631Sjfv
676270631Sjfv	/* delete the object */
677270631Sjfv	ret_code = i40e_delete_lan_hmc_object(hw, &info);
678270631Sjfv
679270631Sjfv	/* free the SD table entry for LAN */
680270631Sjfv	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
681270631Sjfv	hw->hmc.sd_table.sd_cnt = 0;
682270631Sjfv	hw->hmc.sd_table.sd_entry = NULL;
683270631Sjfv
684270631Sjfv	/* free memory used for hmc_obj */
685270631Sjfv	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
686270631Sjfv	hw->hmc.hmc_obj = NULL;
687270631Sjfv
688270631Sjfv	return ret_code;
689270631Sjfv}
690270631Sjfv
691270631Sjfv#define I40E_HMC_STORE(_struct, _ele)		\
692270631Sjfv	offsetof(struct _struct, _ele),		\
693270631Sjfv	FIELD_SIZEOF(struct _struct, _ele)
694270631Sjfv
695270631Sjfvstruct i40e_context_ele {
696270631Sjfv	u16 offset;
697270631Sjfv	u16 size_of;
698270631Sjfv	u16 width;
699270631Sjfv	u16 lsb;
700270631Sjfv};
701270631Sjfv
702270631Sjfv/* LAN Tx Queue Context */
703270631Sjfvstatic struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
704270631Sjfv					     /* Field      Width    LSB */
705270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
706270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
707270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
708270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
709270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
710270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
711270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
712270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
713270631Sjfv/* line 1 */
714270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
715270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
716270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
717270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
718270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
719270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
720270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
721270631Sjfv/* line 7 */
722270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
723270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
724270631Sjfv	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
725270631Sjfv	{ 0 }
726270631Sjfv};
727270631Sjfv
/* LAN Rx Queue Context
 *
 * Maps each member of struct i40e_hmc_obj_rxq to its bit position and
 * width inside the packed HMC Rx queue context.  The table is walked by
 * i40e_get_hmc_context()/i40e_set_hmc_context() and is terminated by a
 * zero-width sentinel entry.
 */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
	{ 0 }	/* zero-width sentinel terminates the table */
};
754270631Sjfv
755270631Sjfv/**
756270631Sjfv * i40e_write_byte - replace HMC context byte
757270631Sjfv * @hmc_bits: pointer to the HMC memory
758270631Sjfv * @ce_info: a description of the struct to be read from
759270631Sjfv * @src: the struct to be read from
760270631Sjfv **/
761270631Sjfvstatic void i40e_write_byte(u8 *hmc_bits,
762270631Sjfv			    struct i40e_context_ele *ce_info,
763270631Sjfv			    u8 *src)
764270631Sjfv{
765270631Sjfv	u8 src_byte, dest_byte, mask;
766270631Sjfv	u8 *from, *dest;
767270631Sjfv	u16 shift_width;
768270631Sjfv
769270631Sjfv	/* copy from the next struct field */
770270631Sjfv	from = src + ce_info->offset;
771270631Sjfv
772270631Sjfv	/* prepare the bits and mask */
773270631Sjfv	shift_width = ce_info->lsb % 8;
774270631Sjfv	mask = ((u8)1 << ce_info->width) - 1;
775270631Sjfv
776270631Sjfv	src_byte = *from;
777270631Sjfv	src_byte &= mask;
778270631Sjfv
779270631Sjfv	/* shift to correct alignment */
780270631Sjfv	mask <<= shift_width;
781270631Sjfv	src_byte <<= shift_width;
782270631Sjfv
783270631Sjfv	/* get the current bits from the target bit string */
784270631Sjfv	dest = hmc_bits + (ce_info->lsb / 8);
785270631Sjfv
786270631Sjfv	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
787270631Sjfv
788270631Sjfv	dest_byte &= ~mask;	/* get the bits not changing */
789270631Sjfv	dest_byte |= src_byte;	/* add in the new bits */
790270631Sjfv
791270631Sjfv	/* put it all back */
792270631Sjfv	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
793270631Sjfv}
794270631Sjfv
795270631Sjfv/**
796270631Sjfv * i40e_write_word - replace HMC context word
797270631Sjfv * @hmc_bits: pointer to the HMC memory
798270631Sjfv * @ce_info: a description of the struct to be read from
799270631Sjfv * @src: the struct to be read from
800270631Sjfv **/
801270631Sjfvstatic void i40e_write_word(u8 *hmc_bits,
802270631Sjfv			    struct i40e_context_ele *ce_info,
803270631Sjfv			    u8 *src)
804270631Sjfv{
805270631Sjfv	u16 src_word, mask;
806270631Sjfv	u8 *from, *dest;
807270631Sjfv	u16 shift_width;
808270631Sjfv	__le16 dest_word;
809270631Sjfv
810270631Sjfv	/* copy from the next struct field */
811270631Sjfv	from = src + ce_info->offset;
812270631Sjfv
813270631Sjfv	/* prepare the bits and mask */
814270631Sjfv	shift_width = ce_info->lsb % 8;
815270631Sjfv	mask = ((u16)1 << ce_info->width) - 1;
816270631Sjfv
817270631Sjfv	/* don't swizzle the bits until after the mask because the mask bits
818270631Sjfv	 * will be in a different bit position on big endian machines
819270631Sjfv	 */
820270631Sjfv	src_word = *(u16 *)from;
821270631Sjfv	src_word &= mask;
822270631Sjfv
823270631Sjfv	/* shift to correct alignment */
824270631Sjfv	mask <<= shift_width;
825270631Sjfv	src_word <<= shift_width;
826270631Sjfv
827270631Sjfv	/* get the current bits from the target bit string */
828270631Sjfv	dest = hmc_bits + (ce_info->lsb / 8);
829270631Sjfv
830270631Sjfv	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
831270631Sjfv
832270631Sjfv	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
833270631Sjfv	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
834270631Sjfv
835270631Sjfv	/* put it all back */
836270631Sjfv	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
837270631Sjfv}
838270631Sjfv
839270631Sjfv/**
840270631Sjfv * i40e_write_dword - replace HMC context dword
841270631Sjfv * @hmc_bits: pointer to the HMC memory
842270631Sjfv * @ce_info: a description of the struct to be read from
843270631Sjfv * @src: the struct to be read from
844270631Sjfv **/
845270631Sjfvstatic void i40e_write_dword(u8 *hmc_bits,
846270631Sjfv			     struct i40e_context_ele *ce_info,
847270631Sjfv			     u8 *src)
848270631Sjfv{
849270631Sjfv	u32 src_dword, mask;
850270631Sjfv	u8 *from, *dest;
851270631Sjfv	u16 shift_width;
852270631Sjfv	__le32 dest_dword;
853270631Sjfv
854270631Sjfv	/* copy from the next struct field */
855270631Sjfv	from = src + ce_info->offset;
856270631Sjfv
857270631Sjfv	/* prepare the bits and mask */
858270631Sjfv	shift_width = ce_info->lsb % 8;
859270631Sjfv
860270631Sjfv	/* if the field width is exactly 32 on an x86 machine, then the shift
861270631Sjfv	 * operation will not work because the SHL instructions count is masked
862270631Sjfv	 * to 5 bits so the shift will do nothing
863270631Sjfv	 */
864270631Sjfv	if (ce_info->width < 32)
865270631Sjfv		mask = ((u32)1 << ce_info->width) - 1;
866270631Sjfv	else
867270631Sjfv		mask = 0xFFFFFFFF;
868270631Sjfv
869270631Sjfv	/* don't swizzle the bits until after the mask because the mask bits
870270631Sjfv	 * will be in a different bit position on big endian machines
871270631Sjfv	 */
872270631Sjfv	src_dword = *(u32 *)from;
873270631Sjfv	src_dword &= mask;
874270631Sjfv
875270631Sjfv	/* shift to correct alignment */
876270631Sjfv	mask <<= shift_width;
877270631Sjfv	src_dword <<= shift_width;
878270631Sjfv
879270631Sjfv	/* get the current bits from the target bit string */
880270631Sjfv	dest = hmc_bits + (ce_info->lsb / 8);
881270631Sjfv
882270631Sjfv	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
883270631Sjfv
884270631Sjfv	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
885270631Sjfv	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
886270631Sjfv
887270631Sjfv	/* put it all back */
888270631Sjfv	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
889270631Sjfv}
890270631Sjfv
891270631Sjfv/**
892270631Sjfv * i40e_write_qword - replace HMC context qword
893270631Sjfv * @hmc_bits: pointer to the HMC memory
894270631Sjfv * @ce_info: a description of the struct to be read from
895270631Sjfv * @src: the struct to be read from
896270631Sjfv **/
897270631Sjfvstatic void i40e_write_qword(u8 *hmc_bits,
898270631Sjfv			     struct i40e_context_ele *ce_info,
899270631Sjfv			     u8 *src)
900270631Sjfv{
901270631Sjfv	u64 src_qword, mask;
902270631Sjfv	u8 *from, *dest;
903270631Sjfv	u16 shift_width;
904270631Sjfv	__le64 dest_qword;
905270631Sjfv
906270631Sjfv	/* copy from the next struct field */
907270631Sjfv	from = src + ce_info->offset;
908270631Sjfv
909270631Sjfv	/* prepare the bits and mask */
910270631Sjfv	shift_width = ce_info->lsb % 8;
911270631Sjfv
912270631Sjfv	/* if the field width is exactly 64 on an x86 machine, then the shift
913270631Sjfv	 * operation will not work because the SHL instructions count is masked
914270631Sjfv	 * to 6 bits so the shift will do nothing
915270631Sjfv	 */
916270631Sjfv	if (ce_info->width < 64)
917270631Sjfv		mask = ((u64)1 << ce_info->width) - 1;
918270631Sjfv	else
919270631Sjfv		mask = 0xFFFFFFFFFFFFFFFFUL;
920270631Sjfv
921270631Sjfv	/* don't swizzle the bits until after the mask because the mask bits
922270631Sjfv	 * will be in a different bit position on big endian machines
923270631Sjfv	 */
924270631Sjfv	src_qword = *(u64 *)from;
925270631Sjfv	src_qword &= mask;
926270631Sjfv
927270631Sjfv	/* shift to correct alignment */
928270631Sjfv	mask <<= shift_width;
929270631Sjfv	src_qword <<= shift_width;
930270631Sjfv
931270631Sjfv	/* get the current bits from the target bit string */
932270631Sjfv	dest = hmc_bits + (ce_info->lsb / 8);
933270631Sjfv
934270631Sjfv	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
935270631Sjfv
936270631Sjfv	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
937270631Sjfv	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
938270631Sjfv
939270631Sjfv	/* put it all back */
940270631Sjfv	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
941270631Sjfv}
942270631Sjfv
943270631Sjfv/**
944270631Sjfv * i40e_read_byte - read HMC context byte into struct
945270631Sjfv * @hmc_bits: pointer to the HMC memory
946270631Sjfv * @ce_info: a description of the struct to be filled
947270631Sjfv * @dest: the struct to be filled
948270631Sjfv **/
949270631Sjfvstatic void i40e_read_byte(u8 *hmc_bits,
950270631Sjfv			   struct i40e_context_ele *ce_info,
951270631Sjfv			   u8 *dest)
952270631Sjfv{
953270631Sjfv	u8 dest_byte, mask;
954270631Sjfv	u8 *src, *target;
955270631Sjfv	u16 shift_width;
956270631Sjfv
957270631Sjfv	/* prepare the bits and mask */
958270631Sjfv	shift_width = ce_info->lsb % 8;
959270631Sjfv	mask = ((u8)1 << ce_info->width) - 1;
960270631Sjfv
961270631Sjfv	/* shift to correct alignment */
962270631Sjfv	mask <<= shift_width;
963270631Sjfv
964270631Sjfv	/* get the current bits from the src bit string */
965270631Sjfv	src = hmc_bits + (ce_info->lsb / 8);
966270631Sjfv
967270631Sjfv	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
968270631Sjfv
969270631Sjfv	dest_byte &= ~(mask);
970270631Sjfv
971270631Sjfv	dest_byte >>= shift_width;
972270631Sjfv
973270631Sjfv	/* get the address from the struct field */
974270631Sjfv	target = dest + ce_info->offset;
975270631Sjfv
976270631Sjfv	/* put it back in the struct */
977270631Sjfv	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
978270631Sjfv}
979270631Sjfv
980270631Sjfv/**
981270631Sjfv * i40e_read_word - read HMC context word into struct
982270631Sjfv * @hmc_bits: pointer to the HMC memory
983270631Sjfv * @ce_info: a description of the struct to be filled
984270631Sjfv * @dest: the struct to be filled
985270631Sjfv **/
986270631Sjfvstatic void i40e_read_word(u8 *hmc_bits,
987270631Sjfv			   struct i40e_context_ele *ce_info,
988270631Sjfv			   u8 *dest)
989270631Sjfv{
990270631Sjfv	u16 dest_word, mask;
991270631Sjfv	u8 *src, *target;
992270631Sjfv	u16 shift_width;
993270631Sjfv	__le16 src_word;
994270631Sjfv
995270631Sjfv	/* prepare the bits and mask */
996270631Sjfv	shift_width = ce_info->lsb % 8;
997270631Sjfv	mask = ((u16)1 << ce_info->width) - 1;
998270631Sjfv
999270631Sjfv	/* shift to correct alignment */
1000270631Sjfv	mask <<= shift_width;
1001270631Sjfv
1002270631Sjfv	/* get the current bits from the src bit string */
1003270631Sjfv	src = hmc_bits + (ce_info->lsb / 8);
1004270631Sjfv
1005270631Sjfv	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
1006270631Sjfv
1007270631Sjfv	/* the data in the memory is stored as little endian so mask it
1008270631Sjfv	 * correctly
1009270631Sjfv	 */
1010270631Sjfv	src_word &= ~(CPU_TO_LE16(mask));
1011270631Sjfv
1012270631Sjfv	/* get the data back into host order before shifting */
1013270631Sjfv	dest_word = LE16_TO_CPU(src_word);
1014270631Sjfv
1015270631Sjfv	dest_word >>= shift_width;
1016270631Sjfv
1017270631Sjfv	/* get the address from the struct field */
1018270631Sjfv	target = dest + ce_info->offset;
1019270631Sjfv
1020270631Sjfv	/* put it back in the struct */
1021270631Sjfv	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
1022270631Sjfv}
1023270631Sjfv
1024270631Sjfv/**
1025270631Sjfv * i40e_read_dword - read HMC context dword into struct
1026270631Sjfv * @hmc_bits: pointer to the HMC memory
1027270631Sjfv * @ce_info: a description of the struct to be filled
1028270631Sjfv * @dest: the struct to be filled
1029270631Sjfv **/
1030270631Sjfvstatic void i40e_read_dword(u8 *hmc_bits,
1031270631Sjfv			    struct i40e_context_ele *ce_info,
1032270631Sjfv			    u8 *dest)
1033270631Sjfv{
1034270631Sjfv	u32 dest_dword, mask;
1035270631Sjfv	u8 *src, *target;
1036270631Sjfv	u16 shift_width;
1037270631Sjfv	__le32 src_dword;
1038270631Sjfv
1039270631Sjfv	/* prepare the bits and mask */
1040270631Sjfv	shift_width = ce_info->lsb % 8;
1041270631Sjfv
1042270631Sjfv	/* if the field width is exactly 32 on an x86 machine, then the shift
1043270631Sjfv	 * operation will not work because the SHL instructions count is masked
1044270631Sjfv	 * to 5 bits so the shift will do nothing
1045270631Sjfv	 */
1046270631Sjfv	if (ce_info->width < 32)
1047270631Sjfv		mask = ((u32)1 << ce_info->width) - 1;
1048270631Sjfv	else
1049270631Sjfv		mask = 0xFFFFFFFF;
1050270631Sjfv
1051270631Sjfv	/* shift to correct alignment */
1052270631Sjfv	mask <<= shift_width;
1053270631Sjfv
1054270631Sjfv	/* get the current bits from the src bit string */
1055270631Sjfv	src = hmc_bits + (ce_info->lsb / 8);
1056270631Sjfv
1057270631Sjfv	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
1058270631Sjfv
1059270631Sjfv	/* the data in the memory is stored as little endian so mask it
1060270631Sjfv	 * correctly
1061270631Sjfv	 */
1062270631Sjfv	src_dword &= ~(CPU_TO_LE32(mask));
1063270631Sjfv
1064270631Sjfv	/* get the data back into host order before shifting */
1065270631Sjfv	dest_dword = LE32_TO_CPU(src_dword);
1066270631Sjfv
1067270631Sjfv	dest_dword >>= shift_width;
1068270631Sjfv
1069270631Sjfv	/* get the address from the struct field */
1070270631Sjfv	target = dest + ce_info->offset;
1071270631Sjfv
1072270631Sjfv	/* put it back in the struct */
1073270631Sjfv	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
1074270631Sjfv		    I40E_NONDMA_TO_DMA);
1075270631Sjfv}
1076270631Sjfv
1077270631Sjfv/**
1078270631Sjfv * i40e_read_qword - read HMC context qword into struct
1079270631Sjfv * @hmc_bits: pointer to the HMC memory
1080270631Sjfv * @ce_info: a description of the struct to be filled
1081270631Sjfv * @dest: the struct to be filled
1082270631Sjfv **/
1083270631Sjfvstatic void i40e_read_qword(u8 *hmc_bits,
1084270631Sjfv			    struct i40e_context_ele *ce_info,
1085270631Sjfv			    u8 *dest)
1086270631Sjfv{
1087270631Sjfv	u64 dest_qword, mask;
1088270631Sjfv	u8 *src, *target;
1089270631Sjfv	u16 shift_width;
1090270631Sjfv	__le64 src_qword;
1091270631Sjfv
1092270631Sjfv	/* prepare the bits and mask */
1093270631Sjfv	shift_width = ce_info->lsb % 8;
1094270631Sjfv
1095270631Sjfv	/* if the field width is exactly 64 on an x86 machine, then the shift
1096270631Sjfv	 * operation will not work because the SHL instructions count is masked
1097270631Sjfv	 * to 6 bits so the shift will do nothing
1098270631Sjfv	 */
1099270631Sjfv	if (ce_info->width < 64)
1100270631Sjfv		mask = ((u64)1 << ce_info->width) - 1;
1101270631Sjfv	else
1102270631Sjfv		mask = 0xFFFFFFFFFFFFFFFFUL;
1103270631Sjfv
1104270631Sjfv	/* shift to correct alignment */
1105270631Sjfv	mask <<= shift_width;
1106270631Sjfv
1107270631Sjfv	/* get the current bits from the src bit string */
1108270631Sjfv	src = hmc_bits + (ce_info->lsb / 8);
1109270631Sjfv
1110270631Sjfv	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
1111270631Sjfv
1112270631Sjfv	/* the data in the memory is stored as little endian so mask it
1113270631Sjfv	 * correctly
1114270631Sjfv	 */
1115270631Sjfv	src_qword &= ~(CPU_TO_LE64(mask));
1116270631Sjfv
1117270631Sjfv	/* get the data back into host order before shifting */
1118270631Sjfv	dest_qword = LE64_TO_CPU(src_qword);
1119270631Sjfv
1120270631Sjfv	dest_qword >>= shift_width;
1121270631Sjfv
1122270631Sjfv	/* get the address from the struct field */
1123270631Sjfv	target = dest + ce_info->offset;
1124270631Sjfv
1125270631Sjfv	/* put it back in the struct */
1126270631Sjfv	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
1127270631Sjfv		    I40E_NONDMA_TO_DMA);
1128270631Sjfv}
1129270631Sjfv
1130270631Sjfv/**
1131270631Sjfv * i40e_get_hmc_context - extract HMC context bits
1132270631Sjfv * @context_bytes: pointer to the context bit array
1133270631Sjfv * @ce_info: a description of the struct to be filled
1134270631Sjfv * @dest: the struct to be filled
1135270631Sjfv **/
1136270631Sjfvstatic enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
1137270631Sjfv					struct i40e_context_ele *ce_info,
1138270631Sjfv					u8 *dest)
1139270631Sjfv{
1140270631Sjfv	int f;
1141270631Sjfv
1142270631Sjfv	for (f = 0; ce_info[f].width != 0; f++) {
1143270631Sjfv		switch (ce_info[f].size_of) {
1144270631Sjfv		case 1:
1145270631Sjfv			i40e_read_byte(context_bytes, &ce_info[f], dest);
1146270631Sjfv			break;
1147270631Sjfv		case 2:
1148270631Sjfv			i40e_read_word(context_bytes, &ce_info[f], dest);
1149270631Sjfv			break;
1150270631Sjfv		case 4:
1151270631Sjfv			i40e_read_dword(context_bytes, &ce_info[f], dest);
1152270631Sjfv			break;
1153270631Sjfv		case 8:
1154270631Sjfv			i40e_read_qword(context_bytes, &ce_info[f], dest);
1155270631Sjfv			break;
1156270631Sjfv		default:
1157270631Sjfv			/* nothing to do, just keep going */
1158270631Sjfv			break;
1159270631Sjfv		}
1160270631Sjfv	}
1161270631Sjfv
1162270631Sjfv	return I40E_SUCCESS;
1163270631Sjfv}
1164270631Sjfv
/**
 * i40e_clear_hmc_context - zero out the HMC context bits
 * @hw:       the hardware struct
 * @context_bytes: pointer to the context bit array (DMA memory)
 * @hmc_type: the type of HMC resource
 *
 * Zeroes the entire context image for one object of the given type;
 * the object size comes from the hw's HMC object descriptor table.
 * Always returns I40E_SUCCESS.
 **/
static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
					u8 *context_bytes,
					enum i40e_hmc_lan_rsrc_type hmc_type)
{
	/* clean the bit array */
	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
		    I40E_DMA_MEM);

	return I40E_SUCCESS;
}
1181270631Sjfv
1182270631Sjfv/**
1183270631Sjfv * i40e_set_hmc_context - replace HMC context bits
1184270631Sjfv * @context_bytes: pointer to the context bit array
1185270631Sjfv * @ce_info:  a description of the struct to be filled
1186270631Sjfv * @dest:     the struct to be filled
1187270631Sjfv **/
1188270631Sjfvstatic enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
1189270631Sjfv					struct i40e_context_ele *ce_info,
1190270631Sjfv					u8 *dest)
1191270631Sjfv{
1192270631Sjfv	int f;
1193270631Sjfv
1194270631Sjfv	for (f = 0; ce_info[f].width != 0; f++) {
1195270631Sjfv
1196270631Sjfv		/* we have to deal with each element of the HMC using the
1197270631Sjfv		 * correct size so that we are correct regardless of the
1198270631Sjfv		 * endianness of the machine
1199270631Sjfv		 */
1200270631Sjfv		switch (ce_info[f].size_of) {
1201270631Sjfv		case 1:
1202270631Sjfv			i40e_write_byte(context_bytes, &ce_info[f], dest);
1203270631Sjfv			break;
1204270631Sjfv		case 2:
1205270631Sjfv			i40e_write_word(context_bytes, &ce_info[f], dest);
1206270631Sjfv			break;
1207270631Sjfv		case 4:
1208270631Sjfv			i40e_write_dword(context_bytes, &ce_info[f], dest);
1209270631Sjfv			break;
1210270631Sjfv		case 8:
1211270631Sjfv			i40e_write_qword(context_bytes, &ce_info[f], dest);
1212270631Sjfv			break;
1213270631Sjfv		}
1214270631Sjfv	}
1215270631Sjfv
1216270631Sjfv	return I40E_SUCCESS;
1217270631Sjfv}
1218270631Sjfv
1219270631Sjfv/**
1220270631Sjfv * i40e_hmc_get_object_va - retrieves an object's virtual address
1221270631Sjfv * @hmc_info: pointer to i40e_hmc_info struct
1222270631Sjfv * @object_base: pointer to u64 to get the va
1223270631Sjfv * @rsrc_type: the hmc resource type
1224270631Sjfv * @obj_idx: hmc object index
1225270631Sjfv *
1226270631Sjfv * This function retrieves the object's virtual address from the object
1227270631Sjfv * base pointer.  This function is used for LAN Queue contexts.
1228270631Sjfv **/
1229270631Sjfvstatic
1230270631Sjfvenum i40e_status_code i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
1231270631Sjfv					u8 **object_base,
1232270631Sjfv					enum i40e_hmc_lan_rsrc_type rsrc_type,
1233270631Sjfv					u32 obj_idx)
1234270631Sjfv{
1235270631Sjfv	u32 obj_offset_in_sd, obj_offset_in_pd;
1236270631Sjfv	struct i40e_hmc_sd_entry *sd_entry;
1237270631Sjfv	struct i40e_hmc_pd_entry *pd_entry;
1238270631Sjfv	u32 pd_idx, pd_lmt, rel_pd_idx;
1239270631Sjfv	enum i40e_status_code ret_code = I40E_SUCCESS;
1240270631Sjfv	u64 obj_offset_in_fpm;
1241270631Sjfv	u32 sd_idx, sd_lmt;
1242270631Sjfv
1243270631Sjfv	if (NULL == hmc_info) {
1244270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
1245270631Sjfv		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
1246270631Sjfv		goto exit;
1247270631Sjfv	}
1248270631Sjfv	if (NULL == hmc_info->hmc_obj) {
1249270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
1250270631Sjfv		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
1251270631Sjfv		goto exit;
1252270631Sjfv	}
1253270631Sjfv	if (NULL == object_base) {
1254270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
1255270631Sjfv		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
1256270631Sjfv		goto exit;
1257270631Sjfv	}
1258270631Sjfv	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
1259270631Sjfv		ret_code = I40E_ERR_BAD_PTR;
1260270631Sjfv		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
1261270631Sjfv		goto exit;
1262270631Sjfv	}
1263270631Sjfv	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
1264270631Sjfv		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
1265270631Sjfv			  ret_code);
1266270631Sjfv		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
1267270631Sjfv		goto exit;
1268270631Sjfv	}
1269270631Sjfv	/* find sd index and limit */
1270270631Sjfv	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
1271270631Sjfv				 &sd_idx, &sd_lmt);
1272270631Sjfv
1273270631Sjfv	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
1274270631Sjfv	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
1275270631Sjfv			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;
1276270631Sjfv
1277270631Sjfv	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
1278270631Sjfv		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
1279270631Sjfv					 &pd_idx, &pd_lmt);
1280270631Sjfv		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
1281270631Sjfv		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
1282270631Sjfv		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
1283270631Sjfv					 I40E_HMC_PAGED_BP_SIZE);
1284270631Sjfv		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
1285270631Sjfv	} else {
1286270631Sjfv		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
1287270631Sjfv					 I40E_HMC_DIRECT_BP_SIZE);
1288270631Sjfv		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
1289270631Sjfv	}
1290270631Sjfvexit:
1291270631Sjfv	return ret_code;
1292270631Sjfv}
1293270631Sjfv
1294270631Sjfv/**
1295270631Sjfv * i40e_get_lan_tx_queue_context - return the HMC context for the queue
1296270631Sjfv * @hw:    the hardware struct
1297270631Sjfv * @queue: the queue we care about
1298270631Sjfv * @s:     the struct to be filled
1299270631Sjfv **/
1300270631Sjfvenum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
1301270631Sjfv						    u16 queue,
1302270631Sjfv						    struct i40e_hmc_obj_txq *s)
1303270631Sjfv{
1304270631Sjfv	enum i40e_status_code err;
1305270631Sjfv	u8 *context_bytes;
1306270631Sjfv
1307270631Sjfv	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1308270631Sjfv				     I40E_HMC_LAN_TX, queue);
1309270631Sjfv	if (err < 0)
1310270631Sjfv		return err;
1311270631Sjfv
1312270631Sjfv	return i40e_get_hmc_context(context_bytes,
1313270631Sjfv				    i40e_hmc_txq_ce_info, (u8 *)s);
1314270631Sjfv}
1315270631Sjfv
1316270631Sjfv/**
1317270631Sjfv * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
1318270631Sjfv * @hw:    the hardware struct
1319270631Sjfv * @queue: the queue we care about
1320270631Sjfv **/
1321270631Sjfvenum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
1322270631Sjfv						      u16 queue)
1323270631Sjfv{
1324270631Sjfv	enum i40e_status_code err;
1325270631Sjfv	u8 *context_bytes;
1326270631Sjfv
1327270631Sjfv	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1328270631Sjfv				     I40E_HMC_LAN_TX, queue);
1329270631Sjfv	if (err < 0)
1330270631Sjfv		return err;
1331270631Sjfv
1332270631Sjfv	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
1333270631Sjfv}
1334270631Sjfv
1335270631Sjfv/**
1336270631Sjfv * i40e_set_lan_tx_queue_context - set the HMC context for the queue
1337270631Sjfv * @hw:    the hardware struct
1338270631Sjfv * @queue: the queue we care about
1339270631Sjfv * @s:     the struct to be filled
1340270631Sjfv **/
1341270631Sjfvenum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
1342270631Sjfv						    u16 queue,
1343270631Sjfv						    struct i40e_hmc_obj_txq *s)
1344270631Sjfv{
1345270631Sjfv	enum i40e_status_code err;
1346270631Sjfv	u8 *context_bytes;
1347270631Sjfv
1348270631Sjfv	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1349270631Sjfv				     I40E_HMC_LAN_TX, queue);
1350270631Sjfv	if (err < 0)
1351270631Sjfv		return err;
1352270631Sjfv
1353270631Sjfv	return i40e_set_hmc_context(context_bytes,
1354270631Sjfv				    i40e_hmc_txq_ce_info, (u8 *)s);
1355270631Sjfv}
1356270631Sjfv
1357270631Sjfv/**
1358270631Sjfv * i40e_get_lan_rx_queue_context - return the HMC context for the queue
1359270631Sjfv * @hw:    the hardware struct
1360270631Sjfv * @queue: the queue we care about
1361270631Sjfv * @s:     the struct to be filled
1362270631Sjfv **/
1363270631Sjfvenum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
1364270631Sjfv						    u16 queue,
1365270631Sjfv						    struct i40e_hmc_obj_rxq *s)
1366270631Sjfv{
1367270631Sjfv	enum i40e_status_code err;
1368270631Sjfv	u8 *context_bytes;
1369270631Sjfv
1370270631Sjfv	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1371270631Sjfv				     I40E_HMC_LAN_RX, queue);
1372270631Sjfv	if (err < 0)
1373270631Sjfv		return err;
1374270631Sjfv
1375270631Sjfv	return i40e_get_hmc_context(context_bytes,
1376270631Sjfv				    i40e_hmc_rxq_ce_info, (u8 *)s);
1377270631Sjfv}
1378270631Sjfv
1379270631Sjfv/**
1380270631Sjfv * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
1381270631Sjfv * @hw:    the hardware struct
1382270631Sjfv * @queue: the queue we care about
1383270631Sjfv **/
1384270631Sjfvenum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
1385270631Sjfv						      u16 queue)
1386270631Sjfv{
1387270631Sjfv	enum i40e_status_code err;
1388270631Sjfv	u8 *context_bytes;
1389270631Sjfv
1390270631Sjfv	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1391270631Sjfv				     I40E_HMC_LAN_RX, queue);
1392270631Sjfv	if (err < 0)
1393270631Sjfv		return err;
1394270631Sjfv
1395270631Sjfv	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
1396270631Sjfv}
1397270631Sjfv
1398270631Sjfv/**
1399270631Sjfv * i40e_set_lan_rx_queue_context - set the HMC context for the queue
1400270631Sjfv * @hw:    the hardware struct
1401270631Sjfv * @queue: the queue we care about
1402270631Sjfv * @s:     the struct to be filled
1403270631Sjfv **/
1404270631Sjfvenum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
1405270631Sjfv						    u16 queue,
1406270631Sjfv						    struct i40e_hmc_obj_rxq *s)
1407270631Sjfv{
1408270631Sjfv	enum i40e_status_code err;
1409270631Sjfv	u8 *context_bytes;
1410270631Sjfv
1411270631Sjfv	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
1412270631Sjfv				     I40E_HMC_LAN_RX, queue);
1413270631Sjfv	if (err < 0)
1414270631Sjfv		return err;
1415270631Sjfv
1416270631Sjfv	return i40e_set_hmc_context(context_bytes,
1417270631Sjfv				    i40e_hmc_rxq_ce_info, (u8 *)s);
1418270631Sjfv}
1419