/* i40e_lan_hmc.c revision 292100 */
/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixl/i40e_lan_hmc.c 292100 2015-12-11 13:08:38Z smh $*/

35136581Sobrien#include "i40e_osdep.h"
36136581Sobrien#include "i40e_register.h"
3774462Salfred#include "i40e_type.h"
3874462Salfred#include "i40e_hmc.h"
3992990Sobrien#include "i40e_lan_hmc.h"
4092990Sobrien#include "i40e_prototype.h"
4174462Salfred
4274462Salfred/* lan specific interface functions */
4374462Salfred
4474462Salfred/**
4574462Salfred * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
4674462Salfred * @offset: base address offset needing alignment
4775094Siedowse *
4874462Salfred * Aligns the layer 2 function private memory so it's 512-byte aligned.
4974462Salfred **/
5074462Salfredstatic u64 i40e_align_l2obj_base(u64 offset)
5174462Salfred{
5274462Salfred	u64 aligned_offset = offset;
5374462Salfred
5474462Salfred	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
5574462Salfred		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
5674462Salfred				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
5774462Salfred
5874462Salfred	return aligned_offset;
5974462Salfred}
6074462Salfred
6174462Salfred/**
6274462Salfred * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
63156090Sdeischen * @txq_num: number of Tx queues needing backing context
6474462Salfred * @rxq_num: number of Rx queues needing backing context
6592905Sobrien * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
6674462Salfred * @fcoe_filt_num: number of FCoE filters needing backing context
6774462Salfred *
6874462Salfred * Calculates the maximum amount of memory for the function required, based
6974462Salfred * on the number of resources it must provide context for.
7074462Salfred **/
7174462Salfredu64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
7274462Salfred			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
7374462Salfred{
7474462Salfred	u64 fpm_size = 0;
7574462Salfred
7674462Salfred	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
7774462Salfred	fpm_size = i40e_align_l2obj_base(fpm_size);
7892905Sobrien
7974462Salfred	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
8074462Salfred	fpm_size = i40e_align_l2obj_base(fpm_size);
8174462Salfred
8274462Salfred	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
8374462Salfred	fpm_size = i40e_align_l2obj_base(fpm_size);
8474462Salfred
8574462Salfred	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
8674462Salfred	fpm_size = i40e_align_l2obj_base(fpm_size);
8774462Salfred
8874462Salfred	return fpm_size;
8974462Salfred}
9074462Salfred
/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Each requested count is validated against the hardware maximum read from
 * the corresponding GLHMC_*MAX register; on any violation the function
 * returns I40E_ERR_INVALID_HMC_OBJ_COUNT without allocating further state.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
					u32 rxq_num, u32 fcoe_cntx_num,
					u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	/* the OBJSZ register reports the per-object size as a power-of-two
	 * exponent, so the byte size is 2^size_exp
	 */
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information; its region starts where the Tx
	 * region ends, rounded up to the 512-byte object alignment
	 */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information; laid out after the Rx region */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information; laid out after the FCoE context region */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	/* size the SD table only on the first call; sd_entry is non-NULL on
	 * re-entry and the existing table is kept
	 */
	if (NULL == hw->hmc.sd_table.sd_entry) {
		/* number of direct backing pages needed, rounded up */
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}
24774462Salfred
24874462Salfred/**
24974462Salfred * i40e_remove_pd_page - Remove a page from the page descriptor table
25074462Salfred * @hw: pointer to the HW structure
25174462Salfred * @hmc_info: pointer to the HMC configuration information structure
25274462Salfred * @idx: segment descriptor index to find the relevant page descriptor
25374462Salfred *
25474462Salfred * This function:
25574462Salfred *	1. Marks the entry in pd table (for paged address mode) invalid
25674462Salfred *	2. write to register PMPDINV to invalidate the backing page in FV cache
25774462Salfred *	3. Decrement the ref count for  pd_entry
25874462Salfred * assumptions:
25974462Salfred *	1. caller can deallocate the memory used by pd after this function
26074462Salfred *	   returns.
26174462Salfred **/
26274462Salfredstatic enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
26374462Salfred						 struct i40e_hmc_info *hmc_info,
26474462Salfred						 u32 idx)
26574462Salfred{
26674462Salfred	enum i40e_status_code ret_code = I40E_SUCCESS;
26774462Salfred
26874462Salfred	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
26974462Salfred		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);
27074462Salfred
27174462Salfred	return ret_code;
27274462Salfred}
27374462Salfred
27474462Salfred/**
27574462Salfred * i40e_remove_sd_bp - remove a backing page from a segment descriptor
27674462Salfred * @hw: pointer to our HW structure
27774462Salfred * @hmc_info: pointer to the HMC configuration information structure
27874462Salfred * @idx: the page index
27974462Salfred *
28074462Salfred * This function:
28174462Salfred *	1. Marks the entry in sd table (for direct address mode) invalid
28274462Salfred *	2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set
28374462Salfred *	   to 0) and PMSDDATAHIGH to invalidate the sd page
28474462Salfred *	3. Decrement the ref count for the sd_entry
28574462Salfred * assumptions:
28674462Salfred *	1. caller can deallocate the memory used by backing storage after this
28774462Salfred *	   function returns.
28874462Salfred **/
28974462Salfredstatic enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
29074462Salfred					       struct i40e_hmc_info *hmc_info,
29174462Salfred					       u32 idx)
29274462Salfred{
29374462Salfred	enum i40e_status_code ret_code = I40E_SUCCESS;
29474462Salfred
29574462Salfred	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
29674462Salfred		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);
29774462Salfred
29874462Salfred	return ret_code;
29974462Salfred}
30074462Salfred
/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.  On a mid-loop failure every SD (and, for paged
 * SDs, every backing page) created by this call is unwound before the
 * error is returned.
 **/
enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = FALSE;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	/* validate the request before touching any table state */
	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	/* the requested range must lie within the object count recorded for
	 * this resource type at init time
	 */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
			ret_code = I40E_ERR_INVALID_SD_INDEX;
			goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This is to cover for cases where you may not want to have an SD with
	 * the full 2M memory but something smaller. By not filling out any
	 * size, the function will default the SD size to be 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (I40E_SUCCESS != ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								info->hmc_info,
								i, NULL);
				if (I40E_SUCCESS != ret_code) {
					pd_error = TRUE;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = TRUE;
			/* program the SD entry into hardware; the physical
			 * address used depends on paged vs direct mode
			 */
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx, unwinding in reverse
	 * creation order; paged SDs release their backing pages first
	 */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}
458
/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
					     enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		/* DIRECT_PREFERRED retries in paged mode on failure;
		 * DIRECT_ONLY gives up immediately
		 */
		if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		/* else clause falls through the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
			  ret_code);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created */

	/* Tx contexts; FPM base addresses are programmed in 512-byte units */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
542
/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDS and backing storage.  After this function is returned,
 * caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 **/
enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	/* validate the request before touching any table state */
	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* first pass: release every valid backing page in the PD range;
	 * only paged SDs have PD entries to release
	 */
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		/* index of this PD within its owning SD */
		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (I40E_SUCCESS != ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	/* second pass: invalidate and release the SD entries themselves */
	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}
657
658/**
659 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
660 * @hw: pointer to the hw structure
661 *
662 * This must be called by drivers as they are shutting down and being
663 * removed from the OS.
664 **/
665enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
666{
667	struct i40e_hmc_lan_delete_obj_info info;
668	enum i40e_status_code ret_code;
669
670	info.hmc_info = &hw->hmc;
671	info.rsrc_type = I40E_HMC_LAN_FULL;
672	info.start_idx = 0;
673	info.count = 1;
674
675	/* delete the object */
676	ret_code = i40e_delete_lan_hmc_object(hw, &info);
677
678	/* free the SD table entry for LAN */
679	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
680	hw->hmc.sd_table.sd_cnt = 0;
681	hw->hmc.sd_table.sd_entry = NULL;
682
683	/* free memory used for hmc_obj */
684	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
685	hw->hmc.hmc_obj = NULL;
686
687	return ret_code;
688}
689
/* Expands to the {offset, size} pair for one struct member; used to build
 * the context-element tables below.
 */
#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)

/* Describes how one member of a driver-side context struct maps onto the
 * packed HMC context image consumed by the write helpers below.
 */
struct i40e_context_ele {
	u16 offset;	/* byte offset of the member in the source struct */
	u16 size_of;	/* size of the member in the source struct, bytes */
	u16 width;	/* width of the field in the HMC context, bits */
	u16 lsb;	/* bit position of the field's LSB in the context */
};
700
/* LAN Tx Queue Context
 *
 * Maps each member of struct i40e_hmc_obj_txq onto its bit field in the
 * hardware Tx queue context image; a zeroed sentinel entry terminates the
 * table.  LSB values past 128 address later 128-bit "lines" of context.
 */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
	{ 0 }
};
726
/* LAN Rx Queue Context
 *
 * Maps each member of struct i40e_hmc_obj_rxq onto its bit field in the
 * hardware Rx queue context image; a zeroed sentinel entry terminates the
 * table.
 */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
	{ 0 }
};
753
754/**
755 * i40e_write_byte - replace HMC context byte
756 * @hmc_bits: pointer to the HMC memory
757 * @ce_info: a description of the struct to be read from
758 * @src: the struct to be read from
759 **/
760static void i40e_write_byte(u8 *hmc_bits,
761			    struct i40e_context_ele *ce_info,
762			    u8 *src)
763{
764	u8 src_byte, dest_byte, mask;
765	u8 *from, *dest;
766	u16 shift_width;
767
768	/* copy from the next struct field */
769	from = src + ce_info->offset;
770
771	/* prepare the bits and mask */
772	shift_width = ce_info->lsb % 8;
773	mask = BIT(ce_info->width) - 1;
774
775	src_byte = *from;
776	src_byte &= mask;
777
778	/* shift to correct alignment */
779	mask <<= shift_width;
780	src_byte <<= shift_width;
781
782	/* get the current bits from the target bit string */
783	dest = hmc_bits + (ce_info->lsb / 8);
784
785	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
786
787	dest_byte &= ~mask;	/* get the bits not changing */
788	dest_byte |= src_byte;	/* add in the new bits */
789
790	/* put it all back */
791	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
792}
793
794/**
795 * i40e_write_word - replace HMC context word
796 * @hmc_bits: pointer to the HMC memory
797 * @ce_info: a description of the struct to be read from
798 * @src: the struct to be read from
799 **/
800static void i40e_write_word(u8 *hmc_bits,
801			    struct i40e_context_ele *ce_info,
802			    u8 *src)
803{
804	u16 src_word, mask;
805	u8 *from, *dest;
806	u16 shift_width;
807	__le16 dest_word;
808
809	/* copy from the next struct field */
810	from = src + ce_info->offset;
811
812	/* prepare the bits and mask */
813	shift_width = ce_info->lsb % 8;
814	mask = BIT(ce_info->width) - 1;
815
816	/* don't swizzle the bits until after the mask because the mask bits
817	 * will be in a different bit position on big endian machines
818	 */
819	src_word = *(u16 *)from;
820	src_word &= mask;
821
822	/* shift to correct alignment */
823	mask <<= shift_width;
824	src_word <<= shift_width;
825
826	/* get the current bits from the target bit string */
827	dest = hmc_bits + (ce_info->lsb / 8);
828
829	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
830
831	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
832	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
833
834	/* put it all back */
835	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
836}
837
838/**
839 * i40e_write_dword - replace HMC context dword
840 * @hmc_bits: pointer to the HMC memory
841 * @ce_info: a description of the struct to be read from
842 * @src: the struct to be read from
843 **/
844static void i40e_write_dword(u8 *hmc_bits,
845			     struct i40e_context_ele *ce_info,
846			     u8 *src)
847{
848	u32 src_dword, mask;
849	u8 *from, *dest;
850	u16 shift_width;
851	__le32 dest_dword;
852
853	/* copy from the next struct field */
854	from = src + ce_info->offset;
855
856	/* prepare the bits and mask */
857	shift_width = ce_info->lsb % 8;
858
859	/* if the field width is exactly 32 on an x86 machine, then the shift
860	 * operation will not work because the SHL instructions count is masked
861	 * to 5 bits so the shift will do nothing
862	 */
863	if (ce_info->width < 32)
864		mask = BIT(ce_info->width) - 1;
865	else
866		mask = ~(u32)0;
867
868	/* don't swizzle the bits until after the mask because the mask bits
869	 * will be in a different bit position on big endian machines
870	 */
871	src_dword = *(u32 *)from;
872	src_dword &= mask;
873
874	/* shift to correct alignment */
875	mask <<= shift_width;
876	src_dword <<= shift_width;
877
878	/* get the current bits from the target bit string */
879	dest = hmc_bits + (ce_info->lsb / 8);
880
881	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
882
883	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
884	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
885
886	/* put it all back */
887	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
888}
889
890/**
891 * i40e_write_qword - replace HMC context qword
892 * @hmc_bits: pointer to the HMC memory
893 * @ce_info: a description of the struct to be read from
894 * @src: the struct to be read from
895 **/
896static void i40e_write_qword(u8 *hmc_bits,
897			     struct i40e_context_ele *ce_info,
898			     u8 *src)
899{
900	u64 src_qword, mask;
901	u8 *from, *dest;
902	u16 shift_width;
903	__le64 dest_qword;
904
905	/* copy from the next struct field */
906	from = src + ce_info->offset;
907
908	/* prepare the bits and mask */
909	shift_width = ce_info->lsb % 8;
910
911	/* if the field width is exactly 64 on an x86 machine, then the shift
912	 * operation will not work because the SHL instructions count is masked
913	 * to 6 bits so the shift will do nothing
914	 */
915	if (ce_info->width < 64)
916		mask = BIT_ULL(ce_info->width) - 1;
917	else
918		mask = ~(u64)0;
919
920	/* don't swizzle the bits until after the mask because the mask bits
921	 * will be in a different bit position on big endian machines
922	 */
923	src_qword = *(u64 *)from;
924	src_qword &= mask;
925
926	/* shift to correct alignment */
927	mask <<= shift_width;
928	src_qword <<= shift_width;
929
930	/* get the current bits from the target bit string */
931	dest = hmc_bits + (ce_info->lsb / 8);
932
933	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
934
935	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
936	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
937
938	/* put it all back */
939	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
940}
941
942/**
943 * i40e_read_byte - read HMC context byte into struct
944 * @hmc_bits: pointer to the HMC memory
945 * @ce_info: a description of the struct to be filled
946 * @dest: the struct to be filled
947 **/
948static void i40e_read_byte(u8 *hmc_bits,
949			   struct i40e_context_ele *ce_info,
950			   u8 *dest)
951{
952	u8 dest_byte, mask;
953	u8 *src, *target;
954	u16 shift_width;
955
956	/* prepare the bits and mask */
957	shift_width = ce_info->lsb % 8;
958	mask = BIT(ce_info->width) - 1;
959
960	/* shift to correct alignment */
961	mask <<= shift_width;
962
963	/* get the current bits from the src bit string */
964	src = hmc_bits + (ce_info->lsb / 8);
965
966	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
967
968	dest_byte &= ~(mask);
969
970	dest_byte >>= shift_width;
971
972	/* get the address from the struct field */
973	target = dest + ce_info->offset;
974
975	/* put it back in the struct */
976	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
977}
978
979/**
980 * i40e_read_word - read HMC context word into struct
981 * @hmc_bits: pointer to the HMC memory
982 * @ce_info: a description of the struct to be filled
983 * @dest: the struct to be filled
984 **/
985static void i40e_read_word(u8 *hmc_bits,
986			   struct i40e_context_ele *ce_info,
987			   u8 *dest)
988{
989	u16 dest_word, mask;
990	u8 *src, *target;
991	u16 shift_width;
992	__le16 src_word;
993
994	/* prepare the bits and mask */
995	shift_width = ce_info->lsb % 8;
996	mask = BIT(ce_info->width) - 1;
997
998	/* shift to correct alignment */
999	mask <<= shift_width;
1000
1001	/* get the current bits from the src bit string */
1002	src = hmc_bits + (ce_info->lsb / 8);
1003
1004	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
1005
1006	/* the data in the memory is stored as little endian so mask it
1007	 * correctly
1008	 */
1009	src_word &= ~(CPU_TO_LE16(mask));
1010
1011	/* get the data back into host order before shifting */
1012	dest_word = LE16_TO_CPU(src_word);
1013
1014	dest_word >>= shift_width;
1015
1016	/* get the address from the struct field */
1017	target = dest + ce_info->offset;
1018
1019	/* put it back in the struct */
1020	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
1021}
1022
1023/**
1024 * i40e_read_dword - read HMC context dword into struct
1025 * @hmc_bits: pointer to the HMC memory
1026 * @ce_info: a description of the struct to be filled
1027 * @dest: the struct to be filled
1028 **/
1029static void i40e_read_dword(u8 *hmc_bits,
1030			    struct i40e_context_ele *ce_info,
1031			    u8 *dest)
1032{
1033	u32 dest_dword, mask;
1034	u8 *src, *target;
1035	u16 shift_width;
1036	__le32 src_dword;
1037
1038	/* prepare the bits and mask */
1039	shift_width = ce_info->lsb % 8;
1040
1041	/* if the field width is exactly 32 on an x86 machine, then the shift
1042	 * operation will not work because the SHL instructions count is masked
1043	 * to 5 bits so the shift will do nothing
1044	 */
1045	if (ce_info->width < 32)
1046		mask = BIT(ce_info->width) - 1;
1047	else
1048		mask = ~(u32)0;
1049
1050	/* shift to correct alignment */
1051	mask <<= shift_width;
1052
1053	/* get the current bits from the src bit string */
1054	src = hmc_bits + (ce_info->lsb / 8);
1055
1056	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
1057
1058	/* the data in the memory is stored as little endian so mask it
1059	 * correctly
1060	 */
1061	src_dword &= ~(CPU_TO_LE32(mask));
1062
1063	/* get the data back into host order before shifting */
1064	dest_dword = LE32_TO_CPU(src_dword);
1065
1066	dest_dword >>= shift_width;
1067
1068	/* get the address from the struct field */
1069	target = dest + ce_info->offset;
1070
1071	/* put it back in the struct */
1072	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
1073		    I40E_NONDMA_TO_DMA);
1074}
1075
1076/**
1077 * i40e_read_qword - read HMC context qword into struct
1078 * @hmc_bits: pointer to the HMC memory
1079 * @ce_info: a description of the struct to be filled
1080 * @dest: the struct to be filled
1081 **/
1082static void i40e_read_qword(u8 *hmc_bits,
1083			    struct i40e_context_ele *ce_info,
1084			    u8 *dest)
1085{
1086	u64 dest_qword, mask;
1087	u8 *src, *target;
1088	u16 shift_width;
1089	__le64 src_qword;
1090
1091	/* prepare the bits and mask */
1092	shift_width = ce_info->lsb % 8;
1093
1094	/* if the field width is exactly 64 on an x86 machine, then the shift
1095	 * operation will not work because the SHL instructions count is masked
1096	 * to 6 bits so the shift will do nothing
1097	 */
1098	if (ce_info->width < 64)
1099		mask = BIT_ULL(ce_info->width) - 1;
1100	else
1101		mask = ~(u64)0;
1102
1103	/* shift to correct alignment */
1104	mask <<= shift_width;
1105
1106	/* get the current bits from the src bit string */
1107	src = hmc_bits + (ce_info->lsb / 8);
1108
1109	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
1110
1111	/* the data in the memory is stored as little endian so mask it
1112	 * correctly
1113	 */
1114	src_qword &= ~(CPU_TO_LE64(mask));
1115
1116	/* get the data back into host order before shifting */
1117	dest_qword = LE64_TO_CPU(src_qword);
1118
1119	dest_qword >>= shift_width;
1120
1121	/* get the address from the struct field */
1122	target = dest + ce_info->offset;
1123
1124	/* put it back in the struct */
1125	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
1126		    I40E_NONDMA_TO_DMA);
1127}
1128
1129/**
1130 * i40e_get_hmc_context - extract HMC context bits
1131 * @context_bytes: pointer to the context bit array
1132 * @ce_info: a description of the struct to be filled
1133 * @dest: the struct to be filled
1134 **/
1135static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
1136					struct i40e_context_ele *ce_info,
1137					u8 *dest)
1138{
1139	int f;
1140
1141	for (f = 0; ce_info[f].width != 0; f++) {
1142		switch (ce_info[f].size_of) {
1143		case 1:
1144			i40e_read_byte(context_bytes, &ce_info[f], dest);
1145			break;
1146		case 2:
1147			i40e_read_word(context_bytes, &ce_info[f], dest);
1148			break;
1149		case 4:
1150			i40e_read_dword(context_bytes, &ce_info[f], dest);
1151			break;
1152		case 8:
1153			i40e_read_qword(context_bytes, &ce_info[f], dest);
1154			break;
1155		default:
1156			/* nothing to do, just keep going */
1157			break;
1158		}
1159	}
1160
1161	return I40E_SUCCESS;
1162}
1163
1164/**
1165 * i40e_clear_hmc_context - zero out the HMC context bits
1166 * @hw:       the hardware struct
1167 * @context_bytes: pointer to the context bit array (DMA memory)
1168 * @hmc_type: the type of HMC resource
1169 **/
1170static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
1171					u8 *context_bytes,
1172					enum i40e_hmc_lan_rsrc_type hmc_type)
1173{
1174	/* clean the bit array */
1175	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
1176		    I40E_DMA_MEM);
1177
1178	return I40E_SUCCESS;
1179}
1180
1181/**
1182 * i40e_set_hmc_context - replace HMC context bits
1183 * @context_bytes: pointer to the context bit array
1184 * @ce_info:  a description of the struct to be filled
1185 * @dest:     the struct to be filled
1186 **/
1187static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
1188					struct i40e_context_ele *ce_info,
1189					u8 *dest)
1190{
1191	int f;
1192
1193	for (f = 0; ce_info[f].width != 0; f++) {
1194
1195		/* we have to deal with each element of the HMC using the
1196		 * correct size so that we are correct regardless of the
1197		 * endianness of the machine
1198		 */
1199		switch (ce_info[f].size_of) {
1200		case 1:
1201			i40e_write_byte(context_bytes, &ce_info[f], dest);
1202			break;
1203		case 2:
1204			i40e_write_word(context_bytes, &ce_info[f], dest);
1205			break;
1206		case 4:
1207			i40e_write_dword(context_bytes, &ce_info[f], dest);
1208			break;
1209		case 8:
1210			i40e_write_qword(context_bytes, &ce_info[f], dest);
1211			break;
1212		}
1213	}
1214
1215	return I40E_SUCCESS;
1216}
1217
/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: pointer to the hw structure
 * @object_base: pointer to u64 to get the va
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer.  This function is used for LAN Queue contexts.
 *
 * Returns I40E_SUCCESS and stores the address through @object_base, or
 * I40E_ERR_BAD_PTR / I40E_ERR_INVALID_HMC_OBJ_INDEX on bad arguments.
 **/
static
enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
					u8 **object_base,
					enum i40e_hmc_lan_rsrc_type rsrc_type,
					u32 obj_idx)
{
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_info     *hmc_info = &hw->hmc;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	/* NOTE(review): hmc_info is the address of a member embedded in
	 * *hw, so this check can only fail if hw itself was NULL (in which
	 * case the &hw->hmc computation above is already suspect) --
	 * confirm whether a direct NULL check on hw was intended
	 */
	if (NULL == hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
		goto exit;
	}
	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	/* reject an hmc_info that was never properly initialized */
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	/* index must fall within the object count for this resource type */
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
			  ret_code);
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	/* byte offset of the object within the function-private memory
	 * space: per-type base plus obj_idx objects of this type's size
	 */
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		/* paged SD: resolve the backing page via the PD table,
		 * then take the remainder as the offset within that page
		 */
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		/* direct SD: offset straight into the backing page */
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}
1293
1294/**
1295 * i40e_get_lan_tx_queue_context - return the HMC context for the queue
1296 * @hw:    the hardware struct
1297 * @queue: the queue we care about
1298 * @s:     the struct to be filled
1299 **/
1300enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
1301						    u16 queue,
1302						    struct i40e_hmc_obj_txq *s)
1303{
1304	enum i40e_status_code err;
1305	u8 *context_bytes;
1306
1307	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
1308	if (err < 0)
1309		return err;
1310
1311	return i40e_get_hmc_context(context_bytes,
1312				    i40e_hmc_txq_ce_info, (u8 *)s);
1313}
1314
1315/**
1316 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
1317 * @hw:    the hardware struct
1318 * @queue: the queue we care about
1319 **/
1320enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
1321						      u16 queue)
1322{
1323	enum i40e_status_code err;
1324	u8 *context_bytes;
1325
1326	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
1327	if (err < 0)
1328		return err;
1329
1330	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
1331}
1332
1333/**
1334 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
1335 * @hw:    the hardware struct
1336 * @queue: the queue we care about
1337 * @s:     the struct to be filled
1338 **/
1339enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
1340						    u16 queue,
1341						    struct i40e_hmc_obj_txq *s)
1342{
1343	enum i40e_status_code err;
1344	u8 *context_bytes;
1345
1346	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
1347	if (err < 0)
1348		return err;
1349
1350	return i40e_set_hmc_context(context_bytes,
1351				    i40e_hmc_txq_ce_info, (u8 *)s);
1352}
1353
1354/**
1355 * i40e_get_lan_rx_queue_context - return the HMC context for the queue
1356 * @hw:    the hardware struct
1357 * @queue: the queue we care about
1358 * @s:     the struct to be filled
1359 **/
1360enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
1361						    u16 queue,
1362						    struct i40e_hmc_obj_rxq *s)
1363{
1364	enum i40e_status_code err;
1365	u8 *context_bytes;
1366
1367	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
1368	if (err < 0)
1369		return err;
1370
1371	return i40e_get_hmc_context(context_bytes,
1372				    i40e_hmc_rxq_ce_info, (u8 *)s);
1373}
1374
1375/**
1376 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
1377 * @hw:    the hardware struct
1378 * @queue: the queue we care about
1379 **/
1380enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
1381						      u16 queue)
1382{
1383	enum i40e_status_code err;
1384	u8 *context_bytes;
1385
1386	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
1387	if (err < 0)
1388		return err;
1389
1390	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
1391}
1392
1393/**
1394 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
1395 * @hw:    the hardware struct
1396 * @queue: the queue we care about
1397 * @s:     the struct to be filled
1398 **/
1399enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
1400						    u16 queue,
1401						    struct i40e_hmc_obj_rxq *s)
1402{
1403	enum i40e_status_code err;
1404	u8 *context_bytes;
1405
1406	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
1407	if (err < 0)
1408		return err;
1409
1410	return i40e_set_hmc_context(context_bytes,
1411				    i40e_hmc_rxq_ce_info, (u8 *)s);
1412}
1413