/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixl/i40e_lan_hmc.c 270631 2014-08-25 22:04:29Z jfv $*/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
#include "i40e_prototype.h"

/* lan specific interface functions */

/**
 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
 * @offset: base address offset needing alignment
 *
 * Aligns the layer 2 function private memory so it's 512-byte aligned.
 **/
static u64 i40e_align_l2obj_base(u64 offset)
{
	u64 aligned_offset = offset;

	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));

	return aligned_offset;
}
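
/*
 * Worked example (illustration only, not driver code): with the 512-byte
 * alignment given by I40E_HMC_L2OBJ_BASE_ALIGNMENT, offsets that are not
 * already multiples of 512 are rounded up to the next boundary:
 *
 *	i40e_align_l2obj_base(0)    == 0
 *	i40e_align_l2obj_base(1)    == 512
 *	i40e_align_l2obj_base(512)  == 512
 *	i40e_align_l2obj_base(1000) == 1024
 */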

/**
 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * Calculates the maximum amount of memory the function requires, based
 * on the number of resources it must provide context for.
 **/
u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
{
	u64 fpm_size = 0;

	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	return fpm_size;
}
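
/*
 * Sizing sketch (illustration only): the regions are laid out in the fixed
 * order Tx queues, Rx queues, FCoE contexts, FCoE filters, each region base
 * rounded up to the 512-byte L2 object alignment.  Assuming (hypothetically)
 * 128-byte Tx and 32-byte Rx context objects, 1536 Tx plus 1536 Rx queues
 * need 1536 * 128 = 196608 bytes followed by 1536 * 32 = 49152 bytes; both
 * subtotals are already 512-byte aligned, so the LAN portion of the FPM is
 * 245760 bytes before any FCoE objects are added.
 */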

/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
					u32 rxq_num, u32 fcoe_cntx_num,
					u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	if (NULL == hw->hmc.sd_table.sd_entry) {
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}
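
/*
 * Usage sketch (illustration only, compiled out): a PF driver sizes the HMC
 * once at attach time and then commits the layout.  The queue-count
 * variables below are hypothetical and FCoE is unused.
 */
#if 0
	enum i40e_status_code status;

	status = i40e_init_lan_hmc(hw, num_tx_queues, num_rx_queues, 0, 0);
	if (status == I40E_SUCCESS)
		status = i40e_configure_lan_hmc(hw,
						I40E_HMC_MODEL_DIRECT_PREFERRED);
#endif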

/**
 * i40e_remove_pd_page - Remove a page from the page descriptor table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 *
 * This function:
 *	1. Marks the entry in pd table (for paged address mode) invalid
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * Assumptions:
 *	1. Caller can deallocate the memory used by pd after this function
 *	   returns.
 **/
static enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
						 struct i40e_hmc_info *hmc_info,
						 u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);

	return ret_code;
}

/**
 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in sd table (for direct address mode) invalid
 *	2. Writes to registers PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
 *	   set to 0) and PMSDDATAHIGH to invalidate the sd page
 *	3. Decrements the ref count for the sd_entry
 * Assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
static enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
					       struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);

	return ret_code;
}

/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 **/
enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = FALSE;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This is to cover for cases where you may not want to have an SD with
	 * the full 2M memory but something smaller. By not filling out any
	 * size, the function will default the SD size to be 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (I40E_SUCCESS != ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								info->hmc_info,
								i);
				if (I40E_SUCCESS != ret_code) {
					pd_error = TRUE;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = TRUE;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			}
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}
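
/*
 * Caller sketch (illustration only, compiled out): i40e_configure_lan_hmc()
 * below is the in-tree caller; it requests a single SD covering the full
 * LAN object, roughly as follows.
 */
#if 0
	struct i40e_hmc_lan_create_obj_info info;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;
	info.entry_type = I40E_SD_TYPE_DIRECT;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
	ret_code = i40e_create_lan_hmc_object(hw, &info);
#endif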

/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
					     enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if ((ret_code != I40E_SUCCESS) &&
		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		/* else clause falls through the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
			  ret_code);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
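
/*
 * Note (illustration only): the I40E_GLHMC_*BASE registers above take the
 * region base in 512-byte units, which is why each byte offset produced by
 * i40e_align_l2obj_base() is divided by 512 before being written.  For
 * example, an Rx region at FPM byte offset 196608 would be programmed as
 * 196608 / 512 = 384.
 */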

/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDs and backing storage.  After this function returns,
 * the caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 **/
enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (I40E_SUCCESS != ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}

/**
 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
 * @hw: pointer to the hw structure
 *
 * This must be called by drivers as they are shutting down and being
 * removed from the OS.
 **/
enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_lan_delete_obj_info info;
	enum i40e_status_code ret_code;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;

	/* delete the object */
	ret_code = i40e_delete_lan_hmc_object(hw, &info);

	/* free the SD table entry for LAN */
	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
	hw->hmc.sd_table.sd_cnt = 0;
	hw->hmc.sd_table.sd_entry = NULL;

	/* free memory used for hmc_obj */
	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
	hw->hmc.hmc_obj = NULL;

	return ret_code;
}
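
/*
 * Teardown sketch (illustration only, compiled out): the shutdown call is
 * the counterpart of i40e_init_lan_hmc()/i40e_configure_lan_hmc() and is
 * made once as the PF driver detaches.
 */
#if 0
	enum i40e_status_code status;

	status = i40e_shutdown_lan_hmc(hw);
	if (status != I40E_SUCCESS)
		DEBUGOUT1("i40e_shutdown_lan_hmc failed: %d\n", status);
#endif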

#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)

struct i40e_context_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};
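
/*
 * Example (illustration only): I40E_HMC_STORE() supplies the first two
 * initializers of a struct i40e_context_ele entry, so
 *
 *	{I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0}
 *
 * expands to
 *
 *	{offsetof(struct i40e_hmc_obj_txq, head),
 *	 FIELD_SIZEOF(struct i40e_hmc_obj_txq, head), 13, 0}
 *
 * describing a 13-bit context field whose LSB is bit 0 of the HMC object.
 */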

/* LAN Tx Queue Context */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
	{ 0 }
};

/* LAN Rx Queue Context */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
	{ 0 }
};

/**
 * i40e_write_byte - replace HMC context byte
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_byte(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = ((u8)1 << ce_info->width) - 1;

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}
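
/*
 * Worked example (illustration only) for a hypothetical field with
 * width = 3 and lsb = 13: shift_width = 13 % 8 = 5 and the pre-shift mask
 * is (1 << 3) - 1 = 0x07.  After shifting, mask = 0xE0 and the field lives
 * in byte 13 / 8 = 1 of the context, so a source value of 0x5 is written
 * as 0x5 << 5 = 0xA0 while the low five bits of that byte are preserved.
 */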

/**
 * i40e_write_word - replace HMC context word
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_word(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u16 src_word, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le16 dest_word;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = ((u16)1 << ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_dword - replace HMC context dword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_dword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u32 src_dword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le32 dest_dword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = ((u32)1 << ce_info->width) - 1;
	else
		mask = 0xFFFFFFFF;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_qword - replace HMC context qword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_qword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u64 src_qword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le64 dest_qword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = ((u64)1 << ce_info->width) - 1;
	else
		mask = 0xFFFFFFFFFFFFFFFFUL;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_byte - read HMC context byte into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_byte(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u8 dest_byte, mask;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = ((u8)1 << ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	/* keep only the bits of this field, then shift it down to bit 0 */
	dest_byte &= mask;

	dest_byte >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}
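
/*
 * Worked example (illustration only), the inverse of the write case above:
 * for a hypothetical field with width = 3 and lsb = 13, the byte at offset
 * 13 / 8 = 1 is fetched, ANDed with mask = 0x07 << 5 = 0xE0 to isolate the
 * field, and shifted right by 5; a stored byte of 0xBF therefore yields
 * (0xBF & 0xE0) >> 5 = 0x5.
 */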

/**
 * i40e_read_word - read HMC context word into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_word(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u16 dest_word, mask;
	u8 *src, *target;
	u16 shift_width;
	__le16 src_word;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = ((u16)1 << ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_word &= CPU_TO_LE16(mask);

	/* get the data back into host order before shifting */
	dest_word = LE16_TO_CPU(src_word);

	dest_word >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_dword - read HMC context dword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_dword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u32 dest_dword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le32 src_dword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = ((u32)1 << ce_info->width) - 1;
	else
		mask = 0xFFFFFFFF;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_dword &= CPU_TO_LE32(mask);

	/* get the data back into host order before shifting */
	dest_dword = LE32_TO_CPU(src_dword);

	dest_dword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_qword - read HMC context qword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_qword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u64 dest_qword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le64 src_qword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = ((u64)1 << ce_info->width) - 1;
	else
		mask = 0xFFFFFFFFFFFFFFFFUL;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_qword &= CPU_TO_LE64(mask);

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_get_hmc_context - extract HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {
		switch (ce_info[f].size_of) {
		case 1:
			i40e_read_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_read_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_read_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_read_qword(context_bytes, &ce_info[f], dest);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_clear_hmc_context - zero out the HMC context bits
 * @hw:       the hardware struct
 * @context_bytes: pointer to the context bit array (DMA memory)
 * @hmc_type: the type of HMC resource
 **/
static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
					u8 *context_bytes,
					enum i40e_hmc_lan_rsrc_type hmc_type)
{
	/* clean the bit array */
	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
		    I40E_DMA_MEM);

	return I40E_SUCCESS;
}

/**
 * i40e_set_hmc_context - replace HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info:  a description of the struct to be read from
 * @dest:     the struct supplying the new field values
 **/
static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {

		/* we have to deal with each element of the HMC using the
		 * correct size so that we are correct regardless of the
		 * endianness of the machine
		 */
		switch (ce_info[f].size_of) {
		case 1:
			i40e_write_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_write_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_write_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_write_qword(context_bytes, &ce_info[f], dest);
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hmc_info: pointer to i40e_hmc_info struct
 * @object_base: pointer to u64 to get the va
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer.  This function is used for LAN Queue contexts.
 **/
static
enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
					u8 **object_base,
					enum i40e_hmc_lan_rsrc_type rsrc_type,
					u32 obj_idx)
{
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	if (NULL == hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
		goto exit;
	}
	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
			  ret_code);
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}
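
/*
 * Worked example (illustration only): for a direct-mode SD the address is
 * pure arithmetic.  With a hypothetical Rx context base of 196608 bytes in
 * the FPM and 32-byte Rx objects, queue 10 sits at FPM offset
 * 196608 + 32 * 10 = 196928, i.e. at byte 196928 % I40E_HMC_DIRECT_BP_SIZE
 * within the SD's backing page.
 */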

/**
 * i40e_get_lan_tx_queue_context - return the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
}

/**
 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct containing the context values to program
 **/
enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}
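
/*
 * Usage sketch (illustration only, compiled out): before enabling a Tx
 * queue, a driver clears the stale context and programs a fresh one built
 * in host memory.  The ring variables are hypothetical; the base field is
 * assumed here to take the ring address in 128-byte units, as the ixl
 * driver programs it.
 */
#if 0
	struct i40e_hmc_obj_txq txq_ctx;
	enum i40e_status_code status;

	i40e_memset(&txq_ctx, 0, sizeof(txq_ctx), I40E_NONDMA_MEM);
	txq_ctx.new_context = 1;
	txq_ctx.base = ring_dma_addr / 128;
	txq_ctx.qlen = ring_desc_count;

	status = i40e_clear_lan_tx_queue_context(hw, queue);
	if (status == I40E_SUCCESS)
		status = i40e_set_lan_tx_queue_context(hw, queue, &txq_ctx);
#endif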

/**
 * i40e_get_lan_rx_queue_context - return the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
}

/**
 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct containing the context values to program
 **/
enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}
1419