1/* SPDX-License-Identifier: BSD-3-Clause */
2/*  Copyright (c) 2020, Intel Corporation
3 *  All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions are met:
7 *
8 *   1. Redistributions of source code must retain the above copyright notice,
9 *      this list of conditions and the following disclaimer.
10 *
11 *   2. Redistributions in binary form must reproduce the above copyright
12 *      notice, this list of conditions and the following disclaimer in the
13 *      documentation and/or other materials provided with the distribution.
14 *
15 *   3. Neither the name of the Intel Corporation nor the names of its
16 *      contributors may be used to endorse or promote products derived from
17 *      this software without specific prior written permission.
18 *
19 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 *  POSSIBILITY OF SUCH DAMAGE.
30 */
31/*$FreeBSD$*/
32
33#include "ice_common.h"
34#include "ice_sched.h"
35#include "ice_adminq_cmd.h"
36
37#include "ice_flow.h"
38#include "ice_switch.h"
39
40#define ICE_PF_RESET_WAIT_COUNT	300
41
42/**
43 * ice_set_mac_type - Sets MAC type
44 * @hw: pointer to the HW structure
45 *
46 * This function sets the MAC type of the adapter based on the
47 * vendor ID and device ID stored in the HW structure.
48 */
49enum ice_status ice_set_mac_type(struct ice_hw *hw)
50{
51	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
52
53	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
54		return ICE_ERR_DEVICE_NOT_SUPPORTED;
55
56	switch (hw->device_id) {
57	case ICE_DEV_ID_E810C_BACKPLANE:
58	case ICE_DEV_ID_E810C_QSFP:
59	case ICE_DEV_ID_E810C_SFP:
60	case ICE_DEV_ID_E810_XXV_BACKPLANE:
61	case ICE_DEV_ID_E810_XXV_QSFP:
62	case ICE_DEV_ID_E810_XXV_SFP:
63		hw->mac_type = ICE_MAC_E810;
64		break;
65	case ICE_DEV_ID_E822C_10G_BASE_T:
66	case ICE_DEV_ID_E822C_BACKPLANE:
67	case ICE_DEV_ID_E822C_QSFP:
68	case ICE_DEV_ID_E822C_SFP:
69	case ICE_DEV_ID_E822C_SGMII:
70	case ICE_DEV_ID_E822L_10G_BASE_T:
71	case ICE_DEV_ID_E822L_BACKPLANE:
72	case ICE_DEV_ID_E822L_SFP:
73	case ICE_DEV_ID_E822L_SGMII:
74	case ICE_DEV_ID_E823L_10G_BASE_T:
75	case ICE_DEV_ID_E823L_1GBE:
76	case ICE_DEV_ID_E823L_BACKPLANE:
77	case ICE_DEV_ID_E823L_QSFP:
78	case ICE_DEV_ID_E823L_SFP:
79		hw->mac_type = ICE_MAC_GENERIC;
80		break;
81	default:
82		hw->mac_type = ICE_MAC_UNKNOWN;
83		break;
84	}
85
86	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
87	return ICE_SUCCESS;
88}
89
90/**
91 * ice_clear_pf_cfg - Clear PF configuration
92 * @hw: pointer to the hardware structure
93 *
94 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
95 * configuration, flow director filters, etc.).
96 */
97enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
98{
99	struct ice_aq_desc desc;
100
101	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
102
103	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
104}
105
106/**
107 * ice_aq_manage_mac_read - manage MAC address read command
108 * @hw: pointer to the HW struct
109 * @buf: a virtual buffer to hold the manage MAC read response
110 * @buf_size: Size of the virtual buffer
111 * @cd: pointer to command details structure or NULL
112 *
113 * This function is used to return per PF station MAC address (0x0107).
114 * NOTE: Upon successful completion of this command, MAC address information
115 * is returned in user specified buffer. Please interpret user specified
116 * buffer as "manage_mac_read" response.
117 * Response such as various MAC addresses are stored in HW struct (port.mac)
118 * ice_discover_dev_caps is expected to be called before this function is
119 * called.
120 */
121enum ice_status
122ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
123		       struct ice_sq_cd *cd)
124{
125	struct ice_aqc_manage_mac_read_resp *resp;
126	struct ice_aqc_manage_mac_read *cmd;
127	struct ice_aq_desc desc;
128	enum ice_status status;
129	u16 flags;
130	u8 i;
131
132	cmd = &desc.params.mac_read;
133
134	if (buf_size < sizeof(*resp))
135		return ICE_ERR_BUF_TOO_SHORT;
136
137	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
138
139	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
140	if (status)
141		return status;
142
143	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
144	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
145
146	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
147		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
148		return ICE_ERR_CFG;
149	}
150
151	/* A single port can report up to two (LAN and WoL) addresses */
152	for (i = 0; i < cmd->num_addr; i++)
153		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
154			ice_memcpy(hw->port_info->mac.lan_addr,
155				   resp[i].mac_addr, ETH_ALEN,
156				   ICE_DMA_TO_NONDMA);
157			ice_memcpy(hw->port_info->mac.perm_addr,
158				   resp[i].mac_addr,
159				   ETH_ALEN, ICE_DMA_TO_NONDMA);
160			break;
161		}
162	return ICE_SUCCESS;
163}
164
165/**
166 * ice_aq_get_phy_caps - returns PHY capabilities
167 * @pi: port information structure
168 * @qual_mods: report qualified modules
169 * @report_mode: report mode capabilities
170 * @pcaps: structure for PHY capabilities to be filled
171 * @cd: pointer to command details structure or NULL
172 *
173 * Returns the various PHY capabilities supported on the Port (0x0600)
174 */
175enum ice_status
176ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
177		    struct ice_aqc_get_phy_caps_data *pcaps,
178		    struct ice_sq_cd *cd)
179{
180	struct ice_aqc_get_phy_caps *cmd;
181	u16 pcaps_size = sizeof(*pcaps);
182	struct ice_aq_desc desc;
183	enum ice_status status;
184	struct ice_hw *hw;
185
186	cmd = &desc.params.get_phy;
187
188	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
189		return ICE_ERR_PARAM;
190	hw = pi->hw;
191
192	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
193
194	if (qual_mods)
195		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
196
197	cmd->param0 |= CPU_TO_LE16(report_mode);
198	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
199
200	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
201		  report_mode);
202	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
203		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
204	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
205		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
206	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
207	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
208		  pcaps->low_power_ctrl_an);
209	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
210	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
211		  pcaps->eeer_value);
212	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
213		  pcaps->link_fec_options);
214	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
215		  pcaps->module_compliance_enforcement);
216	ice_debug(hw, ICE_DBG_LINK, "   extended_compliance_code = 0x%x\n",
217		  pcaps->extended_compliance_code);
218	ice_debug(hw, ICE_DBG_LINK, "   module_type[0] = 0x%x\n",
219		  pcaps->module_type[0]);
220	ice_debug(hw, ICE_DBG_LINK, "   module_type[1] = 0x%x\n",
221		  pcaps->module_type[1]);
222	ice_debug(hw, ICE_DBG_LINK, "   module_type[2] = 0x%x\n",
223		  pcaps->module_type[2]);
224
225	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
226		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
227		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
228		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
229			   sizeof(pi->phy.link_info.module_type),
230			   ICE_NONDMA_TO_NONDMA);
231	}
232
233	return status;
234}
235
236/**
237 * ice_aq_get_link_topo_handle - get link topology node return status
238 * @pi: port information structure
239 * @node_type: requested node type
240 * @cd: pointer to command details structure or NULL
241 *
242 * Get link topology node return status for specified node type (0x06E0)
243 *
244 * Node type cage can be used to determine if cage is present. If AQC
245 * returns error (ENOENT), then no cage present. If no cage present, then
246 * connection type is backplane or BASE-T.
247 */
248static enum ice_status
249ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
250			    struct ice_sq_cd *cd)
251{
252	struct ice_aqc_get_link_topo *cmd;
253	struct ice_aq_desc desc;
254
255	cmd = &desc.params.get_link_topo;
256
257	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
258
259	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
260				   ICE_AQC_LINK_TOPO_NODE_CTX_S);
261
262	/* set node type */
263	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
264
265	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
266}
267
268/*
269 * ice_is_media_cage_present
270 * @pi: port information structure
271 *
272 * Returns true if media cage is present, else false. If no cage, then
273 * media type is backplane or BASE-T.
274 */
275static bool ice_is_media_cage_present(struct ice_port_info *pi)
276{
277	/* Node type cage can be used to determine if cage is present. If AQC
278	 * returns error (ENOENT), then no cage present. If no cage present then
279	 * connection type is backplane or BASE-T.
280	 */
281	return !ice_aq_get_link_topo_handle(pi,
282					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
283					    NULL);
284}
285
286/**
287 * ice_get_media_type - Gets media type
288 * @pi: port information structure
289 */
290static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
291{
292	struct ice_link_status *hw_link_info;
293
294	if (!pi)
295		return ICE_MEDIA_UNKNOWN;
296
297	hw_link_info = &pi->phy.link_info;
298	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
299		/* If more than one media type is selected, report unknown */
300		return ICE_MEDIA_UNKNOWN;
301
302	if (hw_link_info->phy_type_low) {
303		/* 1G SGMII is a special case where some DA cable PHYs
304		 * may show this as an option when it really shouldn't
305		 * be since SGMII is meant to be between a MAC and a PHY
306		 * in a backplane. Try to detect this case and handle it
307		 */
308		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
309		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
310		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
311		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
312		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
313			return ICE_MEDIA_DA;
314
315		switch (hw_link_info->phy_type_low) {
316		case ICE_PHY_TYPE_LOW_1000BASE_SX:
317		case ICE_PHY_TYPE_LOW_1000BASE_LX:
318		case ICE_PHY_TYPE_LOW_10GBASE_SR:
319		case ICE_PHY_TYPE_LOW_10GBASE_LR:
320		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
321		case ICE_PHY_TYPE_LOW_25GBASE_SR:
322		case ICE_PHY_TYPE_LOW_25GBASE_LR:
323		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
324		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
325		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
326		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
327		case ICE_PHY_TYPE_LOW_50GBASE_SR:
328		case ICE_PHY_TYPE_LOW_50GBASE_FR:
329		case ICE_PHY_TYPE_LOW_50GBASE_LR:
330		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
331		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
332		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
333		case ICE_PHY_TYPE_LOW_100GBASE_DR:
334			return ICE_MEDIA_FIBER;
335		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
336		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
337		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
338		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
339		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
340		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
341		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
342		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
343			return ICE_MEDIA_FIBER;
344		case ICE_PHY_TYPE_LOW_100BASE_TX:
345		case ICE_PHY_TYPE_LOW_1000BASE_T:
346		case ICE_PHY_TYPE_LOW_2500BASE_T:
347		case ICE_PHY_TYPE_LOW_5GBASE_T:
348		case ICE_PHY_TYPE_LOW_10GBASE_T:
349		case ICE_PHY_TYPE_LOW_25GBASE_T:
350			return ICE_MEDIA_BASET;
351		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
352		case ICE_PHY_TYPE_LOW_25GBASE_CR:
353		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
354		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
355		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
356		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
357		case ICE_PHY_TYPE_LOW_50GBASE_CP:
358		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
359		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
360		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
361			return ICE_MEDIA_DA;
362		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
363		case ICE_PHY_TYPE_LOW_40G_XLAUI:
364		case ICE_PHY_TYPE_LOW_50G_LAUI2:
365		case ICE_PHY_TYPE_LOW_50G_AUI2:
366		case ICE_PHY_TYPE_LOW_50G_AUI1:
367		case ICE_PHY_TYPE_LOW_100G_AUI4:
368		case ICE_PHY_TYPE_LOW_100G_CAUI4:
369			if (ice_is_media_cage_present(pi))
370				return ICE_MEDIA_AUI;
371			/* fall-through */
372		case ICE_PHY_TYPE_LOW_1000BASE_KX:
373		case ICE_PHY_TYPE_LOW_2500BASE_KX:
374		case ICE_PHY_TYPE_LOW_2500BASE_X:
375		case ICE_PHY_TYPE_LOW_5GBASE_KR:
376		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
377		case ICE_PHY_TYPE_LOW_25GBASE_KR:
378		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
379		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
380		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
381		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
382		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
383		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
384		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
385			return ICE_MEDIA_BACKPLANE;
386		}
387	} else {
388		switch (hw_link_info->phy_type_high) {
389		case ICE_PHY_TYPE_HIGH_100G_AUI2:
390		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
391			if (ice_is_media_cage_present(pi))
392				return ICE_MEDIA_AUI;
393			/* fall-through */
394		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
395			return ICE_MEDIA_BACKPLANE;
396		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
397		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
398			return ICE_MEDIA_FIBER;
399		}
400	}
401	return ICE_MEDIA_UNKNOWN;
402}
403
404/**
405 * ice_aq_get_link_info
406 * @pi: port information structure
407 * @ena_lse: enable/disable LinkStatusEvent reporting
408 * @link: pointer to link status structure - optional
409 * @cd: pointer to command details structure or NULL
410 *
411 * Get Link Status (0x607). Returns the link status of the adapter.
412 */
413enum ice_status
414ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
415		     struct ice_link_status *link, struct ice_sq_cd *cd)
416{
417	struct ice_aqc_get_link_status_data link_data = { 0 };
418	struct ice_aqc_get_link_status *resp;
419	struct ice_link_status *li_old, *li;
420	enum ice_media_type *hw_media_type;
421	struct ice_fc_info *hw_fc_info;
422	bool tx_pause, rx_pause;
423	struct ice_aq_desc desc;
424	enum ice_status status;
425	struct ice_hw *hw;
426	u16 cmd_flags;
427
428	if (!pi)
429		return ICE_ERR_PARAM;
430	hw = pi->hw;
431
432	li_old = &pi->phy.link_info_old;
433	hw_media_type = &pi->phy.media_type;
434	li = &pi->phy.link_info;
435	hw_fc_info = &pi->fc;
436
437	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
438	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
439	resp = &desc.params.get_link_status;
440	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
441	resp->lport_num = pi->lport;
442
443	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
444
445	if (status != ICE_SUCCESS)
446		return status;
447
448	/* save off old link status information */
449	*li_old = *li;
450
451	/* update current link status information */
452	li->link_speed = LE16_TO_CPU(link_data.link_speed);
453	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
454	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
455	*hw_media_type = ice_get_media_type(pi);
456	li->link_info = link_data.link_info;
457	li->an_info = link_data.an_info;
458	li->ext_info = link_data.ext_info;
459	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
460	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
461	li->topo_media_conflict = link_data.topo_media_conflict;
462	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
463				      ICE_AQ_CFG_PACING_TYPE_M);
464
465	/* update fc info */
466	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
467	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
468	if (tx_pause && rx_pause)
469		hw_fc_info->current_mode = ICE_FC_FULL;
470	else if (tx_pause)
471		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
472	else if (rx_pause)
473		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
474	else
475		hw_fc_info->current_mode = ICE_FC_NONE;
476
477	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
478
479	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
480	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
481	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
482		  (unsigned long long)li->phy_type_low);
483	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
484		  (unsigned long long)li->phy_type_high);
485	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
486	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
487	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
488	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
489	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
490	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
491	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
492		  li->max_frame_size);
493	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);
494
495	/* save link status information */
496	if (link)
497		*link = *li;
498
499	/* flag cleared so calling functions don't call AQ again */
500	pi->phy.get_link_info = false;
501
502	return ICE_SUCCESS;
503}
504
505/**
506 * ice_fill_tx_timer_and_fc_thresh
507 * @hw: pointer to the HW struct
508 * @cmd: pointer to MAC cfg structure
509 *
510 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
511 * descriptor
512 */
513static void
514ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
515				struct ice_aqc_set_mac_cfg *cmd)
516{
517	u16 fc_thres_val, tx_timer_val;
518	u32 val;
519
520	/* We read back the transmit timer and fc threshold value of
521	 * LFC. Thus, we will use index =
522	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
523	 *
524	 * Also, because we are opearating on transmit timer and fc
525	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
526	 */
527#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
528
529	/* Retrieve the transmit timer */
530	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
531	tx_timer_val = val &
532		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
533	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
534
535	/* Retrieve the fc threshold */
536	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
537	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
538
539	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
540}
541
542/**
543 * ice_aq_set_mac_cfg
544 * @hw: pointer to the HW struct
545 * @max_frame_size: Maximum Frame Size to be supported
546 * @cd: pointer to command details structure or NULL
547 *
548 * Set MAC configuration (0x0603)
549 */
550enum ice_status
551ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
552{
553	struct ice_aqc_set_mac_cfg *cmd;
554	struct ice_aq_desc desc;
555
556	cmd = &desc.params.set_mac_cfg;
557
558	if (max_frame_size == 0)
559		return ICE_ERR_PARAM;
560
561	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
562
563	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
564
565	ice_fill_tx_timer_and_fc_thresh(hw, cmd);
566
567	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
568}
569
570/**
571 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
572 * @hw: pointer to the HW struct
573 */
574static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
575{
576	struct ice_switch_info *sw;
577	enum ice_status status;
578
579	hw->switch_info = (struct ice_switch_info *)
580			  ice_malloc(hw, sizeof(*hw->switch_info));
581
582	sw = hw->switch_info;
583
584	if (!sw)
585		return ICE_ERR_NO_MEMORY;
586
587	INIT_LIST_HEAD(&sw->vsi_list_map_head);
588	sw->prof_res_bm_init = 0;
589
590	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
591	if (status) {
592		ice_free(hw, hw->switch_info);
593		return status;
594	}
595	return ICE_SUCCESS;
596}
597
598/**
599 * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct
600 * @hw: pointer to the HW struct
601 * @sw: pointer to switch info struct for which function clears filters
602 */
603static void
604ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
605{
606	struct ice_vsi_list_map_info *v_pos_map;
607	struct ice_vsi_list_map_info *v_tmp_map;
608	struct ice_sw_recipe *recps;
609	u8 i;
610
611	if (!sw)
612		return;
613
614	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
615				 ice_vsi_list_map_info, list_entry) {
616		LIST_DEL(&v_pos_map->list_entry);
617		ice_free(hw, v_pos_map);
618	}
619	recps = sw->recp_list;
620	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
621		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
622
623		recps[i].root_rid = i;
624		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
625					 &recps[i].rg_list, ice_recp_grp_entry,
626					 l_entry) {
627			LIST_DEL(&rg_entry->l_entry);
628			ice_free(hw, rg_entry);
629		}
630
631		if (recps[i].adv_rule) {
632			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
633			struct ice_adv_fltr_mgmt_list_entry *lst_itr;
634
635			ice_destroy_lock(&recps[i].filt_rule_lock);
636			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
637						 &recps[i].filt_rules,
638						 ice_adv_fltr_mgmt_list_entry,
639						 list_entry) {
640				LIST_DEL(&lst_itr->list_entry);
641				ice_free(hw, lst_itr->lkups);
642				ice_free(hw, lst_itr);
643			}
644		} else {
645			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
646
647			ice_destroy_lock(&recps[i].filt_rule_lock);
648			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
649						 &recps[i].filt_rules,
650						 ice_fltr_mgmt_list_entry,
651						 list_entry) {
652				LIST_DEL(&lst_itr->list_entry);
653				ice_free(hw, lst_itr);
654			}
655		}
656		if (recps[i].root_buf)
657			ice_free(hw, recps[i].root_buf);
658	}
659	ice_rm_sw_replay_rule_info(hw, sw);
660	ice_free(hw, sw->recp_list);
661	ice_free(hw, sw);
662}
663
664/**
665 * ice_cleanup_all_fltr_mgmt - cleanup filter management list and locks
666 * @hw: pointer to the HW struct
667 */
668static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
669{
670	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
671}
672
673/**
674 * ice_get_itr_intrl_gran
675 * @hw: pointer to the HW struct
676 *
677 * Determines the ITR/INTRL granularities based on the maximum aggregate
678 * bandwidth according to the device's configuration during power-on.
679 */
680static void ice_get_itr_intrl_gran(struct ice_hw *hw)
681{
682	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
683			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
684			GL_PWR_MODE_CTL_CAR_MAX_BW_S;
685
686	switch (max_agg_bw) {
687	case ICE_MAX_AGG_BW_200G:
688	case ICE_MAX_AGG_BW_100G:
689	case ICE_MAX_AGG_BW_50G:
690		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
691		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
692		break;
693	case ICE_MAX_AGG_BW_25G:
694		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
695		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
696		break;
697	}
698}
699
700/**
701 * ice_print_rollback_msg - print FW rollback message
702 * @hw: pointer to the hardware structure
703 */
704void ice_print_rollback_msg(struct ice_hw *hw)
705{
706	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
707	struct ice_nvm_info *nvm = &hw->nvm;
708	struct ice_orom_info *orom;
709
710	orom = &nvm->orom;
711
712	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
713		 nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
714		 orom->build, orom->patch);
715	ice_warn(hw,
716		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
717		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
718}
719
720/**
721 * ice_init_hw - main hardware initialization routine
722 * @hw: pointer to the hardware structure
723 */
724enum ice_status ice_init_hw(struct ice_hw *hw)
725{
726	struct ice_aqc_get_phy_caps_data *pcaps;
727	enum ice_status status;
728	u16 mac_buf_len;
729	void *mac_buf;
730
731	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
732
733	/* Set MAC type based on DeviceID */
734	status = ice_set_mac_type(hw);
735	if (status)
736		return status;
737
738	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
739			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
740		PF_FUNC_RID_FUNCTION_NUMBER_S;
741
742	status = ice_reset(hw, ICE_RESET_PFR);
743	if (status)
744		return status;
745	ice_get_itr_intrl_gran(hw);
746
747	status = ice_create_all_ctrlq(hw);
748	if (status)
749		goto err_unroll_cqinit;
750
751	status = ice_init_nvm(hw);
752	if (status)
753		goto err_unroll_cqinit;
754
755	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
756		ice_print_rollback_msg(hw);
757
758	status = ice_clear_pf_cfg(hw);
759	if (status)
760		goto err_unroll_cqinit;
761
762	ice_clear_pxe_mode(hw);
763
764	status = ice_get_caps(hw);
765	if (status)
766		goto err_unroll_cqinit;
767
768	hw->port_info = (struct ice_port_info *)
769			ice_malloc(hw, sizeof(*hw->port_info));
770	if (!hw->port_info) {
771		status = ICE_ERR_NO_MEMORY;
772		goto err_unroll_cqinit;
773	}
774
775	/* set the back pointer to HW */
776	hw->port_info->hw = hw;
777
778	/* Initialize port_info struct with switch configuration data */
779	status = ice_get_initial_sw_cfg(hw);
780	if (status)
781		goto err_unroll_alloc;
782
783	hw->evb_veb = true;
784	/* Query the allocated resources for Tx scheduler */
785	status = ice_sched_query_res_alloc(hw);
786	if (status) {
787		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
788		goto err_unroll_alloc;
789	}
790	ice_sched_get_psm_clk_freq(hw);
791
792	/* Initialize port_info struct with scheduler data */
793	status = ice_sched_init_port(hw->port_info);
794	if (status)
795		goto err_unroll_sched;
796	pcaps = (struct ice_aqc_get_phy_caps_data *)
797		ice_malloc(hw, sizeof(*pcaps));
798	if (!pcaps) {
799		status = ICE_ERR_NO_MEMORY;
800		goto err_unroll_sched;
801	}
802
803	/* Initialize port_info struct with PHY capabilities */
804	status = ice_aq_get_phy_caps(hw->port_info, false,
805				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
806	ice_free(hw, pcaps);
807	if (status)
808		ice_debug(hw, ICE_DBG_PHY, "%s: Get PHY capabilities failed, continuing anyway\n",
809			  __func__);
810
811	/* Initialize port_info struct with link information */
812	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
813	if (status)
814		goto err_unroll_sched;
815	/* need a valid SW entry point to build a Tx tree */
816	if (!hw->sw_entry_point_layer) {
817		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
818		status = ICE_ERR_CFG;
819		goto err_unroll_sched;
820	}
821	INIT_LIST_HEAD(&hw->agg_list);
822	/* Initialize max burst size */
823	if (!hw->max_burst_size)
824		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
825	status = ice_init_fltr_mgmt_struct(hw);
826	if (status)
827		goto err_unroll_sched;
828
829	/* Get MAC information */
830	/* A single port can report up to two (LAN and WoL) addresses */
831	mac_buf = ice_calloc(hw, 2,
832			     sizeof(struct ice_aqc_manage_mac_read_resp));
833	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
834
835	if (!mac_buf) {
836		status = ICE_ERR_NO_MEMORY;
837		goto err_unroll_fltr_mgmt_struct;
838	}
839
840	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
841	ice_free(hw, mac_buf);
842
843	if (status)
844		goto err_unroll_fltr_mgmt_struct;
845	/* enable jumbo frame support at MAC level */
846	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
847	if (status)
848		goto err_unroll_fltr_mgmt_struct;
849	status = ice_init_hw_tbls(hw);
850	if (status)
851		goto err_unroll_fltr_mgmt_struct;
852	ice_init_lock(&hw->tnl_lock);
853	return ICE_SUCCESS;
854
855err_unroll_fltr_mgmt_struct:
856	ice_cleanup_fltr_mgmt_struct(hw);
857err_unroll_sched:
858	ice_sched_cleanup_all(hw);
859err_unroll_alloc:
860	ice_free(hw, hw->port_info);
861	hw->port_info = NULL;
862err_unroll_cqinit:
863	ice_destroy_all_ctrlq(hw);
864	return status;
865}
866
867/**
868 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
869 * @hw: pointer to the hardware structure
870 *
871 * This should be called only during nominal operation, not as a result of
872 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
873 * applicable initializations if it fails for any reason.
874 */
875void ice_deinit_hw(struct ice_hw *hw)
876{
877	ice_cleanup_fltr_mgmt_struct(hw);
878
879	ice_sched_cleanup_all(hw);
880	ice_sched_clear_agg(hw);
881	ice_free_seg(hw);
882	ice_free_hw_tbls(hw);
883	ice_destroy_lock(&hw->tnl_lock);
884
885	if (hw->port_info) {
886		ice_free(hw, hw->port_info);
887		hw->port_info = NULL;
888	}
889
890	ice_destroy_all_ctrlq(hw);
891
892	/* Clear VSI contexts if not already cleared */
893	ice_clear_all_vsi_ctx(hw);
894}
895
896/**
897 * ice_check_reset - Check to see if a global reset is complete
898 * @hw: pointer to the hardware structure
899 */
900enum ice_status ice_check_reset(struct ice_hw *hw)
901{
902	u32 cnt, reg = 0, grst_timeout, uld_mask;
903
904	/* Poll for Device Active state in case a recent CORER, GLOBR,
905	 * or EMPR has occurred. The grst delay value is in 100ms units.
906	 * Add 1sec for outstanding AQ commands that can take a long time.
907	 */
908	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
909			GLGEN_RSTCTL_GRSTDEL_S) + 10;
910
911	for (cnt = 0; cnt < grst_timeout; cnt++) {
912		ice_msec_delay(100, true);
913		reg = rd32(hw, GLGEN_RSTAT);
914		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
915			break;
916	}
917
918	if (cnt == grst_timeout) {
919		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
920		return ICE_ERR_RESET_FAILED;
921	}
922
923#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
924				 GLNVM_ULD_PCIER_DONE_1_M |\
925				 GLNVM_ULD_CORER_DONE_M |\
926				 GLNVM_ULD_GLOBR_DONE_M |\
927				 GLNVM_ULD_POR_DONE_M |\
928				 GLNVM_ULD_POR_DONE_1_M |\
929				 GLNVM_ULD_PCIER_DONE_2_M)
930
931	uld_mask = ICE_RESET_DONE_MASK;
932
933	/* Device is Active; check Global Reset processes are done */
934	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
935		reg = rd32(hw, GLNVM_ULD) & uld_mask;
936		if (reg == uld_mask) {
937			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
938			break;
939		}
940		ice_msec_delay(10, true);
941	}
942
943	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
944		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
945			  reg);
946		return ICE_ERR_RESET_FAILED;
947	}
948
949	return ICE_SUCCESS;
950}
951
952/**
953 * ice_pf_reset - Reset the PF
954 * @hw: pointer to the hardware structure
955 *
956 * If a global reset has been triggered, this function checks
957 * for its completion and then issues the PF reset
958 */
959static enum ice_status ice_pf_reset(struct ice_hw *hw)
960{
961	u32 cnt, reg;
962
963	/* If at function entry a global reset was already in progress, i.e.
964	 * state is not 'device active' or any of the reset done bits are not
965	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
966	 * global reset is done.
967	 */
968	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
969	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
970		/* poll on global reset currently in progress until done */
971		if (ice_check_reset(hw))
972			return ICE_ERR_RESET_FAILED;
973
974		return ICE_SUCCESS;
975	}
976
977	/* Reset the PF */
978	reg = rd32(hw, PFGEN_CTRL);
979
980	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
981
982	/* Wait for the PFR to complete. The wait time is the global config lock
983	 * timeout plus the PFR timeout which will account for a possible reset
984	 * that is occurring during a download package operation.
985	 */
986	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
987	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
988		reg = rd32(hw, PFGEN_CTRL);
989		if (!(reg & PFGEN_CTRL_PFSWR_M))
990			break;
991
992		ice_msec_delay(1, true);
993	}
994
995	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
996		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
997		return ICE_ERR_RESET_FAILED;
998	}
999
1000	return ICE_SUCCESS;
1001}
1002
1003/**
1004 * ice_reset - Perform different types of reset
1005 * @hw: pointer to the hardware structure
1006 * @req: reset request
1007 *
1008 * This function triggers a reset as specified by the req parameter.
1009 *
1010 * Note:
1011 * If anything other than a PF reset is triggered, PXE mode is restored.
1012 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1013 * interface has been restored in the rebuild flow.
1014 */
1015enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1016{
1017	u32 val = 0;
1018
1019	switch (req) {
1020	case ICE_RESET_PFR:
1021		return ice_pf_reset(hw);
1022	case ICE_RESET_CORER:
1023		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1024		val = GLGEN_RTRIG_CORER_M;
1025		break;
1026	case ICE_RESET_GLOBR:
1027		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1028		val = GLGEN_RTRIG_GLOBR_M;
1029		break;
1030	default:
1031		return ICE_ERR_PARAM;
1032	}
1033
1034	val |= rd32(hw, GLGEN_RTRIG);
1035	wr32(hw, GLGEN_RTRIG, val);
1036	ice_flush(hw);
1037
1038	/* wait for the FW to be ready */
1039	return ice_check_reset(hw);
1040}
1041
1042/**
1043 * ice_copy_rxq_ctx_to_hw
1044 * @hw: pointer to the hardware structure
1045 * @ice_rxq_ctx: pointer to the rxq context
1046 * @rxq_index: the index of the Rx queue
1047 *
1048 * Copies rxq context from dense structure to HW register space
1049 */
1050static enum ice_status
1051ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1052{
1053	u8 i;
1054
1055	if (!ice_rxq_ctx)
1056		return ICE_ERR_BAD_PTR;
1057
1058	if (rxq_index > QRX_CTRL_MAX_INDEX)
1059		return ICE_ERR_PARAM;
1060
1061	/* Copy each dword separately to HW */
1062	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1063		wr32(hw, QRX_CONTEXT(i, rxq_index),
1064		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1065
1066		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1067			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1068	}
1069
1070	return ICE_SUCCESS;
1071}
1072
1073/* LAN Rx Queue Context */
1074static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1075	/* Field		Width	LSB */
1076	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
1077	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
1078	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
1079	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
1080	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
1081	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
1082	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
1083	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
1084	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
1085	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
1086	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
1087	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
1088	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
1089	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
1090	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
1091	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
1092	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
1093	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
1094	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
1095	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
1096	{ 0 }
1097};
1098
1099/**
1100 * ice_write_rxq_ctx
1101 * @hw: pointer to the hardware structure
1102 * @rlan_ctx: pointer to the rxq context
1103 * @rxq_index: the index of the Rx queue
1104 *
1105 * Converts rxq context from sparse to dense structure and then writes
1106 * it to HW register space and enables the hardware to prefetch descriptors
1107 * instead of only fetching them on demand
1108 */
1109enum ice_status
1110ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1111		  u32 rxq_index)
1112{
1113	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1114
1115	if (!rlan_ctx)
1116		return ICE_ERR_BAD_PTR;
1117
1118	rlan_ctx->prefena = 1;
1119
1120	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1121	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1122}
1123
1124/**
1125 * ice_clear_rxq_ctx
1126 * @hw: pointer to the hardware structure
1127 * @rxq_index: the index of the Rx queue to clear
1128 *
1129 * Clears rxq context in HW register space
1130 */
1131enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1132{
1133	u8 i;
1134
1135	if (rxq_index > QRX_CTRL_MAX_INDEX)
1136		return ICE_ERR_PARAM;
1137
1138	/* Clear each dword register separately */
1139	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1140		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1141
1142	return ICE_SUCCESS;
1143}
1144
1145/* LAN Tx Queue Context */
1146const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1147				    /* Field			Width	LSB */
1148	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
1149	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
1150	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
1151	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
1152	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
1153	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
1154	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
1155	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
1156	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
1157	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
1158	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
1159	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
1160	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
1161	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
1162	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
1163	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
1164	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
1165	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
1166	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
1167	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
1168	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
1169	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
1170	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
1171	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
1172	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
1173	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
1174	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
1175	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
1176	{ 0 }
1177};
1178
1179/**
1180 * ice_copy_tx_cmpltnq_ctx_to_hw
1181 * @hw: pointer to the hardware structure
1182 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1183 * @tx_cmpltnq_index: the index of the completion queue
1184 *
1185 * Copies Tx completion queue context from dense structure to HW register space
1186 */
1187static enum ice_status
1188ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1189			      u32 tx_cmpltnq_index)
1190{
1191	u8 i;
1192
1193	if (!ice_tx_cmpltnq_ctx)
1194		return ICE_ERR_BAD_PTR;
1195
1196	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1197		return ICE_ERR_PARAM;
1198
1199	/* Copy each dword separately to HW */
1200	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1201		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1202		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1203
1204		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1205			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1206	}
1207
1208	return ICE_SUCCESS;
1209}
1210
1211/* LAN Tx Completion Queue Context */
1212static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1213				       /* Field			Width   LSB */
1214	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
1215	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
1216	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
1217	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
1218	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
1219	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
1220	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
1221	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
1222	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
1223	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
1224	{ 0 }
1225};
1226
1227/**
1228 * ice_write_tx_cmpltnq_ctx
1229 * @hw: pointer to the hardware structure
1230 * @tx_cmpltnq_ctx: pointer to the completion queue context
1231 * @tx_cmpltnq_index: the index of the completion queue
1232 *
1233 * Converts completion queue context from sparse to dense structure and then
1234 * writes it to HW register space
1235 */
1236enum ice_status
1237ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1238			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1239			 u32 tx_cmpltnq_index)
1240{
1241	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1242
1243	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1244	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1245}
1246
1247/**
1248 * ice_clear_tx_cmpltnq_ctx
1249 * @hw: pointer to the hardware structure
1250 * @tx_cmpltnq_index: the index of the completion queue to clear
1251 *
1252 * Clears Tx completion queue context in HW register space
1253 */
1254enum ice_status
1255ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1256{
1257	u8 i;
1258
1259	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1260		return ICE_ERR_PARAM;
1261
1262	/* Clear each dword register separately */
1263	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1264		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1265
1266	return ICE_SUCCESS;
1267}
1268
1269/**
1270 * ice_copy_tx_drbell_q_ctx_to_hw
1271 * @hw: pointer to the hardware structure
1272 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1273 * @tx_drbell_q_index: the index of the doorbell queue
1274 *
1275 * Copies doorbell queue context from dense structure to HW register space
1276 */
1277static enum ice_status
1278ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1279			       u32 tx_drbell_q_index)
1280{
1281	u8 i;
1282
1283	if (!ice_tx_drbell_q_ctx)
1284		return ICE_ERR_BAD_PTR;
1285
1286	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1287		return ICE_ERR_PARAM;
1288
1289	/* Copy each dword separately to HW */
1290	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1291		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1292		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1293
1294		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1295			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1296	}
1297
1298	return ICE_SUCCESS;
1299}
1300
1301/* LAN Tx Doorbell Queue Context info */
1302static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1303					/* Field		Width   LSB */
1304	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
1305	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
1306	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
1307	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
1308	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
1309	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
1310	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
1311	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
1312	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
1313	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
1314	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
1315	{ 0 }
1316};
1317
1318/**
1319 * ice_write_tx_drbell_q_ctx
1320 * @hw: pointer to the hardware structure
1321 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1322 * @tx_drbell_q_index: the index of the doorbell queue
1323 *
1324 * Converts doorbell queue context from sparse to dense structure and then
1325 * writes it to HW register space
1326 */
1327enum ice_status
1328ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1329			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1330			  u32 tx_drbell_q_index)
1331{
1332	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1333
1334	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1335		    ice_tx_drbell_q_ctx_info);
1336	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1337}
1338
1339/**
1340 * ice_clear_tx_drbell_q_ctx
1341 * @hw: pointer to the hardware structure
1342 * @tx_drbell_q_index: the index of the doorbell queue to clear
1343 *
1344 * Clears doorbell queue context in HW register space
1345 */
1346enum ice_status
1347ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1348{
1349	u8 i;
1350
1351	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1352		return ICE_ERR_PARAM;
1353
1354	/* Clear each dword register separately */
1355	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1356		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1357
1358	return ICE_SUCCESS;
1359}
1360
1361/* FW Admin Queue command wrappers */
1362
1363/**
1364 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1365 * @hw: pointer to the HW struct
1366 * @desc: descriptor describing the command
1367 * @buf: buffer to use for indirect commands (NULL for direct commands)
1368 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1369 * @cd: pointer to command details structure
1370 *
1371 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1372 */
1373enum ice_status
1374ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1375		u16 buf_size, struct ice_sq_cd *cd)
1376{
1377	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1378}
1379
1380/**
1381 * ice_aq_get_fw_ver
1382 * @hw: pointer to the HW struct
1383 * @cd: pointer to command details structure or NULL
1384 *
1385 * Get the firmware version (0x0001) from the admin queue commands
1386 */
1387enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1388{
1389	struct ice_aqc_get_ver *resp;
1390	struct ice_aq_desc desc;
1391	enum ice_status status;
1392
1393	resp = &desc.params.get_ver;
1394
1395	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1396
1397	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1398
1399	if (!status) {
1400		hw->fw_branch = resp->fw_branch;
1401		hw->fw_maj_ver = resp->fw_major;
1402		hw->fw_min_ver = resp->fw_minor;
1403		hw->fw_patch = resp->fw_patch;
1404		hw->fw_build = LE32_TO_CPU(resp->fw_build);
1405		hw->api_branch = resp->api_branch;
1406		hw->api_maj_ver = resp->api_major;
1407		hw->api_min_ver = resp->api_minor;
1408		hw->api_patch = resp->api_patch;
1409	}
1410
1411	return status;
1412}
1413
1414/**
1415 * ice_aq_send_driver_ver
1416 * @hw: pointer to the HW struct
1417 * @dv: driver's major, minor version
1418 * @cd: pointer to command details structure or NULL
1419 *
1420 * Send the driver version (0x0002) to the firmware
1421 */
1422enum ice_status
1423ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1424		       struct ice_sq_cd *cd)
1425{
1426	struct ice_aqc_driver_ver *cmd;
1427	struct ice_aq_desc desc;
1428	u16 len;
1429
1430	cmd = &desc.params.driver_ver;
1431
1432	if (!dv)
1433		return ICE_ERR_PARAM;
1434
1435	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1436
1437	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1438	cmd->major_ver = dv->major_ver;
1439	cmd->minor_ver = dv->minor_ver;
1440	cmd->build_ver = dv->build_ver;
1441	cmd->subbuild_ver = dv->subbuild_ver;
1442
1443	len = 0;
1444	while (len < sizeof(dv->driver_string) &&
1445	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1446		len++;
1447
1448	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1449}
1450
1451/**
1452 * ice_aq_q_shutdown
1453 * @hw: pointer to the HW struct
1454 * @unloading: is the driver unloading itself
1455 *
1456 * Tell the Firmware that we're shutting down the AdminQ and whether
1457 * or not the driver is unloading as well (0x0003).
1458 */
1459enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1460{
1461	struct ice_aqc_q_shutdown *cmd;
1462	struct ice_aq_desc desc;
1463
1464	cmd = &desc.params.q_shutdown;
1465
1466	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1467
1468	if (unloading)
1469		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1470
1471	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1472}
1473
1474/**
1475 * ice_aq_req_res
1476 * @hw: pointer to the HW struct
1477 * @res: resource ID
1478 * @access: access type
1479 * @sdp_number: resource number
1480 * @timeout: the maximum time in ms that the driver may hold the resource
1481 * @cd: pointer to command details structure or NULL
1482 *
1483 * Requests common resource using the admin queue commands (0x0008).
1484 * When attempting to acquire the Global Config Lock, the driver can
1485 * learn of three states:
1486 *  1) ICE_SUCCESS -        acquired lock, and can perform download package
1487 *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
1488 *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1489 *                          successfully downloaded the package; the driver does
1490 *                          not have to download the package and can continue
1491 *                          loading
1492 *
1493 * Note that if the caller is in an acquire lock, perform action, release lock
1494 * phase of operation, it is possible that the FW may detect a timeout and issue
1495 * a CORER. In this case, the driver will receive a CORER interrupt and will
1496 * have to determine its cause. The calling thread that is handling this flow
1497 * will likely get an error propagated back to it indicating the Download
1498 * Package, Update Package or the Release Resource AQ commands timed out.
1499 */
1500static enum ice_status
1501ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1502	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1503	       struct ice_sq_cd *cd)
1504{
1505	struct ice_aqc_req_res *cmd_resp;
1506	struct ice_aq_desc desc;
1507	enum ice_status status;
1508
1509	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1510
1511	cmd_resp = &desc.params.res_owner;
1512
1513	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1514
1515	cmd_resp->res_id = CPU_TO_LE16(res);
1516	cmd_resp->access_type = CPU_TO_LE16(access);
1517	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1518	cmd_resp->timeout = CPU_TO_LE32(*timeout);
1519	*timeout = 0;
1520
1521	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1522
1523	/* The completion specifies the maximum time in ms that the driver
1524	 * may hold the resource in the Timeout field.
1525	 */
1526
1527	/* Global config lock response utilizes an additional status field.
1528	 *
1529	 * If the Global config lock resource is held by some other driver, the
1530	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1531	 * and the timeout field indicates the maximum time the current owner
1532	 * of the resource has to free it.
1533	 */
1534	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1535		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1536			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1537			return ICE_SUCCESS;
1538		} else if (LE16_TO_CPU(cmd_resp->status) ==
1539			   ICE_AQ_RES_GLBL_IN_PROG) {
1540			*timeout = LE32_TO_CPU(cmd_resp->timeout);
1541			return ICE_ERR_AQ_ERROR;
1542		} else if (LE16_TO_CPU(cmd_resp->status) ==
1543			   ICE_AQ_RES_GLBL_DONE) {
1544			return ICE_ERR_AQ_NO_WORK;
1545		}
1546
1547		/* invalid FW response, force a timeout immediately */
1548		*timeout = 0;
1549		return ICE_ERR_AQ_ERROR;
1550	}
1551
1552	/* If the resource is held by some other driver, the command completes
1553	 * with a busy return value and the timeout field indicates the maximum
1554	 * time the current owner of the resource has to free it.
1555	 */
1556	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1557		*timeout = LE32_TO_CPU(cmd_resp->timeout);
1558
1559	return status;
1560}
1561
1562/**
1563 * ice_aq_release_res
1564 * @hw: pointer to the HW struct
1565 * @res: resource ID
1566 * @sdp_number: resource number
1567 * @cd: pointer to command details structure or NULL
1568 *
1569 * release common resource using the admin queue commands (0x0009)
1570 */
1571static enum ice_status
1572ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1573		   struct ice_sq_cd *cd)
1574{
1575	struct ice_aqc_req_res *cmd;
1576	struct ice_aq_desc desc;
1577
1578	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1579
1580	cmd = &desc.params.res_owner;
1581
1582	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1583
1584	cmd->res_id = CPU_TO_LE16(res);
1585	cmd->res_number = CPU_TO_LE32(sdp_number);
1586
1587	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1588}
1589
1590/**
1591 * ice_acquire_res
1592 * @hw: pointer to the HW structure
1593 * @res: resource ID
1594 * @access: access type (read or write)
1595 * @timeout: timeout in milliseconds
1596 *
1597 * This function will attempt to acquire the ownership of a resource.
1598 */
1599enum ice_status
1600ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1601		enum ice_aq_res_access_type access, u32 timeout)
1602{
1603#define ICE_RES_POLLING_DELAY_MS	10
1604	u32 delay = ICE_RES_POLLING_DELAY_MS;
1605	u32 time_left = timeout;
1606	enum ice_status status;
1607
1608	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1609
1610	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1611
1612	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1613	 * previously acquired the resource and performed any necessary updates;
1614	 * in this case the caller does not obtain the resource and has no
1615	 * further work to do.
1616	 */
1617	if (status == ICE_ERR_AQ_NO_WORK)
1618		goto ice_acquire_res_exit;
1619
1620	if (status)
1621		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1622
1623	/* If necessary, poll until the current lock owner timeouts */
1624	timeout = time_left;
1625	while (status && timeout && time_left) {
1626		ice_msec_delay(delay, true);
1627		timeout = (timeout > delay) ? timeout - delay : 0;
1628		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1629
1630		if (status == ICE_ERR_AQ_NO_WORK)
1631			/* lock free, but no work to do */
1632			break;
1633
1634		if (!status)
1635			/* lock acquired */
1636			break;
1637	}
1638	if (status && status != ICE_ERR_AQ_NO_WORK)
1639		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1640
1641ice_acquire_res_exit:
1642	if (status == ICE_ERR_AQ_NO_WORK) {
1643		if (access == ICE_RES_WRITE)
1644			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1645		else
1646			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1647	}
1648	return status;
1649}
1650
1651/**
1652 * ice_release_res
1653 * @hw: pointer to the HW structure
1654 * @res: resource ID
1655 *
1656 * This function will release a resource using the proper Admin Command.
1657 */
1658void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1659{
1660	enum ice_status status;
1661	u32 total_delay = 0;
1662
1663	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1664
1665	status = ice_aq_release_res(hw, res, 0, NULL);
1666
1667	/* there are some rare cases when trying to release the resource
1668	 * results in an admin queue timeout, so handle them correctly
1669	 */
1670	while ((status == ICE_ERR_AQ_TIMEOUT) &&
1671	       (total_delay < hw->adminq.sq_cmd_timeout)) {
1672		ice_msec_delay(1, true);
1673		status = ice_aq_release_res(hw, res, 0, NULL);
1674		total_delay++;
1675	}
1676}
1677
1678/**
1679 * ice_aq_alloc_free_res - command to allocate/free resources
1680 * @hw: pointer to the HW struct
1681 * @num_entries: number of resource entries in buffer
1682 * @buf: Indirect buffer to hold data parameters and response
1683 * @buf_size: size of buffer for indirect commands
1684 * @opc: pass in the command opcode
1685 * @cd: pointer to command details structure or NULL
1686 *
1687 * Helper function to allocate/free resources using the admin queue commands
1688 */
1689enum ice_status
1690ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1691		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1692		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1693{
1694	struct ice_aqc_alloc_free_res_cmd *cmd;
1695	struct ice_aq_desc desc;
1696
1697	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1698
1699	cmd = &desc.params.sw_res_ctrl;
1700
1701	if (!buf)
1702		return ICE_ERR_PARAM;
1703
1704	if (buf_size < (num_entries * sizeof(buf->elem[0])))
1705		return ICE_ERR_PARAM;
1706
1707	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1708
1709	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1710
1711	cmd->num_entries = CPU_TO_LE16(num_entries);
1712
1713	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1714}
1715
1716/**
1717 * ice_alloc_hw_res - allocate resource
1718 * @hw: pointer to the HW struct
1719 * @type: type of resource
1720 * @num: number of resources to allocate
1721 * @btm: allocate from bottom
1722 * @res: pointer to array that will receive the resources
1723 */
1724enum ice_status
1725ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1726{
1727	struct ice_aqc_alloc_free_res_elem *buf;
1728	enum ice_status status;
1729	u16 buf_len;
1730
1731	buf_len = ice_struct_size(buf, elem, num);
1732	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1733	if (!buf)
1734		return ICE_ERR_NO_MEMORY;
1735
1736	/* Prepare buffer to allocate resource. */
1737	buf->num_elems = CPU_TO_LE16(num);
1738	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1739				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1740	if (btm)
1741		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1742
1743	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1744				       ice_aqc_opc_alloc_res, NULL);
1745	if (status)
1746		goto ice_alloc_res_exit;
1747
1748	ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
1749		   ICE_NONDMA_TO_NONDMA);
1750
1751ice_alloc_res_exit:
1752	ice_free(hw, buf);
1753	return status;
1754}
1755
1756/**
1757 * ice_free_hw_res - free allocated HW resource
1758 * @hw: pointer to the HW struct
1759 * @type: type of resource to free
1760 * @num: number of resources
1761 * @res: pointer to array that contains the resources to free
1762 */
1763enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1764{
1765	struct ice_aqc_alloc_free_res_elem *buf;
1766	enum ice_status status;
1767	u16 buf_len;
1768
1769	buf_len = ice_struct_size(buf, elem, num);
1770	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1771	if (!buf)
1772		return ICE_ERR_NO_MEMORY;
1773
1774	/* Prepare buffer to free resource. */
1775	buf->num_elems = CPU_TO_LE16(num);
1776	buf->res_type = CPU_TO_LE16(type);
1777	ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
1778		   ICE_NONDMA_TO_NONDMA);
1779
1780	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1781				       ice_aqc_opc_free_res, NULL);
1782	if (status)
1783		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1784
1785	ice_free(hw, buf);
1786	return status;
1787}
1788
1789/**
1790 * ice_get_num_per_func - determine number of resources per PF
1791 * @hw: pointer to the HW structure
1792 * @max: value to be evenly split between each PF
1793 *
1794 * Determine the number of valid functions by going through the bitmap returned
1795 * from parsing capabilities and use this to calculate the number of resources
1796 * per PF based on the max value passed in.
1797 */
1798static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1799{
1800	u8 funcs;
1801
1802#define ICE_CAPS_VALID_FUNCS_M	0xFF
1803	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1804			     ICE_CAPS_VALID_FUNCS_M);
1805
1806	if (!funcs)
1807		return 0;
1808
1809	return max / funcs;
1810}
1811
1812/**
1813 * ice_print_led_caps - print LED capabilities
1814 * @hw: pointer to the ice_hw instance
1815 * @caps: pointer to common caps instance
1816 * @prefix: string to prefix when printing
1817 * @debug: set to indicate debug print
1818 */
1819static void
1820ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1821		   char const *prefix, bool debug)
1822{
1823	u8 i;
1824
1825	if (debug)
1826		ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
1827			  caps->led_pin_num);
1828	else
1829		ice_info(hw, "%s: led_pin_num = %d\n", prefix,
1830			 caps->led_pin_num);
1831
1832	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
1833		if (!caps->led[i])
1834			continue;
1835
1836		if (debug)
1837			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
1838				  prefix, i, caps->led[i]);
1839		else
1840			ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
1841				 caps->led[i]);
1842	}
1843}
1844
1845/**
1846 * ice_print_sdp_caps - print SDP capabilities
1847 * @hw: pointer to the ice_hw instance
1848 * @caps: pointer to common caps instance
1849 * @prefix: string to prefix when printing
1850 * @debug: set to indicate debug print
1851 */
1852static void
1853ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1854		   char const *prefix, bool debug)
1855{
1856	u8 i;
1857
1858	if (debug)
1859		ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
1860			  caps->sdp_pin_num);
1861	else
1862		ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
1863			 caps->sdp_pin_num);
1864
1865	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
1866		if (!caps->sdp[i])
1867			continue;
1868
1869		if (debug)
1870			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
1871				  prefix, i, caps->sdp[i]);
1872		else
1873			ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
1874				 i, caps->sdp[i]);
1875	}
1876}
1877
1878/**
1879 * ice_parse_common_caps - parse common device/function capabilities
1880 * @hw: pointer to the HW struct
1881 * @caps: pointer to common capabilities structure
1882 * @elem: the capability element to parse
1883 * @prefix: message prefix for tracing capabilities
1884 *
1885 * Given a capability element, extract relevant details into the common
1886 * capability structure.
1887 *
1888 * Returns: true if the capability matches one of the common capability ids,
1889 * false otherwise.
1890 */
1891static bool
1892ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1893		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
1894{
1895	u32 logical_id = LE32_TO_CPU(elem->logical_id);
1896	u32 phys_id = LE32_TO_CPU(elem->phys_id);
1897	u32 number = LE32_TO_CPU(elem->number);
1898	u16 cap = LE16_TO_CPU(elem->cap);
1899	bool found = true;
1900
1901	switch (cap) {
1902	case ICE_AQC_CAPS_SWITCHING_MODE:
1903		caps->switching_mode = number;
1904		ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
1905			  caps->switching_mode);
1906		break;
1907	case ICE_AQC_CAPS_MANAGEABILITY_MODE:
1908		caps->mgmt_mode = number;
1909		caps->mgmt_protocols_mctp = logical_id;
1910		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
1911			  caps->mgmt_mode);
1912		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
1913			  caps->mgmt_protocols_mctp);
1914		break;
1915	case ICE_AQC_CAPS_OS2BMC:
1916		caps->os2bmc = number;
1917		ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
1918		break;
1919	case ICE_AQC_CAPS_VALID_FUNCTIONS:
1920		caps->valid_functions = number;
1921		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
1922			  caps->valid_functions);
1923		break;
1924	case ICE_AQC_CAPS_SRIOV:
1925		caps->sr_iov_1_1 = (number == 1);
1926		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
1927			  caps->sr_iov_1_1);
1928		break;
1929	case ICE_AQC_CAPS_802_1QBG:
1930		caps->evb_802_1_qbg = (number == 1);
1931		ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
1932		break;
1933	case ICE_AQC_CAPS_802_1BR:
1934		caps->evb_802_1_qbh = (number == 1);
1935		ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
1936		break;
1937	case ICE_AQC_CAPS_DCB:
1938		caps->dcb = (number == 1);
1939		caps->active_tc_bitmap = logical_id;
1940		caps->maxtc = phys_id;
1941		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
1942		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
1943			  caps->active_tc_bitmap);
1944		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
1945		break;
1946	case ICE_AQC_CAPS_ISCSI:
1947		caps->iscsi = (number == 1);
1948		ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
1949		break;
1950	case ICE_AQC_CAPS_RSS:
1951		caps->rss_table_size = number;
1952		caps->rss_table_entry_width = logical_id;
1953		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
1954			  caps->rss_table_size);
1955		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
1956			  caps->rss_table_entry_width);
1957		break;
1958	case ICE_AQC_CAPS_RXQS:
1959		caps->num_rxq = number;
1960		caps->rxq_first_id = phys_id;
1961		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
1962			  caps->num_rxq);
1963		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
1964			  caps->rxq_first_id);
1965		break;
1966	case ICE_AQC_CAPS_TXQS:
1967		caps->num_txq = number;
1968		caps->txq_first_id = phys_id;
1969		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
1970			  caps->num_txq);
1971		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
1972			  caps->txq_first_id);
1973		break;
1974	case ICE_AQC_CAPS_MSIX:
1975		caps->num_msix_vectors = number;
1976		caps->msix_vector_first_id = phys_id;
1977		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
1978			  caps->num_msix_vectors);
1979		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
1980			  caps->msix_vector_first_id);
1981		break;
1982	case ICE_AQC_CAPS_NVM_VER:
1983		break;
1984	case ICE_AQC_CAPS_NVM_MGMT:
1985		caps->nvm_unified_update =
1986			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1987			true : false;
1988		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
1989			  caps->nvm_unified_update);
1990		break;
1991	case ICE_AQC_CAPS_CEM:
1992		caps->mgmt_cem = (number == 1);
1993		ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
1994			  caps->mgmt_cem);
1995		break;
1996	case ICE_AQC_CAPS_LED:
1997		if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
1998			caps->led[phys_id] = true;
1999			caps->led_pin_num++;
2000			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
2001		}
2002		break;
2003	case ICE_AQC_CAPS_SDP:
2004		if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2005			caps->sdp[phys_id] = true;
2006			caps->sdp_pin_num++;
2007			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
2008		}
2009		break;
2010	case ICE_AQC_CAPS_WR_CSR_PROT:
2011		caps->wr_csr_prot = number;
2012		caps->wr_csr_prot |= (u64)logical_id << 32;
2013		ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2014			  (unsigned long long)caps->wr_csr_prot);
2015		break;
2016	case ICE_AQC_CAPS_WOL_PROXY:
2017		caps->num_wol_proxy_fltr = number;
2018		caps->wol_proxy_vsi_seid = logical_id;
2019		caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2020		caps->acpi_prog_mthd = !!(phys_id &
2021					  ICE_ACPI_PROG_MTHD_M);
2022		caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2023		ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
2024			  caps->num_wol_proxy_fltr);
2025		ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
2026			  caps->wol_proxy_vsi_seid);
2027		break;
2028	case ICE_AQC_CAPS_MAX_MTU:
2029		caps->max_mtu = number;
2030		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2031			  prefix, caps->max_mtu);
2032		break;
2033	default:
2034		/* Not one of the recognized common capabilities */
2035		found = false;
2036	}
2037
2038	return found;
2039}
2040
2041/**
2042 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2043 * @hw: pointer to the HW structure
2044 * @caps: pointer to capabilities structure to fix
2045 *
2046 * Re-calculate the capabilities that are dependent on the number of physical
2047 * ports; i.e. some features are not supported or function differently on
2048 * devices with more than 4 ports.
2049 */
2050static void
2051ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2052{
2053	/* This assumes device capabilities are always scanned before function
2054	 * capabilities during the initialization flow.
2055	 */
2056	if (hw->dev_caps.num_funcs > 4) {
2057		/* Max 4 TCs per port */
2058		caps->maxtc = 4;
2059		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2060			  caps->maxtc);
2061	}
2062}
2063
2064/**
2065 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2066 * @hw: pointer to the HW struct
2067 * @func_p: pointer to function capabilities structure
2068 * @cap: pointer to the capability element to parse
2069 *
2070 * Extract function capabilities for ICE_AQC_CAPS_VF.
2071 */
2072static void
2073ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2074		       struct ice_aqc_list_caps_elem *cap)
2075{
2076	u32 number = LE32_TO_CPU(cap->number);
2077	u32 logical_id = LE32_TO_CPU(cap->logical_id);
2078
2079	func_p->num_allocd_vfs = number;
2080	func_p->vf_base_id = logical_id;
2081	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2082		  func_p->num_allocd_vfs);
2083	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2084		  func_p->vf_base_id);
2085}
2086
2087/**
2088 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2089 * @hw: pointer to the HW struct
2090 * @func_p: pointer to function capabilities structure
2091 * @cap: pointer to the capability element to parse
2092 *
2093 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2094 */
2095static void
2096ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2097			struct ice_aqc_list_caps_elem *cap)
2098{
2099	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2100	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2101		  LE32_TO_CPU(cap->number));
2102	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2103		  func_p->guar_num_vsi);
2104}
2105
2106/**
2107 * ice_parse_func_caps - Parse function capabilities
2108 * @hw: pointer to the HW struct
2109 * @func_p: pointer to function capabilities structure
2110 * @buf: buffer containing the function capability records
2111 * @cap_count: the number of capabilities
2112 *
2113 * Helper function to parse function (0x000A) capabilities list. For
2114 * capabilities shared between device and function, this relies on
2115 * ice_parse_common_caps.
2116 *
2117 * Loop through the list of provided capabilities and extract the relevant
2118 * data into the function capabilities structured.
2119 */
2120static void
2121ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2122		    void *buf, u32 cap_count)
2123{
2124	struct ice_aqc_list_caps_elem *cap_resp;
2125	u32 i;
2126
2127	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2128
2129	ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2130
2131	for (i = 0; i < cap_count; i++) {
2132		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2133		bool found;
2134
2135		found = ice_parse_common_caps(hw, &func_p->common_cap,
2136					      &cap_resp[i], "func caps");
2137
2138		switch (cap) {
2139		case ICE_AQC_CAPS_VF:
2140			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2141			break;
2142		case ICE_AQC_CAPS_VSI:
2143			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2144			break;
2145		default:
2146			/* Don't list common capabilities as unknown */
2147			if (!found)
2148				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2149					  i, cap);
2150			break;
2151		}
2152	}
2153
2154	ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2155	ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2156
2157	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2158}
2159
2160/**
2161 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2162 * @hw: pointer to the HW struct
2163 * @dev_p: pointer to device capabilities structure
2164 * @cap: capability element to parse
2165 *
2166 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2167 */
2168static void
2169ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2170			      struct ice_aqc_list_caps_elem *cap)
2171{
2172	u32 number = LE32_TO_CPU(cap->number);
2173
2174	dev_p->num_funcs = ice_hweight32(number);
2175	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2176		  dev_p->num_funcs);
2177}
2178
2179/**
2180 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2181 * @hw: pointer to the HW struct
2182 * @dev_p: pointer to device capabilities structure
2183 * @cap: capability element to parse
2184 *
2185 * Parse ICE_AQC_CAPS_VF for device capabilities.
2186 */
2187static void
2188ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2189		      struct ice_aqc_list_caps_elem *cap)
2190{
2191	u32 number = LE32_TO_CPU(cap->number);
2192
2193	dev_p->num_vfs_exposed = number;
2194	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2195		  dev_p->num_vfs_exposed);
2196}
2197
2198/**
2199 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2200 * @hw: pointer to the HW struct
2201 * @dev_p: pointer to device capabilities structure
2202 * @cap: capability element to parse
2203 *
2204 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2205 */
2206static void
2207ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2208		       struct ice_aqc_list_caps_elem *cap)
2209{
2210	u32 number = LE32_TO_CPU(cap->number);
2211
2212	dev_p->num_vsi_allocd_to_host = number;
2213	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2214		  dev_p->num_vsi_allocd_to_host);
2215}
2216
2217/**
2218 * ice_parse_dev_caps - Parse device capabilities
2219 * @hw: pointer to the HW struct
2220 * @dev_p: pointer to device capabilities structure
2221 * @buf: buffer containing the device capability records
2222 * @cap_count: the number of capabilities
2223 *
2224 * Helper device to parse device (0x000B) capabilities list. For
2225 * capabilities shared between device and function, this relies on
2226 * ice_parse_common_caps.
2227 *
2228 * Loop through the list of provided capabilities and extract the relevant
2229 * data into the device capabilities structured.
2230 */
2231static void
2232ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2233		   void *buf, u32 cap_count)
2234{
2235	struct ice_aqc_list_caps_elem *cap_resp;
2236	u32 i;
2237
2238	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2239
2240	ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2241
2242	for (i = 0; i < cap_count; i++) {
2243		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2244		bool found;
2245
2246		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2247					      &cap_resp[i], "dev caps");
2248
2249		switch (cap) {
2250		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2251			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2252			break;
2253		case ICE_AQC_CAPS_VF:
2254			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2255			break;
2256		case ICE_AQC_CAPS_VSI:
2257			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2258			break;
2259		default:
2260			/* Don't list common capabilities as unknown */
2261			if (!found)
2262				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2263					  i, cap);
2264			break;
2265		}
2266	}
2267
2268	ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
2269	ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
2270
2271	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2272}
2273
2274/**
2275 * ice_aq_list_caps - query function/device capabilities
2276 * @hw: pointer to the HW struct
2277 * @buf: a buffer to hold the capabilities
2278 * @buf_size: size of the buffer
2279 * @cap_count: if not NULL, set to the number of capabilities reported
2280 * @opc: capabilities type to discover, device or function
2281 * @cd: pointer to command details structure or NULL
2282 *
2283 * Get the function (0x000A) or device (0x000B) capabilities description from
2284 * firmware and store it in the buffer.
2285 *
2286 * If the cap_count pointer is not NULL, then it is set to the number of
2287 * capabilities firmware will report. Note that if the buffer size is too
2288 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2289 * cap_count will still be updated in this case. It is recommended that the
2290 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2291 * firmware could return) to avoid this.
2292 */
2293static enum ice_status
2294ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2295		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2296{
2297	struct ice_aqc_list_caps *cmd;
2298	struct ice_aq_desc desc;
2299	enum ice_status status;
2300
2301	cmd = &desc.params.get_cap;
2302
2303	if (opc != ice_aqc_opc_list_func_caps &&
2304	    opc != ice_aqc_opc_list_dev_caps)
2305		return ICE_ERR_PARAM;
2306
2307	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2308	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2309
2310	if (cap_count)
2311		*cap_count = LE32_TO_CPU(cmd->count);
2312
2313	return status;
2314}
2315
2316/**
2317 * ice_discover_dev_caps - Read and extract device capabilities
2318 * @hw: pointer to the hardware structure
2319 * @dev_caps: pointer to device capabilities structure
2320 *
2321 * Read the device capabilities and extract them into the dev_caps structure
2322 * for later use.
2323 */
2324static enum ice_status
2325ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2326{
2327	enum ice_status status;
2328	u32 cap_count = 0;
2329	void *cbuf;
2330
2331	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2332	if (!cbuf)
2333		return ICE_ERR_NO_MEMORY;
2334
2335	/* Although the driver doesn't know the number of capabilities the
2336	 * device will return, we can simply send a 4KB buffer, the maximum
2337	 * possible size that firmware can return.
2338	 */
2339	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2340
2341	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2342				  ice_aqc_opc_list_dev_caps, NULL);
2343	if (!status)
2344		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2345	ice_free(hw, cbuf);
2346
2347	return status;
2348}
2349
2350/**
2351 * ice_discover_func_caps - Read and extract function capabilities
2352 * @hw: pointer to the hardware structure
2353 * @func_caps: pointer to function capabilities structure
2354 *
2355 * Read the function capabilities and extract them into the func_caps structure
2356 * for later use.
2357 */
2358static enum ice_status
2359ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2360{
2361	enum ice_status status;
2362	u32 cap_count = 0;
2363	void *cbuf;
2364
2365	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2366	if (!cbuf)
2367		return ICE_ERR_NO_MEMORY;
2368
2369	/* Although the driver doesn't know the number of capabilities the
2370	 * device will return, we can simply send a 4KB buffer, the maximum
2371	 * possible size that firmware can return.
2372	 */
2373	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2374
2375	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2376				  ice_aqc_opc_list_func_caps, NULL);
2377	if (!status)
2378		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2379	ice_free(hw, cbuf);
2380
2381	return status;
2382}
2383
2384/**
2385 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2386 * @hw: pointer to the hardware structure
2387 */
2388void ice_set_safe_mode_caps(struct ice_hw *hw)
2389{
2390	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2391	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2392	u32 valid_func, rxq_first_id, txq_first_id;
2393	u32 msix_vector_first_id, max_mtu;
2394	u32 num_funcs;
2395
2396	/* cache some func_caps values that should be restored after memset */
2397	valid_func = func_caps->common_cap.valid_functions;
2398	txq_first_id = func_caps->common_cap.txq_first_id;
2399	rxq_first_id = func_caps->common_cap.rxq_first_id;
2400	msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
2401	max_mtu = func_caps->common_cap.max_mtu;
2402
2403	/* unset func capabilities */
2404	memset(func_caps, 0, sizeof(*func_caps));
2405
2406	/* restore cached values */
2407	func_caps->common_cap.valid_functions = valid_func;
2408	func_caps->common_cap.txq_first_id = txq_first_id;
2409	func_caps->common_cap.rxq_first_id = rxq_first_id;
2410	func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2411	func_caps->common_cap.max_mtu = max_mtu;
2412
2413	/* one Tx and one Rx queue in safe mode */
2414	func_caps->common_cap.num_rxq = 1;
2415	func_caps->common_cap.num_txq = 1;
2416
2417	/* two MSIX vectors, one for traffic and one for misc causes */
2418	func_caps->common_cap.num_msix_vectors = 2;
2419	func_caps->guar_num_vsi = 1;
2420
2421	/* cache some dev_caps values that should be restored after memset */
2422	valid_func = dev_caps->common_cap.valid_functions;
2423	txq_first_id = dev_caps->common_cap.txq_first_id;
2424	rxq_first_id = dev_caps->common_cap.rxq_first_id;
2425	msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
2426	max_mtu = dev_caps->common_cap.max_mtu;
2427	num_funcs = dev_caps->num_funcs;
2428
2429	/* unset dev capabilities */
2430	memset(dev_caps, 0, sizeof(*dev_caps));
2431
2432	/* restore cached values */
2433	dev_caps->common_cap.valid_functions = valid_func;
2434	dev_caps->common_cap.txq_first_id = txq_first_id;
2435	dev_caps->common_cap.rxq_first_id = rxq_first_id;
2436	dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2437	dev_caps->common_cap.max_mtu = max_mtu;
2438	dev_caps->num_funcs = num_funcs;
2439
2440	/* one Tx and one Rx queue per function in safe mode */
2441	dev_caps->common_cap.num_rxq = num_funcs;
2442	dev_caps->common_cap.num_txq = num_funcs;
2443
2444	/* two MSIX vectors per function */
2445	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2446}
2447
2448/**
2449 * ice_get_caps - get info about the HW
2450 * @hw: pointer to the hardware structure
2451 */
2452enum ice_status ice_get_caps(struct ice_hw *hw)
2453{
2454	enum ice_status status;
2455
2456	status = ice_discover_dev_caps(hw, &hw->dev_caps);
2457	if (status)
2458		return status;
2459
2460	return ice_discover_func_caps(hw, &hw->func_caps);
2461}
2462
2463/**
2464 * ice_aq_manage_mac_write - manage MAC address write command
2465 * @hw: pointer to the HW struct
2466 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2467 * @flags: flags to control write behavior
2468 * @cd: pointer to command details structure or NULL
2469 *
2470 * This function is used to write MAC address to the NVM (0x0108).
2471 */
2472enum ice_status
2473ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2474			struct ice_sq_cd *cd)
2475{
2476	struct ice_aqc_manage_mac_write *cmd;
2477	struct ice_aq_desc desc;
2478
2479	cmd = &desc.params.mac_write;
2480	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2481
2482	cmd->flags = flags;
2483	ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);
2484
2485	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2486}
2487
2488/**
2489 * ice_aq_clear_pxe_mode
2490 * @hw: pointer to the HW struct
2491 *
2492 * Tell the firmware that the driver is taking over from PXE (0x0110).
2493 */
2494static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2495{
2496	struct ice_aq_desc desc;
2497
2498	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2499	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2500
2501	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2502}
2503
2504/**
2505 * ice_clear_pxe_mode - clear pxe operations mode
2506 * @hw: pointer to the HW struct
2507 *
2508 * Make sure all PXE mode settings are cleared, including things
2509 * like descriptor fetch/write-back mode.
2510 */
2511void ice_clear_pxe_mode(struct ice_hw *hw)
2512{
2513	if (ice_check_sq_alive(hw, &hw->adminq))
2514		ice_aq_clear_pxe_mode(hw);
2515}
2516
2517/**
2518 * ice_aq_set_port_params - set physical port parameters.
2519 * @pi: pointer to the port info struct
2520 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2521 * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
2522 * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded
2523 * @double_vlan: if set double VLAN is enabled
2524 * @cd: pointer to command details structure or NULL
2525 *
2526 * Set Physical port parameters (0x0203)
2527 */
2528enum ice_status
2529ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2530		       bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2531		       struct ice_sq_cd *cd)
2532
2533{
2534	struct ice_aqc_set_port_params *cmd;
2535	struct ice_hw *hw = pi->hw;
2536	struct ice_aq_desc desc;
2537	u16 cmd_flags = 0;
2538
2539	cmd = &desc.params.set_port_params;
2540
2541	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2542	cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
2543	if (save_bad_pac)
2544		cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
2545	if (pad_short_pac)
2546		cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
2547	if (double_vlan)
2548		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2549	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
2550
2551	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2552}
2553
2554/**
2555 * ice_get_link_speed_based_on_phy_type - returns link speed
2556 * @phy_type_low: lower part of phy_type
2557 * @phy_type_high: higher part of phy_type
2558 *
2559 * This helper function will convert an entry in PHY type structure
2560 * [phy_type_low, phy_type_high] to its corresponding link speed.
2561 * Note: In the structure of [phy_type_low, phy_type_high], there should
2562 * be one bit set, as this function will convert one PHY type to its
2563 * speed.
2564 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2565 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2566 */
2567static u16
2568ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2569{
2570	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2571	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2572
2573	switch (phy_type_low) {
2574	case ICE_PHY_TYPE_LOW_100BASE_TX:
2575	case ICE_PHY_TYPE_LOW_100M_SGMII:
2576		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2577		break;
2578	case ICE_PHY_TYPE_LOW_1000BASE_T:
2579	case ICE_PHY_TYPE_LOW_1000BASE_SX:
2580	case ICE_PHY_TYPE_LOW_1000BASE_LX:
2581	case ICE_PHY_TYPE_LOW_1000BASE_KX:
2582	case ICE_PHY_TYPE_LOW_1G_SGMII:
2583		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2584		break;
2585	case ICE_PHY_TYPE_LOW_2500BASE_T:
2586	case ICE_PHY_TYPE_LOW_2500BASE_X:
2587	case ICE_PHY_TYPE_LOW_2500BASE_KX:
2588		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2589		break;
2590	case ICE_PHY_TYPE_LOW_5GBASE_T:
2591	case ICE_PHY_TYPE_LOW_5GBASE_KR:
2592		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2593		break;
2594	case ICE_PHY_TYPE_LOW_10GBASE_T:
2595	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2596	case ICE_PHY_TYPE_LOW_10GBASE_SR:
2597	case ICE_PHY_TYPE_LOW_10GBASE_LR:
2598	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2599	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2600	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2601		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2602		break;
2603	case ICE_PHY_TYPE_LOW_25GBASE_T:
2604	case ICE_PHY_TYPE_LOW_25GBASE_CR:
2605	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2606	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2607	case ICE_PHY_TYPE_LOW_25GBASE_SR:
2608	case ICE_PHY_TYPE_LOW_25GBASE_LR:
2609	case ICE_PHY_TYPE_LOW_25GBASE_KR:
2610	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2611	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2612	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2613	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2614		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2615		break;
2616	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2617	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2618	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2619	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2620	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2621	case ICE_PHY_TYPE_LOW_40G_XLAUI:
2622		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2623		break;
2624	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2625	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2626	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2627	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2628	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2629	case ICE_PHY_TYPE_LOW_50G_LAUI2:
2630	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2631	case ICE_PHY_TYPE_LOW_50G_AUI2:
2632	case ICE_PHY_TYPE_LOW_50GBASE_CP:
2633	case ICE_PHY_TYPE_LOW_50GBASE_SR:
2634	case ICE_PHY_TYPE_LOW_50GBASE_FR:
2635	case ICE_PHY_TYPE_LOW_50GBASE_LR:
2636	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2637	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2638	case ICE_PHY_TYPE_LOW_50G_AUI1:
2639		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2640		break;
2641	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2642	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2643	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2644	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2645	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2646	case ICE_PHY_TYPE_LOW_100G_CAUI4:
2647	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2648	case ICE_PHY_TYPE_LOW_100G_AUI4:
2649	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2650	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2651	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2652	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2653	case ICE_PHY_TYPE_LOW_100GBASE_DR:
2654		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2655		break;
2656	default:
2657		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2658		break;
2659	}
2660
2661	switch (phy_type_high) {
2662	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2663	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2664	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2665	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2666	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2667		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2668		break;
2669	default:
2670		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2671		break;
2672	}
2673
2674	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2675	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2676		return ICE_AQ_LINK_SPEED_UNKNOWN;
2677	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2678		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2679		return ICE_AQ_LINK_SPEED_UNKNOWN;
2680	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2681		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2682		return speed_phy_type_low;
2683	else
2684		return speed_phy_type_high;
2685}
2686
2687/**
2688 * ice_update_phy_type
2689 * @phy_type_low: pointer to the lower part of phy_type
2690 * @phy_type_high: pointer to the higher part of phy_type
2691 * @link_speeds_bitmap: targeted link speeds bitmap
2692 *
2693 * Note: For the link_speeds_bitmap structure, you can check it at
2694 * [ice_aqc_get_link_status->link_speed]. Caller can pass in
2695 * link_speeds_bitmap include multiple speeds.
2696 *
2697 * Each entry in this [phy_type_low, phy_type_high] structure will
2698 * present a certain link speed. This helper function will turn on bits
2699 * in [phy_type_low, phy_type_high] structure based on the value of
2700 * link_speeds_bitmap input parameter.
2701 */
2702void
2703ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2704		    u16 link_speeds_bitmap)
2705{
2706	u64 pt_high;
2707	u64 pt_low;
2708	int index;
2709	u16 speed;
2710
2711	/* We first check with low part of phy_type */
2712	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2713		pt_low = BIT_ULL(index);
2714		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2715
2716		if (link_speeds_bitmap & speed)
2717			*phy_type_low |= BIT_ULL(index);
2718	}
2719
2720	/* We then check with high part of phy_type */
2721	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2722		pt_high = BIT_ULL(index);
2723		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2724
2725		if (link_speeds_bitmap & speed)
2726			*phy_type_high |= BIT_ULL(index);
2727	}
2728}
2729
2730/**
2731 * ice_aq_set_phy_cfg
2732 * @hw: pointer to the HW struct
2733 * @pi: port info structure of the interested logical port
2734 * @cfg: structure with PHY configuration data to be set
2735 * @cd: pointer to command details structure or NULL
2736 *
2737 * Set the various PHY configuration parameters supported on the Port.
2738 * One or more of the Set PHY config parameters may be ignored in an MFP
2739 * mode as the PF may not have the privilege to set some of the PHY Config
2740 * parameters. This status will be indicated by the command response (0x0601).
2741 */
2742enum ice_status
2743ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2744		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2745{
2746	struct ice_aq_desc desc;
2747	enum ice_status status;
2748
2749	if (!cfg)
2750		return ICE_ERR_PARAM;
2751
2752	/* Ensure that only valid bits of cfg->caps can be turned on. */
2753	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2754		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2755			  cfg->caps);
2756
2757		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2758	}
2759
2760	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2761	desc.params.set_phy.lport_num = pi->lport;
2762	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2763
2764	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2765	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
2766		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2767	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
2768		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2769	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
2770	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
2771		  cfg->low_power_ctrl_an);
2772	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
2773	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
2774	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
2775		  cfg->link_fec_opt);
2776
2777	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2778
2779	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2780		status = ICE_SUCCESS;
2781
2782	if (!status)
2783		pi->phy.curr_user_phy_cfg = *cfg;
2784
2785	return status;
2786}
2787
2788/**
2789 * ice_update_link_info - update status of the HW network link
2790 * @pi: port info structure of the interested logical port
2791 */
2792enum ice_status ice_update_link_info(struct ice_port_info *pi)
2793{
2794	struct ice_link_status *li;
2795	enum ice_status status;
2796
2797	if (!pi)
2798		return ICE_ERR_PARAM;
2799
2800	li = &pi->phy.link_info;
2801
2802	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2803	if (status)
2804		return status;
2805
2806	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2807		struct ice_aqc_get_phy_caps_data *pcaps;
2808		struct ice_hw *hw;
2809
2810		hw = pi->hw;
2811		pcaps = (struct ice_aqc_get_phy_caps_data *)
2812			ice_malloc(hw, sizeof(*pcaps));
2813		if (!pcaps)
2814			return ICE_ERR_NO_MEMORY;
2815
2816		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2817					     pcaps, NULL);
2818
2819		ice_free(hw, pcaps);
2820	}
2821
2822	return status;
2823}
2824
2825/**
2826 * ice_cache_phy_user_req
2827 * @pi: port information structure
2828 * @cache_data: PHY logging data
2829 * @cache_mode: PHY logging mode
2830 *
2831 * Log the user request on (FC, FEC, SPEED) for later user.
2832 */
2833static void
2834ice_cache_phy_user_req(struct ice_port_info *pi,
2835		       struct ice_phy_cache_mode_data cache_data,
2836		       enum ice_phy_cache_mode cache_mode)
2837{
2838	if (!pi)
2839		return;
2840
2841	switch (cache_mode) {
2842	case ICE_FC_MODE:
2843		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2844		break;
2845	case ICE_SPEED_MODE:
2846		pi->phy.curr_user_speed_req =
2847			cache_data.data.curr_user_speed_req;
2848		break;
2849	case ICE_FEC_MODE:
2850		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2851		break;
2852	default:
2853		break;
2854	}
2855}
2856
2857/**
2858 * ice_caps_to_fc_mode
2859 * @caps: PHY capabilities
2860 *
2861 * Convert PHY FC capabilities to ice FC mode
2862 */
2863enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2864{
2865	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2866	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2867		return ICE_FC_FULL;
2868
2869	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2870		return ICE_FC_TX_PAUSE;
2871
2872	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2873		return ICE_FC_RX_PAUSE;
2874
2875	return ICE_FC_NONE;
2876}
2877
2878/**
2879 * ice_caps_to_fec_mode
2880 * @caps: PHY capabilities
2881 * @fec_options: Link FEC options
2882 *
2883 * Convert PHY FEC capabilities to ice FEC mode
2884 */
2885enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2886{
2887	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2888		return ICE_FEC_AUTO;
2889
2890	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2891			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2892			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2893			   ICE_AQC_PHY_FEC_25G_KR_REQ))
2894		return ICE_FEC_BASER;
2895
2896	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2897			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2898			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2899		return ICE_FEC_RS;
2900
2901	return ICE_FEC_NONE;
2902}
2903
2904/**
2905 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2906 * @pi: port information structure
2907 * @cfg: PHY configuration data to set FC mode
2908 * @req_mode: FC mode to configure
2909 */
2910static enum ice_status
2911ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2912	       enum ice_fc_mode req_mode)
2913{
2914	struct ice_phy_cache_mode_data cache_data;
2915	u8 pause_mask = 0x0;
2916
2917	if (!pi || !cfg)
2918		return ICE_ERR_BAD_PTR;
2919
2920	switch (req_mode) {
2921	case ICE_FC_AUTO:
2922	{
2923		struct ice_aqc_get_phy_caps_data *pcaps;
2924		enum ice_status status;
2925
2926		pcaps = (struct ice_aqc_get_phy_caps_data *)
2927			ice_malloc(pi->hw, sizeof(*pcaps));
2928		if (!pcaps)
2929			return ICE_ERR_NO_MEMORY;
2930
2931		/* Query the value of FC that both the NIC and attached media
2932		 * can do.
2933		 */
2934		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2935					     pcaps, NULL);
2936		if (status) {
2937			ice_free(pi->hw, pcaps);
2938			return status;
2939		}
2940
2941		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2942		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2943
2944		ice_free(pi->hw, pcaps);
2945		break;
2946	}
2947	case ICE_FC_FULL:
2948		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2949		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2950		break;
2951	case ICE_FC_RX_PAUSE:
2952		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2953		break;
2954	case ICE_FC_TX_PAUSE:
2955		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2956		break;
2957	default:
2958		break;
2959	}
2960
2961	/* clear the old pause settings */
2962	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2963		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2964
2965	/* set the new capabilities */
2966	cfg->caps |= pause_mask;
2967
2968	/* Cache user FC request */
2969	cache_data.data.curr_user_fc_req = req_mode;
2970	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2971
2972	return ICE_SUCCESS;
2973}
2974
2975/**
2976 * ice_set_fc
2977 * @pi: port information structure
2978 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2979 * @ena_auto_link_update: enable automatic link update
2980 *
2981 * Set the requested flow control mode.
2982 */
2983enum ice_status
2984ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2985{
2986	struct ice_aqc_set_phy_cfg_data  cfg = { 0 };
2987	struct ice_aqc_get_phy_caps_data *pcaps;
2988	enum ice_status status;
2989	struct ice_hw *hw;
2990
2991	if (!pi || !aq_failures)
2992		return ICE_ERR_BAD_PTR;
2993
2994	*aq_failures = 0;
2995	hw = pi->hw;
2996
2997	pcaps = (struct ice_aqc_get_phy_caps_data *)
2998		ice_malloc(hw, sizeof(*pcaps));
2999	if (!pcaps)
3000		return ICE_ERR_NO_MEMORY;
3001
3002	/* Get the current PHY config */
3003	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
3004				     NULL);
3005	if (status) {
3006		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3007		goto out;
3008	}
3009
3010	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3011
3012	/* Configure the set PHY data */
3013	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3014	if (status) {
3015		if (status != ICE_ERR_BAD_PTR)
3016			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3017
3018		goto out;
3019	}
3020
3021	/* If the capabilities have changed, then set the new config */
3022	if (cfg.caps != pcaps->caps) {
3023		int retry_count, retry_max = 10;
3024
3025		/* Auto restart link so settings take effect */
3026		if (ena_auto_link_update)
3027			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3028
3029		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3030		if (status) {
3031			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3032			goto out;
3033		}
3034
3035		/* Update the link info
3036		 * It sometimes takes a really long time for link to
3037		 * come back from the atomic reset. Thus, we wait a
3038		 * little bit.
3039		 */
3040		for (retry_count = 0; retry_count < retry_max; retry_count++) {
3041			status = ice_update_link_info(pi);
3042
3043			if (status == ICE_SUCCESS)
3044				break;
3045
3046			ice_msec_delay(100, true);
3047		}
3048
3049		if (status)
3050			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3051	}
3052
3053out:
3054	ice_free(hw, pcaps);
3055	return status;
3056}
3057
3058/**
3059 * ice_phy_caps_equals_cfg
3060 * @phy_caps: PHY capabilities
3061 * @phy_cfg: PHY configuration
3062 *
3063 * Helper function to determine if PHY capabilities matches PHY
3064 * configuration
3065 */
3066bool
3067ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3068			struct ice_aqc_set_phy_cfg_data *phy_cfg)
3069{
3070	u8 caps_mask, cfg_mask;
3071
3072	if (!phy_caps || !phy_cfg)
3073		return false;
3074
3075	/* These bits are not common between capabilities and configuration.
3076	 * Do not use them to determine equality.
3077	 */
3078	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3079					      ICE_AQC_PHY_EN_MOD_QUAL);
3080	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3081
3082	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3083	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3084	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3085	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3086	    phy_caps->eee_cap != phy_cfg->eee_cap ||
3087	    phy_caps->eeer_value != phy_cfg->eeer_value ||
3088	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3089		return false;
3090
3091	return true;
3092}
3093
3094/**
3095 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3096 * @pi: port information structure
3097 * @caps: PHY ability structure to copy date from
3098 * @cfg: PHY configuration structure to copy data to
3099 *
3100 * Helper function to copy AQC PHY get ability data to PHY set configuration
3101 * data structure
3102 */
3103void
3104ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3105			 struct ice_aqc_get_phy_caps_data *caps,
3106			 struct ice_aqc_set_phy_cfg_data *cfg)
3107{
3108	if (!pi || !caps || !cfg)
3109		return;
3110
3111	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3112	cfg->phy_type_low = caps->phy_type_low;
3113	cfg->phy_type_high = caps->phy_type_high;
3114	cfg->caps = caps->caps;
3115	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3116	cfg->eee_cap = caps->eee_cap;
3117	cfg->eeer_value = caps->eeer_value;
3118	cfg->link_fec_opt = caps->link_fec_options;
3119	cfg->module_compliance_enforcement =
3120		caps->module_compliance_enforcement;
3121
3122	if (ice_fw_supports_link_override(pi->hw)) {
3123		struct ice_link_default_override_tlv tlv;
3124
3125		if (ice_get_link_default_override(&tlv, pi))
3126			return;
3127
3128		if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
3129			cfg->module_compliance_enforcement |=
3130				ICE_LINK_OVERRIDE_STRICT_MODE;
3131	}
3132}
3133
3134/**
3135 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3136 * @pi: port information structure
3137 * @cfg: PHY configuration data to set FEC mode
3138 * @fec: FEC mode to configure
3139 */
3140enum ice_status
3141ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3142		enum ice_fec_mode fec)
3143{
3144	struct ice_aqc_get_phy_caps_data *pcaps;
3145	enum ice_status status = ICE_SUCCESS;
3146	struct ice_hw *hw;
3147
3148	if (!pi || !cfg)
3149		return ICE_ERR_BAD_PTR;
3150
3151	hw = pi->hw;
3152
3153	pcaps = (struct ice_aqc_get_phy_caps_data *)
3154		ice_malloc(hw, sizeof(*pcaps));
3155	if (!pcaps)
3156		return ICE_ERR_NO_MEMORY;
3157
3158	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
3159				     NULL);
3160	if (status)
3161		goto out;
3162
3163	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3164	cfg->link_fec_opt = pcaps->link_fec_options;
3165
3166	switch (fec) {
3167	case ICE_FEC_BASER:
3168		/* Clear RS bits, and AND BASE-R ability
3169		 * bits and OR request bits.
3170		 */
3171		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3172			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3173		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3174			ICE_AQC_PHY_FEC_25G_KR_REQ;
3175		break;
3176	case ICE_FEC_RS:
3177		/* Clear BASE-R bits, and AND RS ability
3178		 * bits and OR request bits.
3179		 */
3180		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3181		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3182			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3183		break;
3184	case ICE_FEC_NONE:
3185		/* Clear all FEC option bits. */
3186		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3187		break;
3188	case ICE_FEC_AUTO:
3189		/* AND auto FEC bit, and all caps bits. */
3190		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3191		cfg->link_fec_opt |= pcaps->link_fec_options;
3192		break;
3193	default:
3194		status = ICE_ERR_PARAM;
3195		break;
3196	}
3197
3198	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
3199		struct ice_link_default_override_tlv tlv;
3200
3201		if (ice_get_link_default_override(&tlv, pi))
3202			goto out;
3203
3204		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3205		    (tlv.options & ICE_LINK_OVERRIDE_EN))
3206			cfg->link_fec_opt = tlv.fec_options;
3207	}
3208
3209out:
3210	ice_free(hw, pcaps);
3211
3212	return status;
3213}
3214
3215/**
3216 * ice_get_link_status - get status of the HW network link
3217 * @pi: port information structure
3218 * @link_up: pointer to bool (true/false = linkup/linkdown)
3219 *
3220 * Variable link_up is true if link is up, false if link is down.
3221 * The variable link_up is invalid if status is non zero. As a
3222 * result of this call, link status reporting becomes enabled
3223 */
3224enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3225{
3226	struct ice_phy_info *phy_info;
3227	enum ice_status status = ICE_SUCCESS;
3228
3229	if (!pi || !link_up)
3230		return ICE_ERR_PARAM;
3231
3232	phy_info = &pi->phy;
3233
3234	if (phy_info->get_link_info) {
3235		status = ice_update_link_info(pi);
3236
3237		if (status)
3238			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3239				  status);
3240	}
3241
3242	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3243
3244	return status;
3245}
3246
3247/**
3248 * ice_aq_set_link_restart_an
3249 * @pi: pointer to the port information structure
3250 * @ena_link: if true: enable link, if false: disable link
3251 * @cd: pointer to command details structure or NULL
3252 *
3253 * Sets up the link and restarts the Auto-Negotiation over the link.
3254 */
3255enum ice_status
3256ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3257			   struct ice_sq_cd *cd)
3258{
3259	struct ice_aqc_restart_an *cmd;
3260	struct ice_aq_desc desc;
3261
3262	cmd = &desc.params.restart_an;
3263
3264	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3265
3266	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3267	cmd->lport_num = pi->lport;
3268	if (ena_link)
3269		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3270	else
3271		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3272
3273	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3274}
3275
3276/**
3277 * ice_aq_set_event_mask
3278 * @hw: pointer to the HW struct
3279 * @port_num: port number of the physical function
3280 * @mask: event mask to be set
3281 * @cd: pointer to command details structure or NULL
3282 *
3283 * Set event mask (0x0613)
3284 */
3285enum ice_status
3286ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3287		      struct ice_sq_cd *cd)
3288{
3289	struct ice_aqc_set_event_mask *cmd;
3290	struct ice_aq_desc desc;
3291
3292	cmd = &desc.params.set_event_mask;
3293
3294	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3295
3296	cmd->lport_num = port_num;
3297
3298	cmd->event_mask = CPU_TO_LE16(mask);
3299	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3300}
3301
3302/**
3303 * ice_aq_set_mac_loopback
3304 * @hw: pointer to the HW struct
3305 * @ena_lpbk: Enable or Disable loopback
3306 * @cd: pointer to command details structure or NULL
3307 *
3308 * Enable/disable loopback on a given port
3309 */
3310enum ice_status
3311ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3312{
3313	struct ice_aqc_set_mac_lb *cmd;
3314	struct ice_aq_desc desc;
3315
3316	cmd = &desc.params.set_mac_lb;
3317
3318	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3319	if (ena_lpbk)
3320		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3321
3322	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3323}
3324
3325/**
3326 * ice_aq_set_port_id_led
3327 * @pi: pointer to the port information
3328 * @is_orig_mode: is this LED set to original mode (by the net-list)
3329 * @cd: pointer to command details structure or NULL
3330 *
3331 * Set LED value for the given port (0x06e9)
3332 */
3333enum ice_status
3334ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3335		       struct ice_sq_cd *cd)
3336{
3337	struct ice_aqc_set_port_id_led *cmd;
3338	struct ice_hw *hw = pi->hw;
3339	struct ice_aq_desc desc;
3340
3341	cmd = &desc.params.set_port_id_led;
3342
3343	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3344
3345	if (is_orig_mode)
3346		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3347	else
3348		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3349
3350	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3351}
3352
3353/**
3354 * ice_aq_sff_eeprom
3355 * @hw: pointer to the HW struct
3356 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3357 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3358 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
3359 * @page: QSFP page
3360 * @set_page: set or ignore the page
3361 * @data: pointer to data buffer to be read/written to the I2C device.
3362 * @length: 1-16 for read, 1 for write.
3363 * @write: 0 read, 1 for write.
3364 * @cd: pointer to command details structure or NULL
3365 *
3366 * Read/Write SFF EEPROM (0x06EE)
3367 */
3368enum ice_status
3369ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3370		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3371		  bool write, struct ice_sq_cd *cd)
3372{
3373	struct ice_aqc_sff_eeprom *cmd;
3374	struct ice_aq_desc desc;
3375	enum ice_status status;
3376
3377	if (!data || (mem_addr & 0xff00))
3378		return ICE_ERR_PARAM;
3379
3380	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3381	cmd = &desc.params.read_write_sff_param;
3382	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
3383	cmd->lport_num = (u8)(lport & 0xff);
3384	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3385	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3386					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3387					((set_page <<
3388					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3389					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3390	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3391	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3392	if (write)
3393		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3394
3395	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3396	return status;
3397}
3398
3399/**
3400 * __ice_aq_get_set_rss_lut
3401 * @hw: pointer to the hardware structure
3402 * @vsi_id: VSI FW index
3403 * @lut_type: LUT table type
3404 * @lut: pointer to the LUT buffer provided by the caller
3405 * @lut_size: size of the LUT buffer
3406 * @glob_lut_idx: global LUT index
3407 * @set: set true to set the table, false to get the table
3408 *
3409 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3410 */
3411static enum ice_status
3412__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
3413			 u16 lut_size, u8 glob_lut_idx, bool set)
3414{
3415	struct ice_aqc_get_set_rss_lut *cmd_resp;
3416	struct ice_aq_desc desc;
3417	enum ice_status status;
3418	u16 flags = 0;
3419
3420	cmd_resp = &desc.params.get_set_rss_lut;
3421
3422	if (set) {
3423		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3424		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3425	} else {
3426		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3427	}
3428
3429	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3430					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3431					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3432				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3433
3434	switch (lut_type) {
3435	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3436	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3437	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3438		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3439			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3440		break;
3441	default:
3442		status = ICE_ERR_PARAM;
3443		goto ice_aq_get_set_rss_lut_exit;
3444	}
3445
3446	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3447		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3448			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3449
3450		if (!set)
3451			goto ice_aq_get_set_rss_lut_send;
3452	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3453		if (!set)
3454			goto ice_aq_get_set_rss_lut_send;
3455	} else {
3456		goto ice_aq_get_set_rss_lut_send;
3457	}
3458
3459	/* LUT size is only valid for Global and PF table types */
3460	switch (lut_size) {
3461	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3462		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3463			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3464			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3465		break;
3466	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3467		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3468			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3469			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3470		break;
3471	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3472		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3473			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3474				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3475				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3476			break;
3477		}
3478		/* fall-through */
3479	default:
3480		status = ICE_ERR_PARAM;
3481		goto ice_aq_get_set_rss_lut_exit;
3482	}
3483
3484ice_aq_get_set_rss_lut_send:
3485	cmd_resp->flags = CPU_TO_LE16(flags);
3486	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3487
3488ice_aq_get_set_rss_lut_exit:
3489	return status;
3490}
3491
3492/**
3493 * ice_aq_get_rss_lut
3494 * @hw: pointer to the hardware structure
3495 * @vsi_handle: software VSI handle
3496 * @lut_type: LUT table type
3497 * @lut: pointer to the LUT buffer provided by the caller
3498 * @lut_size: size of the LUT buffer
3499 *
3500 * get the RSS lookup table, PF or VSI type
3501 */
3502enum ice_status
3503ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3504		   u8 *lut, u16 lut_size)
3505{
3506	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3507		return ICE_ERR_PARAM;
3508
3509	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3510					lut_type, lut, lut_size, 0, false);
3511}
3512
3513/**
3514 * ice_aq_set_rss_lut
3515 * @hw: pointer to the hardware structure
3516 * @vsi_handle: software VSI handle
3517 * @lut_type: LUT table type
3518 * @lut: pointer to the LUT buffer provided by the caller
3519 * @lut_size: size of the LUT buffer
3520 *
3521 * set the RSS lookup table, PF or VSI type
3522 */
3523enum ice_status
3524ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3525		   u8 *lut, u16 lut_size)
3526{
3527	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3528		return ICE_ERR_PARAM;
3529
3530	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3531					lut_type, lut, lut_size, 0, true);
3532}
3533
3534/**
3535 * __ice_aq_get_set_rss_key
3536 * @hw: pointer to the HW struct
3537 * @vsi_id: VSI FW index
3538 * @key: pointer to key info struct
3539 * @set: set true to set the key, false to get the key
3540 *
3541 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3542 */
3543static enum
3544ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3545				    struct ice_aqc_get_set_rss_keys *key,
3546				    bool set)
3547{
3548	struct ice_aqc_get_set_rss_key *cmd_resp;
3549	u16 key_size = sizeof(*key);
3550	struct ice_aq_desc desc;
3551
3552	cmd_resp = &desc.params.get_set_rss_key;
3553
3554	if (set) {
3555		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3556		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3557	} else {
3558		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3559	}
3560
3561	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3562					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3563					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3564				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3565
3566	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3567}
3568
3569/**
3570 * ice_aq_get_rss_key
3571 * @hw: pointer to the HW struct
3572 * @vsi_handle: software VSI handle
3573 * @key: pointer to key info struct
3574 *
3575 * get the RSS key per VSI
3576 */
3577enum ice_status
3578ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3579		   struct ice_aqc_get_set_rss_keys *key)
3580{
3581	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3582		return ICE_ERR_PARAM;
3583
3584	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3585					key, false);
3586}
3587
3588/**
3589 * ice_aq_set_rss_key
3590 * @hw: pointer to the HW struct
3591 * @vsi_handle: software VSI handle
3592 * @keys: pointer to key info struct
3593 *
3594 * set the RSS key per VSI
3595 */
3596enum ice_status
3597ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3598		   struct ice_aqc_get_set_rss_keys *keys)
3599{
3600	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3601		return ICE_ERR_PARAM;
3602
3603	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3604					keys, true);
3605}
3606
3607/**
3608 * ice_aq_add_lan_txq
3609 * @hw: pointer to the hardware structure
3610 * @num_qgrps: Number of added queue groups
3611 * @qg_list: list of queue groups to be added
3612 * @buf_size: size of buffer for indirect command
3613 * @cd: pointer to command details structure or NULL
3614 *
3615 * Add Tx LAN queue (0x0C30)
3616 *
3617 * NOTE:
3618 * Prior to calling add Tx LAN queue:
3619 * Initialize the following as part of the Tx queue context:
3620 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3621 * Cache profile and Packet shaper profile.
3622 *
3623 * After add Tx LAN queue AQ command is completed:
3624 * Interrupts should be associated with specific queues,
3625 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
3626 * flow.
3627 */
3628enum ice_status
3629ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3630		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3631		   struct ice_sq_cd *cd)
3632{
3633	struct ice_aqc_add_tx_qgrp *list;
3634	struct ice_aqc_add_txqs *cmd;
3635	struct ice_aq_desc desc;
3636	u16 i, sum_size = 0;
3637
3638	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3639
3640	cmd = &desc.params.add_txqs;
3641
3642	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3643
3644	if (!qg_list)
3645		return ICE_ERR_PARAM;
3646
3647	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3648		return ICE_ERR_PARAM;
3649
3650	for (i = 0, list = qg_list; i < num_qgrps; i++) {
3651		sum_size += ice_struct_size(list, txqs, list->num_txqs);
3652		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3653						      list->num_txqs);
3654	}
3655
3656	if (buf_size != sum_size)
3657		return ICE_ERR_PARAM;
3658
3659	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3660
3661	cmd->num_qgrps = num_qgrps;
3662
3663	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3664}
3665
3666/**
3667 * ice_aq_dis_lan_txq
3668 * @hw: pointer to the hardware structure
3669 * @num_qgrps: number of groups in the list
3670 * @qg_list: the list of groups to disable
3671 * @buf_size: the total size of the qg_list buffer in bytes
3672 * @rst_src: if called due to reset, specifies the reset source
3673 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3674 * @cd: pointer to command details structure or NULL
3675 *
3676 * Disable LAN Tx queue (0x0C31)
3677 */
3678static enum ice_status
3679ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3680		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3681		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
3682		   struct ice_sq_cd *cd)
3683{
3684	struct ice_aqc_dis_txq_item *item;
3685	struct ice_aqc_dis_txqs *cmd;
3686	struct ice_aq_desc desc;
3687	enum ice_status status;
3688	u16 i, sz = 0;
3689
3690	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3691	cmd = &desc.params.dis_txqs;
3692	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3693
3694	/* qg_list can be NULL only in VM/VF reset flow */
3695	if (!qg_list && !rst_src)
3696		return ICE_ERR_PARAM;
3697
3698	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3699		return ICE_ERR_PARAM;
3700
3701	cmd->num_entries = num_qgrps;
3702
3703	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3704					    ICE_AQC_Q_DIS_TIMEOUT_M);
3705
3706	switch (rst_src) {
3707	case ICE_VM_RESET:
3708		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3709		cmd->vmvf_and_timeout |=
3710			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3711		break;
3712	case ICE_VF_RESET:
3713		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3714		/* In this case, FW expects vmvf_num to be absolute VF ID */
3715		cmd->vmvf_and_timeout |=
3716			CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
3717				    ICE_AQC_Q_DIS_VMVF_NUM_M);
3718		break;
3719	case ICE_NO_RESET:
3720	default:
3721		break;
3722	}
3723
3724	/* flush pipe on time out */
3725	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3726	/* If no queue group info, we are in a reset flow. Issue the AQ */
3727	if (!qg_list)
3728		goto do_aq;
3729
3730	/* set RD bit to indicate that command buffer is provided by the driver
3731	 * and it needs to be read by the firmware
3732	 */
3733	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3734
3735	for (i = 0, item = qg_list; i < num_qgrps; i++) {
3736		u16 item_size = ice_struct_size(item, q_id, item->num_qs);
3737
3738		/* If the num of queues is even, add 2 bytes of padding */
3739		if ((item->num_qs % 2) == 0)
3740			item_size += 2;
3741
3742		sz += item_size;
3743
3744		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3745	}
3746
3747	if (buf_size != sz)
3748		return ICE_ERR_PARAM;
3749
3750do_aq:
3751	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3752	if (status) {
3753		if (!qg_list)
3754			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3755				  vmvf_num, hw->adminq.sq_last_status);
3756		else
3757			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3758				  LE16_TO_CPU(qg_list[0].q_id[0]),
3759				  hw->adminq.sq_last_status);
3760	}
3761	return status;
3762}
3763
3764/**
3765 * ice_aq_move_recfg_lan_txq
3766 * @hw: pointer to the hardware structure
3767 * @num_qs: number of queues to move/reconfigure
3768 * @is_move: true if this operation involves node movement
3769 * @is_tc_change: true if this operation involves a TC change
3770 * @subseq_call: true if this operation is a subsequent call
3771 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3772 * @timeout: timeout in units of 100 usec (valid values 0-50)
3773 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3774 * @buf: struct containing src/dest TEID and per-queue info
3775 * @buf_size: size of buffer for indirect command
3776 * @txqs_moved: out param, number of queues successfully moved
3777 * @cd: pointer to command details structure or NULL
3778 *
3779 * Move / Reconfigure Tx LAN queues (0x0C32)
3780 */
3781enum ice_status
3782ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3783			  bool is_tc_change, bool subseq_call, bool flush_pipe,
3784			  u8 timeout, u32 *blocked_cgds,
3785			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3786			  u8 *txqs_moved, struct ice_sq_cd *cd)
3787{
3788	struct ice_aqc_move_txqs *cmd;
3789	struct ice_aq_desc desc;
3790	enum ice_status status;
3791
3792	cmd = &desc.params.move_txqs;
3793	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3794
3795#define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3796	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3797		return ICE_ERR_PARAM;
3798
3799	if (is_tc_change && !flush_pipe && !blocked_cgds)
3800		return ICE_ERR_PARAM;
3801
3802	if (!is_move && !is_tc_change)
3803		return ICE_ERR_PARAM;
3804
3805	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3806
3807	if (is_move)
3808		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3809
3810	if (is_tc_change)
3811		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3812
3813	if (subseq_call)
3814		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3815
3816	if (flush_pipe)
3817		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3818
3819	cmd->num_qs = num_qs;
3820	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3821			ICE_AQC_Q_CMD_TIMEOUT_M);
3822
3823	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3824
3825	if (!status && txqs_moved)
3826		*txqs_moved = cmd->num_qs;
3827
3828	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3829	    is_tc_change && !flush_pipe)
3830		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3831
3832	return status;
3833}
3834
3835/* End of FW Admin Queue command wrappers */
3836
3837/**
3838 * ice_write_byte - write a byte to a packed context structure
3839 * @src_ctx:  the context structure to read from
3840 * @dest_ctx: the context to be written to
3841 * @ce_info:  a description of the struct to be filled
3842 */
3843static void
3844ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3845{
3846	u8 src_byte, dest_byte, mask;
3847	u8 *from, *dest;
3848	u16 shift_width;
3849
3850	/* copy from the next struct field */
3851	from = src_ctx + ce_info->offset;
3852
3853	/* prepare the bits and mask */
3854	shift_width = ce_info->lsb % 8;
3855	mask = (u8)(BIT(ce_info->width) - 1);
3856
3857	src_byte = *from;
3858	src_byte &= mask;
3859
3860	/* shift to correct alignment */
3861	mask <<= shift_width;
3862	src_byte <<= shift_width;
3863
3864	/* get the current bits from the target bit string */
3865	dest = dest_ctx + (ce_info->lsb / 8);
3866
3867	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3868
3869	dest_byte &= ~mask;	/* get the bits not changing */
3870	dest_byte |= src_byte;	/* add in the new bits */
3871
3872	/* put it all back */
3873	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3874}
3875
3876/**
3877 * ice_write_word - write a word to a packed context structure
3878 * @src_ctx:  the context structure to read from
3879 * @dest_ctx: the context to be written to
3880 * @ce_info:  a description of the struct to be filled
3881 */
3882static void
3883ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3884{
3885	u16 src_word, mask;
3886	__le16 dest_word;
3887	u8 *from, *dest;
3888	u16 shift_width;
3889
3890	/* copy from the next struct field */
3891	from = src_ctx + ce_info->offset;
3892
3893	/* prepare the bits and mask */
3894	shift_width = ce_info->lsb % 8;
3895	mask = BIT(ce_info->width) - 1;
3896
3897	/* don't swizzle the bits until after the mask because the mask bits
3898	 * will be in a different bit position on big endian machines
3899	 */
3900	src_word = *(u16 *)from;
3901	src_word &= mask;
3902
3903	/* shift to correct alignment */
3904	mask <<= shift_width;
3905	src_word <<= shift_width;
3906
3907	/* get the current bits from the target bit string */
3908	dest = dest_ctx + (ce_info->lsb / 8);
3909
3910	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3911
3912	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
3913	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
3914
3915	/* put it all back */
3916	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3917}
3918
3919/**
3920 * ice_write_dword - write a dword to a packed context structure
3921 * @src_ctx:  the context structure to read from
3922 * @dest_ctx: the context to be written to
3923 * @ce_info:  a description of the struct to be filled
3924 */
3925static void
3926ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3927{
3928	u32 src_dword, mask;
3929	__le32 dest_dword;
3930	u8 *from, *dest;
3931	u16 shift_width;
3932
3933	/* copy from the next struct field */
3934	from = src_ctx + ce_info->offset;
3935
3936	/* prepare the bits and mask */
3937	shift_width = ce_info->lsb % 8;
3938
3939	/* if the field width is exactly 32 on an x86 machine, then the shift
3940	 * operation will not work because the SHL instructions count is masked
3941	 * to 5 bits so the shift will do nothing
3942	 */
3943	if (ce_info->width < 32)
3944		mask = BIT(ce_info->width) - 1;
3945	else
3946		mask = (u32)~0;
3947
3948	/* don't swizzle the bits until after the mask because the mask bits
3949	 * will be in a different bit position on big endian machines
3950	 */
3951	src_dword = *(u32 *)from;
3952	src_dword &= mask;
3953
3954	/* shift to correct alignment */
3955	mask <<= shift_width;
3956	src_dword <<= shift_width;
3957
3958	/* get the current bits from the target bit string */
3959	dest = dest_ctx + (ce_info->lsb / 8);
3960
3961	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3962
3963	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
3964	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
3965
3966	/* put it all back */
3967	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3968}
3969
3970/**
3971 * ice_write_qword - write a qword to a packed context structure
3972 * @src_ctx:  the context structure to read from
3973 * @dest_ctx: the context to be written to
3974 * @ce_info:  a description of the struct to be filled
3975 */
3976static void
3977ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3978{
3979	u64 src_qword, mask;
3980	__le64 dest_qword;
3981	u8 *from, *dest;
3982	u16 shift_width;
3983
3984	/* copy from the next struct field */
3985	from = src_ctx + ce_info->offset;
3986
3987	/* prepare the bits and mask */
3988	shift_width = ce_info->lsb % 8;
3989
3990	/* if the field width is exactly 64 on an x86 machine, then the shift
3991	 * operation will not work because the SHL instructions count is masked
3992	 * to 6 bits so the shift will do nothing
3993	 */
3994	if (ce_info->width < 64)
3995		mask = BIT_ULL(ce_info->width) - 1;
3996	else
3997		mask = (u64)~0;
3998
3999	/* don't swizzle the bits until after the mask because the mask bits
4000	 * will be in a different bit position on big endian machines
4001	 */
4002	src_qword = *(u64 *)from;
4003	src_qword &= mask;
4004
4005	/* shift to correct alignment */
4006	mask <<= shift_width;
4007	src_qword <<= shift_width;
4008
4009	/* get the current bits from the target bit string */
4010	dest = dest_ctx + (ce_info->lsb / 8);
4011
4012	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4013
4014	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
4015	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
4016
4017	/* put it all back */
4018	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4019}
4020
4021/**
4022 * ice_set_ctx - set context bits in packed structure
4023 * @hw: pointer to the hardware structure
4024 * @src_ctx:  pointer to a generic non-packed context structure
4025 * @dest_ctx: pointer to memory for the packed structure
4026 * @ce_info:  a description of the structure to be transformed
4027 */
4028enum ice_status
4029ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4030	    const struct ice_ctx_ele *ce_info)
4031{
4032	int f;
4033
4034	for (f = 0; ce_info[f].width; f++) {
4035		/* We have to deal with each element of the FW response
4036		 * using the correct size so that we are correct regardless
4037		 * of the endianness of the machine.
4038		 */
4039		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4040			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4041				  f, ce_info[f].width, ce_info[f].size_of);
4042			continue;
4043		}
4044		switch (ce_info[f].size_of) {
4045		case sizeof(u8):
4046			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4047			break;
4048		case sizeof(u16):
4049			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4050			break;
4051		case sizeof(u32):
4052			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4053			break;
4054		case sizeof(u64):
4055			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4056			break;
4057		default:
4058			return ICE_ERR_INVAL_SIZE;
4059		}
4060	}
4061
4062	return ICE_SUCCESS;
4063}
4064
4065/**
4066 * ice_read_byte - read context byte into struct
4067 * @src_ctx:  the context structure to read from
4068 * @dest_ctx: the context to be written to
4069 * @ce_info:  a description of the struct to be filled
4070 */
4071static void
4072ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4073{
4074	u8 dest_byte, mask;
4075	u8 *src, *target;
4076	u16 shift_width;
4077
4078	/* prepare the bits and mask */
4079	shift_width = ce_info->lsb % 8;
4080	mask = (u8)(BIT(ce_info->width) - 1);
4081
4082	/* shift to correct alignment */
4083	mask <<= shift_width;
4084
4085	/* get the current bits from the src bit string */
4086	src = src_ctx + (ce_info->lsb / 8);
4087
4088	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4089
4090	dest_byte &= ~(mask);
4091
4092	dest_byte >>= shift_width;
4093
4094	/* get the address from the struct field */
4095	target = dest_ctx + ce_info->offset;
4096
4097	/* put it back in the struct */
4098	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4099}
4100
4101/**
4102 * ice_read_word - read context word into struct
4103 * @src_ctx:  the context structure to read from
4104 * @dest_ctx: the context to be written to
4105 * @ce_info:  a description of the struct to be filled
4106 */
4107static void
4108ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4109{
4110	u16 dest_word, mask;
4111	u8 *src, *target;
4112	__le16 src_word;
4113	u16 shift_width;
4114
4115	/* prepare the bits and mask */
4116	shift_width = ce_info->lsb % 8;
4117	mask = BIT(ce_info->width) - 1;
4118
4119	/* shift to correct alignment */
4120	mask <<= shift_width;
4121
4122	/* get the current bits from the src bit string */
4123	src = src_ctx + (ce_info->lsb / 8);
4124
4125	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4126
4127	/* the data in the memory is stored as little endian so mask it
4128	 * correctly
4129	 */
4130	src_word &= ~(CPU_TO_LE16(mask));
4131
4132	/* get the data back into host order before shifting */
4133	dest_word = LE16_TO_CPU(src_word);
4134
4135	dest_word >>= shift_width;
4136
4137	/* get the address from the struct field */
4138	target = dest_ctx + ce_info->offset;
4139
4140	/* put it back in the struct */
4141	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4142}
4143
4144/**
4145 * ice_read_dword - read context dword into struct
4146 * @src_ctx:  the context structure to read from
4147 * @dest_ctx: the context to be written to
4148 * @ce_info:  a description of the struct to be filled
4149 */
4150static void
4151ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4152{
4153	u32 dest_dword, mask;
4154	__le32 src_dword;
4155	u8 *src, *target;
4156	u16 shift_width;
4157
4158	/* prepare the bits and mask */
4159	shift_width = ce_info->lsb % 8;
4160
4161	/* if the field width is exactly 32 on an x86 machine, then the shift
4162	 * operation will not work because the SHL instructions count is masked
4163	 * to 5 bits so the shift will do nothing
4164	 */
4165	if (ce_info->width < 32)
4166		mask = BIT(ce_info->width) - 1;
4167	else
4168		mask = (u32)~0;
4169
4170	/* shift to correct alignment */
4171	mask <<= shift_width;
4172
4173	/* get the current bits from the src bit string */
4174	src = src_ctx + (ce_info->lsb / 8);
4175
4176	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4177
4178	/* the data in the memory is stored as little endian so mask it
4179	 * correctly
4180	 */
4181	src_dword &= ~(CPU_TO_LE32(mask));
4182
4183	/* get the data back into host order before shifting */
4184	dest_dword = LE32_TO_CPU(src_dword);
4185
4186	dest_dword >>= shift_width;
4187
4188	/* get the address from the struct field */
4189	target = dest_ctx + ce_info->offset;
4190
4191	/* put it back in the struct */
4192	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4193}
4194
4195/**
4196 * ice_read_qword - read context qword into struct
4197 * @src_ctx:  the context structure to read from
4198 * @dest_ctx: the context to be written to
4199 * @ce_info:  a description of the struct to be filled
4200 */
4201static void
4202ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4203{
4204	u64 dest_qword, mask;
4205	__le64 src_qword;
4206	u8 *src, *target;
4207	u16 shift_width;
4208
4209	/* prepare the bits and mask */
4210	shift_width = ce_info->lsb % 8;
4211
4212	/* if the field width is exactly 64 on an x86 machine, then the shift
4213	 * operation will not work because the SHL instructions count is masked
4214	 * to 6 bits so the shift will do nothing
4215	 */
4216	if (ce_info->width < 64)
4217		mask = BIT_ULL(ce_info->width) - 1;
4218	else
4219		mask = (u64)~0;
4220
4221	/* shift to correct alignment */
4222	mask <<= shift_width;
4223
4224	/* get the current bits from the src bit string */
4225	src = src_ctx + (ce_info->lsb / 8);
4226
4227	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4228
4229	/* the data in the memory is stored as little endian so mask it
4230	 * correctly
4231	 */
4232	src_qword &= ~(CPU_TO_LE64(mask));
4233
4234	/* get the data back into host order before shifting */
4235	dest_qword = LE64_TO_CPU(src_qword);
4236
4237	dest_qword >>= shift_width;
4238
4239	/* get the address from the struct field */
4240	target = dest_ctx + ce_info->offset;
4241
4242	/* put it back in the struct */
4243	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4244}
4245
4246/**
4247 * ice_get_ctx - extract context bits from a packed structure
4248 * @src_ctx:  pointer to a generic packed context structure
4249 * @dest_ctx: pointer to a generic non-packed context structure
4250 * @ce_info:  a description of the structure to be read from
4251 */
4252enum ice_status
4253ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4254{
4255	int f;
4256
4257	for (f = 0; ce_info[f].width; f++) {
4258		switch (ce_info[f].size_of) {
4259		case 1:
4260			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4261			break;
4262		case 2:
4263			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4264			break;
4265		case 4:
4266			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4267			break;
4268		case 8:
4269			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4270			break;
4271		default:
4272			/* nothing to do, just keep going */
4273			break;
4274		}
4275	}
4276
4277	return ICE_SUCCESS;
4278}
4279
4280/**
4281 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4282 * @hw: pointer to the HW struct
4283 * @vsi_handle: software VSI handle
4284 * @tc: TC number
4285 * @q_handle: software queue handle
4286 */
4287struct ice_q_ctx *
4288ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4289{
4290	struct ice_vsi_ctx *vsi;
4291	struct ice_q_ctx *q_ctx;
4292
4293	vsi = ice_get_vsi_ctx(hw, vsi_handle);
4294	if (!vsi)
4295		return NULL;
4296	if (q_handle >= vsi->num_lan_q_entries[tc])
4297		return NULL;
4298	if (!vsi->lan_q_ctx[tc])
4299		return NULL;
4300	q_ctx = vsi->lan_q_ctx[tc];
4301	return &q_ctx[q_handle];
4302}
4303
4304/**
4305 * ice_ena_vsi_txq
4306 * @pi: port information structure
4307 * @vsi_handle: software VSI handle
4308 * @tc: TC number
4309 * @q_handle: software queue handle
4310 * @num_qgrps: Number of added queue groups
4311 * @buf: list of queue groups to be added
4312 * @buf_size: size of buffer for indirect command
4313 * @cd: pointer to command details structure or NULL
4314 *
4315 * This function adds one LAN queue
4316 */
4317enum ice_status
4318ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4319		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4320		struct ice_sq_cd *cd)
4321{
4322	struct ice_aqc_txsched_elem_data node = { 0 };
4323	struct ice_sched_node *parent;
4324	struct ice_q_ctx *q_ctx;
4325	enum ice_status status;
4326	struct ice_hw *hw;
4327
4328	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4329		return ICE_ERR_CFG;
4330
4331	if (num_qgrps > 1 || buf->num_txqs > 1)
4332		return ICE_ERR_MAX_LIMIT;
4333
4334	hw = pi->hw;
4335
4336	if (!ice_is_vsi_valid(hw, vsi_handle))
4337		return ICE_ERR_PARAM;
4338
4339	ice_acquire_lock(&pi->sched_lock);
4340
4341	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4342	if (!q_ctx) {
4343		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4344			  q_handle);
4345		status = ICE_ERR_PARAM;
4346		goto ena_txq_exit;
4347	}
4348
4349	/* find a parent node */
4350	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4351					    ICE_SCHED_NODE_OWNER_LAN);
4352	if (!parent) {
4353		status = ICE_ERR_PARAM;
4354		goto ena_txq_exit;
4355	}
4356
4357	buf->parent_teid = parent->info.node_teid;
4358	node.parent_teid = parent->info.node_teid;
4359	/* Mark that the values in the "generic" section as valid. The default
4360	 * value in the "generic" section is zero. This means that :
4361	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4362	 * - 0 priority among siblings, indicated by Bit 1-3.
4363	 * - WFQ, indicated by Bit 4.
4364	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4365	 * Bit 5-6.
4366	 * - Bit 7 is reserved.
4367	 * Without setting the generic section as valid in valid_sections, the
4368	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4369	 */
4370	buf->txqs[0].info.valid_sections =
4371		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4372		ICE_AQC_ELEM_VALID_EIR;
4373	buf->txqs[0].info.generic = 0;
4374	buf->txqs[0].info.cir_bw.bw_profile_idx =
4375		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4376	buf->txqs[0].info.cir_bw.bw_alloc =
4377		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4378	buf->txqs[0].info.eir_bw.bw_profile_idx =
4379		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4380	buf->txqs[0].info.eir_bw.bw_alloc =
4381		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4382
4383	/* add the LAN queue */
4384	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4385	if (status != ICE_SUCCESS) {
4386		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4387			  LE16_TO_CPU(buf->txqs[0].txq_id),
4388			  hw->adminq.sq_last_status);
4389		goto ena_txq_exit;
4390	}
4391
4392	node.node_teid = buf->txqs[0].q_teid;
4393	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4394	q_ctx->q_handle = q_handle;
4395	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4396
4397	/* add a leaf node into scheduler tree queue layer */
4398	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4399	if (!status)
4400		status = ice_sched_replay_q_bw(pi, q_ctx);
4401
4402ena_txq_exit:
4403	ice_release_lock(&pi->sched_lock);
4404	return status;
4405}
4406
4407/**
4408 * ice_dis_vsi_txq
4409 * @pi: port information structure
4410 * @vsi_handle: software VSI handle
4411 * @tc: TC number
4412 * @num_queues: number of queues
4413 * @q_handles: pointer to software queue handle array
4414 * @q_ids: pointer to the q_id array
4415 * @q_teids: pointer to queue node teids
4416 * @rst_src: if called due to reset, specifies the reset source
4417 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4418 * @cd: pointer to command details structure or NULL
4419 *
4420 * This function removes queues and their corresponding nodes in SW DB
4421 */
4422enum ice_status
4423ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4424		u16 *q_handles, u16 *q_ids, u32 *q_teids,
4425		enum ice_disq_rst_src rst_src, u16 vmvf_num,
4426		struct ice_sq_cd *cd)
4427{
4428	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4429	struct ice_aqc_dis_txq_item *qg_list;
4430	struct ice_q_ctx *q_ctx;
4431	struct ice_hw *hw;
4432	u16 i, buf_size;
4433
4434	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4435		return ICE_ERR_CFG;
4436
4437	hw = pi->hw;
4438
4439	if (!num_queues) {
4440		/* if queue is disabled already yet the disable queue command
4441		 * has to be sent to complete the VF reset, then call
4442		 * ice_aq_dis_lan_txq without any queue information
4443		 */
4444		if (rst_src)
4445			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4446						  vmvf_num, NULL);
4447		return ICE_ERR_CFG;
4448	}
4449
4450	buf_size = ice_struct_size(qg_list, q_id, 1);
4451	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
4452	if (!qg_list)
4453		return ICE_ERR_NO_MEMORY;
4454
4455	ice_acquire_lock(&pi->sched_lock);
4456
4457	for (i = 0; i < num_queues; i++) {
4458		struct ice_sched_node *node;
4459
4460		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4461		if (!node)
4462			continue;
4463		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4464		if (!q_ctx) {
4465			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
4466				  q_handles[i]);
4467			continue;
4468		}
4469		if (q_ctx->q_handle != q_handles[i]) {
4470			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4471				  q_ctx->q_handle, q_handles[i]);
4472			continue;
4473		}
4474		qg_list->parent_teid = node->info.parent_teid;
4475		qg_list->num_qs = 1;
4476		qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
4477		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4478					    vmvf_num, cd);
4479
4480		if (status != ICE_SUCCESS)
4481			break;
4482		ice_free_sched_node(pi, node);
4483		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4484	}
4485	ice_release_lock(&pi->sched_lock);
4486	ice_free(hw, qg_list);
4487	return status;
4488}
4489
4490/**
4491 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4492 * @pi: port information structure
4493 * @vsi_handle: software VSI handle
4494 * @tc_bitmap: TC bitmap
4495 * @maxqs: max queues array per TC
4496 * @owner: LAN or RDMA
4497 *
4498 * This function adds/updates the VSI queues per TC.
4499 */
4500static enum ice_status
4501ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4502	       u16 *maxqs, u8 owner)
4503{
4504	enum ice_status status = ICE_SUCCESS;
4505	u8 i;
4506
4507	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4508		return ICE_ERR_CFG;
4509
4510	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4511		return ICE_ERR_PARAM;
4512
4513	ice_acquire_lock(&pi->sched_lock);
4514
4515	ice_for_each_traffic_class(i) {
4516		/* configuration is possible only if TC node is present */
4517		if (!ice_sched_get_tc_node(pi, i))
4518			continue;
4519
4520		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4521					   ice_is_tc_ena(tc_bitmap, i));
4522		if (status)
4523			break;
4524	}
4525
4526	ice_release_lock(&pi->sched_lock);
4527	return status;
4528}
4529
4530/**
4531 * ice_cfg_vsi_lan - configure VSI LAN queues
4532 * @pi: port information structure
4533 * @vsi_handle: software VSI handle
4534 * @tc_bitmap: TC bitmap
4535 * @max_lanqs: max LAN queues array per TC
4536 *
4537 * This function adds/updates the VSI LAN queues per TC.
4538 */
4539enum ice_status
4540ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4541		u16 *max_lanqs)
4542{
4543	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4544			      ICE_SCHED_NODE_OWNER_LAN);
4545}
4546
4547/**
4548 * ice_is_main_vsi - checks whether the VSI is main VSI
4549 * @hw: pointer to the HW struct
4550 * @vsi_handle: VSI handle
4551 *
4552 * Checks whether the VSI is the main VSI (the first PF VSI created on
4553 * given PF).
4554 */
4555static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
4556{
4557	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
4558}
4559
4560/**
4561 * ice_replay_pre_init - replay pre initialization
4562 * @hw: pointer to the HW struct
4563 * @sw: pointer to switch info struct for which function initializes filters
4564 *
4565 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4566 */
4567static enum ice_status
4568ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
4569{
4570	enum ice_status status;
4571	u8 i;
4572
4573	/* Delete old entries from replay filter list head if there is any */
4574	ice_rm_sw_replay_rule_info(hw, sw);
4575	/* In start of replay, move entries into replay_rules list, it
4576	 * will allow adding rules entries back to filt_rules list,
4577	 * which is operational list.
4578	 */
4579	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4580		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
4581				  &sw->recp_list[i].filt_replay_rules);
4582	ice_sched_replay_agg_vsi_preinit(hw);
4583
4584	status = ice_sched_replay_root_node_bw(hw->port_info);
4585	if (status)
4586		return status;
4587
4588	return ice_sched_replay_tc_node_bw(hw->port_info);
4589}
4590
4591/**
4592 * ice_replay_vsi - replay VSI configuration
4593 * @hw: pointer to the HW struct
4594 * @vsi_handle: driver VSI handle
4595 *
4596 * Restore all VSI configuration after reset. It is required to call this
4597 * function with main VSI first.
4598 */
4599enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4600{
4601	struct ice_switch_info *sw = hw->switch_info;
4602	struct ice_port_info *pi = hw->port_info;
4603	enum ice_status status;
4604
4605	if (!ice_is_vsi_valid(hw, vsi_handle))
4606		return ICE_ERR_PARAM;
4607
4608	/* Replay pre-initialization if there is any */
4609	if (ice_is_main_vsi(hw, vsi_handle)) {
4610		status = ice_replay_pre_init(hw, sw);
4611		if (status)
4612			return status;
4613	}
4614	/* Replay per VSI all RSS configurations */
4615	status = ice_replay_rss_cfg(hw, vsi_handle);
4616	if (status)
4617		return status;
4618	/* Replay per VSI all filters */
4619	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
4620	if (!status)
4621		status = ice_replay_vsi_agg(hw, vsi_handle);
4622	return status;
4623}
4624
4625/**
4626 * ice_replay_post - post replay configuration cleanup
4627 * @hw: pointer to the HW struct
4628 *
4629 * Post replay cleanup.
4630 */
4631void ice_replay_post(struct ice_hw *hw)
4632{
4633	/* Delete old entries from replay filter list head */
4634	ice_rm_all_sw_replay_rule_info(hw);
4635	ice_sched_replay_agg(hw);
4636}
4637
4638/**
4639 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4640 * @hw: ptr to the hardware info
4641 * @reg: offset of 64 bit HW register to read from
4642 * @prev_stat_loaded: bool to specify if previous stats are loaded
4643 * @prev_stat: ptr to previous loaded stat value
4644 * @cur_stat: ptr to current stat value
4645 */
4646void
4647ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4648		  u64 *prev_stat, u64 *cur_stat)
4649{
4650	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4651
4652	/* device stats are not reset at PFR, they likely will not be zeroed
4653	 * when the driver starts. Thus, save the value from the first read
4654	 * without adding to the statistic value so that we report stats which
4655	 * count up from zero.
4656	 */
4657	if (!prev_stat_loaded) {
4658		*prev_stat = new_data;
4659		return;
4660	}
4661
4662	/* Calculate the difference between the new and old values, and then
4663	 * add it to the software stat value.
4664	 */
4665	if (new_data >= *prev_stat)
4666		*cur_stat += new_data - *prev_stat;
4667	else
4668		/* to manage the potential roll-over */
4669		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4670
4671	/* Update the previously stored value to prepare for next read */
4672	*prev_stat = new_data;
4673}
4674
4675/**
4676 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4677 * @hw: ptr to the hardware info
4678 * @reg: offset of HW register to read from
4679 * @prev_stat_loaded: bool to specify if previous stats are loaded
4680 * @prev_stat: ptr to previous loaded stat value
4681 * @cur_stat: ptr to current stat value
4682 */
4683void
4684ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4685		  u64 *prev_stat, u64 *cur_stat)
4686{
4687	u32 new_data;
4688
4689	new_data = rd32(hw, reg);
4690
4691	/* device stats are not reset at PFR, they likely will not be zeroed
4692	 * when the driver starts. Thus, save the value from the first read
4693	 * without adding to the statistic value so that we report stats which
4694	 * count up from zero.
4695	 */
4696	if (!prev_stat_loaded) {
4697		*prev_stat = new_data;
4698		return;
4699	}
4700
4701	/* Calculate the difference between the new and old values, and then
4702	 * add it to the software stat value.
4703	 */
4704	if (new_data >= *prev_stat)
4705		*cur_stat += new_data - *prev_stat;
4706	else
4707		/* to manage the potential roll-over */
4708		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4709
4710	/* Update the previously stored value to prepare for next read */
4711	*prev_stat = new_data;
4712}
4713
4714/**
4715 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
4716 * @hw: ptr to the hardware info
4717 * @vsi_handle: VSI handle
4718 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
4719 * @cur_stats: ptr to current stats structure
4720 *
4721 * The GLV_REPC statistic register actually tracks two 16bit statistics, and
4722 * thus cannot be read using the normal ice_stat_update32 function.
4723 *
4724 * Read the GLV_REPC register associated with the given VSI, and update the
4725 * rx_no_desc and rx_error values in the ice_eth_stats structure.
4726 *
4727 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
4728 * cleared each time it's read.
4729 *
4730 * Note that the GLV_RDPC register also counts the causes that would trigger
4731 * GLV_REPC. However, it does not give the finer grained detail about why the
4732 * packets are being dropped. The GLV_REPC values can be used to distinguish
4733 * whether Rx packets are dropped due to errors or due to no available
4734 * descriptors.
4735 */
4736void
4737ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
4738		     struct ice_eth_stats *cur_stats)
4739{
4740	u16 vsi_num, no_desc, error_cnt;
4741	u32 repc;
4742
4743	if (!ice_is_vsi_valid(hw, vsi_handle))
4744		return;
4745
4746	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
4747
4748	/* If we haven't loaded stats yet, just clear the current value */
4749	if (!prev_stat_loaded) {
4750		wr32(hw, GLV_REPC(vsi_num), 0);
4751		return;
4752	}
4753
4754	repc = rd32(hw, GLV_REPC(vsi_num));
4755	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
4756	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
4757
4758	/* Clear the count by writing to the stats register */
4759	wr32(hw, GLV_REPC(vsi_num), 0);
4760
4761	cur_stats->rx_no_desc += no_desc;
4762	cur_stats->rx_errors += error_cnt;
4763}
4764
4765/**
4766 * ice_aq_alternate_write
4767 * @hw: pointer to the hardware structure
4768 * @reg_addr0: address of first dword to be written
4769 * @reg_val0: value to be written under 'reg_addr0'
4770 * @reg_addr1: address of second dword to be written
4771 * @reg_val1: value to be written under 'reg_addr1'
4772 *
4773 * Write one or two dwords to alternate structure. Fields are indicated
4774 * by 'reg_addr0' and 'reg_addr1' register numbers.
4775 */
4776enum ice_status
4777ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
4778		       u32 reg_addr1, u32 reg_val1)
4779{
4780	struct ice_aqc_read_write_alt_direct *cmd;
4781	struct ice_aq_desc desc;
4782	enum ice_status status;
4783
4784	cmd = &desc.params.read_write_alt_direct;
4785
4786	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
4787	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
4788	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
4789	cmd->dword0_value = CPU_TO_LE32(reg_val0);
4790	cmd->dword1_value = CPU_TO_LE32(reg_val1);
4791
4792	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4793
4794	return status;
4795}
4796
4797/**
4798 * ice_aq_alternate_read
4799 * @hw: pointer to the hardware structure
4800 * @reg_addr0: address of first dword to be read
4801 * @reg_val0: pointer for data read from 'reg_addr0'
4802 * @reg_addr1: address of second dword to be read
4803 * @reg_val1: pointer for data read from 'reg_addr1'
4804 *
4805 * Read one or two dwords from alternate structure. Fields are indicated
4806 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
4807 * is not passed then only register at 'reg_addr0' is read.
4808 */
4809enum ice_status
4810ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
4811		      u32 reg_addr1, u32 *reg_val1)
4812{
4813	struct ice_aqc_read_write_alt_direct *cmd;
4814	struct ice_aq_desc desc;
4815	enum ice_status status;
4816
4817	cmd = &desc.params.read_write_alt_direct;
4818
4819	if (!reg_val0)
4820		return ICE_ERR_PARAM;
4821
4822	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
4823	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
4824	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
4825
4826	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4827
4828	if (status == ICE_SUCCESS) {
4829		*reg_val0 = LE32_TO_CPU(cmd->dword0_value);
4830
4831		if (reg_val1)
4832			*reg_val1 = LE32_TO_CPU(cmd->dword1_value);
4833	}
4834
4835	return status;
4836}
4837
4838/**
4839 *  ice_aq_alternate_write_done
4840 *  @hw: pointer to the HW structure.
4841 *  @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
4842 *  @reset_needed: indicates the SW should trigger GLOBAL reset
4843 *
4844 *  Indicates to the FW that alternate structures have been changed.
4845 */
4846enum ice_status
4847ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
4848{
4849	struct ice_aqc_done_alt_write *cmd;
4850	struct ice_aq_desc desc;
4851	enum ice_status status;
4852
4853	cmd = &desc.params.done_alt_write;
4854
4855	if (!reset_needed)
4856		return ICE_ERR_PARAM;
4857
4858	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
4859	cmd->flags = bios_mode;
4860
4861	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4862	if (!status)
4863		*reset_needed = (LE16_TO_CPU(cmd->flags) &
4864				 ICE_AQC_RESP_RESET_NEEDED) != 0;
4865
4866	return status;
4867}
4868
4869/**
4870 *  ice_aq_alternate_clear
4871 *  @hw: pointer to the HW structure.
4872 *
4873 *  Clear the alternate structures of the port from which the function
4874 *  is called.
4875 */
4876enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
4877{
4878	struct ice_aq_desc desc;
4879	enum ice_status status;
4880
4881	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
4882
4883	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4884
4885	return status;
4886}
4887
4888/**
4889 * ice_sched_query_elem - query element information from HW
4890 * @hw: pointer to the HW struct
4891 * @node_teid: node TEID to be queried
4892 * @buf: buffer to element information
4893 *
4894 * This function queries HW element information
4895 */
4896enum ice_status
4897ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4898		     struct ice_aqc_txsched_elem_data *buf)
4899{
4900	u16 buf_size, num_elem_ret = 0;
4901	enum ice_status status;
4902
4903	buf_size = sizeof(*buf);
4904	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
4905	buf->node_teid = CPU_TO_LE32(node_teid);
4906	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4907					  NULL);
4908	if (status != ICE_SUCCESS || num_elem_ret != 1)
4909		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4910	return status;
4911}
4912
4913/**
4914 * ice_get_fw_mode - returns FW mode
4915 * @hw: pointer to the HW struct
4916 */
4917enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
4918{
4919#define ICE_FW_MODE_DBG_M BIT(0)
4920#define ICE_FW_MODE_REC_M BIT(1)
4921#define ICE_FW_MODE_ROLLBACK_M BIT(2)
4922	u32 fw_mode;
4923
4924	/* check the current FW mode */
4925	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
4926
4927	if (fw_mode & ICE_FW_MODE_DBG_M)
4928		return ICE_FW_MODE_DBG;
4929	else if (fw_mode & ICE_FW_MODE_REC_M)
4930		return ICE_FW_MODE_REC;
4931	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
4932		return ICE_FW_MODE_ROLLBACK;
4933	else
4934		return ICE_FW_MODE_NORMAL;
4935}
4936
4937/**
4938 * ice_cfg_get_cur_lldp_persist_status
4939 * @hw: pointer to the HW struct
4940 * @lldp_status: return value of LLDP persistent status
4941 *
4942 * Get the current status of LLDP persistent
4943 */
4944enum ice_status
4945ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
4946{
4947	struct ice_port_info *pi = hw->port_info;
4948	enum ice_status ret;
4949	__le32 raw_data;
4950	u32 data, mask;
4951
4952	if (!lldp_status)
4953		return ICE_ERR_BAD_PTR;
4954
4955	ret = ice_acquire_nvm(hw, ICE_RES_READ);
4956	if (ret)
4957		return ret;
4958
4959	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
4960			      ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
4961			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
4962			      false, true, NULL);
4963	if (!ret) {
4964		data = LE32_TO_CPU(raw_data);
4965		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
4966			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
4967		data = data & mask;
4968		*lldp_status = data >>
4969			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
4970	}
4971
4972	ice_release_nvm(hw);
4973
4974	return ret;
4975}
4976
4977/**
4978 * ice_get_dflt_lldp_persist_status
4979 * @hw: pointer to the HW struct
4980 * @lldp_status: return value of LLDP persistent status
4981 *
4982 * Get the default status of LLDP persistent
4983 */
4984enum ice_status
4985ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
4986{
4987	struct ice_port_info *pi = hw->port_info;
4988	u32 data, mask, loc_data, loc_data_tmp;
4989	enum ice_status ret;
4990	__le16 loc_raw_data;
4991	__le32 raw_data;
4992
4993	if (!lldp_status)
4994		return ICE_ERR_BAD_PTR;
4995
4996	ret = ice_acquire_nvm(hw, ICE_RES_READ);
4997	if (ret)
4998		return ret;
4999
5000	/* Read the offset of EMP_SR_PTR */
5001	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
5002			      ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
5003			      ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
5004			      &loc_raw_data, false, true, NULL);
5005	if (ret)
5006		goto exit;
5007
5008	loc_data = LE16_TO_CPU(loc_raw_data);
5009	if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
5010		loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
5011		loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
5012	} else {
5013		loc_data *= ICE_AQC_NVM_WORD_UNIT;
5014	}
5015
5016	/* Read the offset of LLDP configuration pointer */
5017	loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
5018	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5019			      ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
5020			      false, true, NULL);
5021	if (ret)
5022		goto exit;
5023
5024	loc_data_tmp = LE16_TO_CPU(loc_raw_data);
5025	loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
5026	loc_data += loc_data_tmp;
5027
5028	/* We need to skip LLDP configuration section length (2 bytes) */
5029	loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
5030
5031	/* Read the LLDP Default Configure */
5032	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5033			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
5034			      true, NULL);
5035	if (!ret) {
5036		data = LE32_TO_CPU(raw_data);
5037		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5038			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5039		data = data & mask;
5040		*lldp_status = data >>
5041			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5042	}
5043
5044exit:
5045	ice_release_nvm(hw);
5046
5047	return ret;
5048}
5049
5050/**
5051 * ice_fw_supports_link_override
5052 * @hw: pointer to the hardware structure
5053 *
5054 * Checks if the firmware supports link override
5055 */
5056bool ice_fw_supports_link_override(struct ice_hw *hw)
5057{
5058	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
5059		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
5060			return true;
5061		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
5062		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
5063			return true;
5064	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
5065		return true;
5066	}
5067
5068	return false;
5069}
5070
5071/**
5072 * ice_get_link_default_override
5073 * @ldo: pointer to the link default override struct
5074 * @pi: pointer to the port info struct
5075 *
5076 * Gets the link default override for a port
5077 */
5078enum ice_status
5079ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
5080			      struct ice_port_info *pi)
5081{
5082	u16 i, tlv, tlv_len, tlv_start, buf, offset;
5083	struct ice_hw *hw = pi->hw;
5084	enum ice_status status;
5085
5086	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
5087					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
5088	if (status) {
5089		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
5090		return status;
5091	}
5092
5093	/* Each port has its own config; calculate for our port */
5094	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
5095		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
5096
5097	/* link options first */
5098	status = ice_read_sr_word(hw, tlv_start, &buf);
5099	if (status) {
5100		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5101		return status;
5102	}
5103	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
5104	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
5105		ICE_LINK_OVERRIDE_PHY_CFG_S;
5106
5107	/* link PHY config */
5108	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
5109	status = ice_read_sr_word(hw, offset, &buf);
5110	if (status) {
5111		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
5112		return status;
5113	}
5114	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
5115
5116	/* PHY types low */
5117	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
5118	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5119		status = ice_read_sr_word(hw, (offset + i), &buf);
5120		if (status) {
5121			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5122			return status;
5123		}
5124		/* shift 16 bits at a time to fill 64 bits */
5125		ldo->phy_type_low |= ((u64)buf << (i * 16));
5126	}
5127
5128	/* PHY types high */
5129	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
5130		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
5131	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5132		status = ice_read_sr_word(hw, (offset + i), &buf);
5133		if (status) {
5134			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5135			return status;
5136		}
5137		/* shift 16 bits at a time to fill 64 bits */
5138		ldo->phy_type_high |= ((u64)buf << (i * 16));
5139	}
5140
5141	return status;
5142}
5143
5144/**
5145 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
5146 * @caps: get PHY capability data
5147 */
5148bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
5149{
5150	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
5151	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
5152				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
5153				       ICE_AQC_PHY_AN_EN_CLAUSE37))
5154		return true;
5155
5156	return false;
5157}
5158
5159/**
5160 * ice_aq_set_lldp_mib - Set the LLDP MIB
5161 * @hw: pointer to the HW struct
5162 * @mib_type: Local, Remote or both Local and Remote MIBs
5163 * @buf: pointer to the caller-supplied buffer to store the MIB block
5164 * @buf_size: size of the buffer (in bytes)
5165 * @cd: pointer to command details structure or NULL
5166 *
5167 * Set the LLDP MIB. (0x0A08)
5168 */
5169enum ice_status
5170ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
5171		    struct ice_sq_cd *cd)
5172{
5173	struct ice_aqc_lldp_set_local_mib *cmd;
5174	struct ice_aq_desc desc;
5175
5176	cmd = &desc.params.lldp_set_mib;
5177
5178	if (buf_size == 0 || !buf)
5179		return ICE_ERR_PARAM;
5180
5181	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
5182
5183	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
5184	desc.datalen = CPU_TO_LE16(buf_size);
5185
5186	cmd->type = mib_type;
5187	cmd->length = CPU_TO_LE16(buf_size);
5188
5189	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5190}
5191
5192/**
5193 * ice_fw_supports_lldp_fltr - check NVM version supports lldp_fltr_ctrl
5194 * @hw: pointer to HW struct
5195 */
5196bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
5197{
5198	if (hw->mac_type != ICE_MAC_E810)
5199		return false;
5200
5201	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
5202		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
5203			return true;
5204		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
5205		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
5206			return true;
5207	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
5208		return true;
5209	}
5210	return false;
5211}
5212
5213/**
5214 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
5215 * @hw: pointer to HW struct
5216 * @vsi_num: absolute HW index for VSI
5217 * @add: boolean for if adding or removing a filter
5218 */
5219enum ice_status
5220ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
5221{
5222	struct ice_aqc_lldp_filter_ctrl *cmd;
5223	struct ice_aq_desc desc;
5224
5225	cmd = &desc.params.lldp_filter_ctrl;
5226
5227	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
5228
5229	if (add)
5230		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
5231	else
5232		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
5233
5234	cmd->vsi_num = CPU_TO_LE16(vsi_num);
5235
5236	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5237}
5238