1/******************************************************************************
2
3  Copyright (c) 2013-2018, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33
34
35#include "ixl_pf.h"
36
37#ifdef PCI_IOV
38#include "ixl_pf_iov.h"
39#endif
40
41#ifdef IXL_IW
42#include "ixl_iw.h"
43#include "ixl_iw_int.h"
44#endif
45
46static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
47static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
48static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
49static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
50static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
51static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
52static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
53
54/* Sysctls */
55static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
56static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
57static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
58static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
59static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
60static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
61static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
62
63static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
64static int	ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);
65
66/* Debug Sysctls */
67static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
68static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
69static int	ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS);
70static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
71static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
72static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
73static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
74static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
75static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
76static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
77static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
78static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
79static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
80static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
81static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
82static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
83static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
84static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
85static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
86static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
87static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
88
89/* Debug Sysctls */
90static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
91static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
92static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
93static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
94#ifdef IXL_DEBUG
95static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
96static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
97#endif
98
99#ifdef IXL_IW
100extern int ixl_enable_iwarp;
101extern int ixl_limit_iwarp_msix;
102#endif
103
104static const char * const ixl_fc_string[6] = {
105	"None",
106	"Rx",
107	"Tx",
108	"Full",
109	"Priority",
110	"Default"
111};
112
113static char *ixl_fec_string[3] = {
114       "CL108 RS-FEC",
115       "CL74 FC-FEC/BASE-R",
116       "None"
117};
118
119/* Functions for setting and checking driver state. Note the functions take
120 * bit positions, not bitmasks. The atomic_set_32 and atomic_clear_32
121 * operations require bitmasks. This can easily lead to programming error, so
122 * we provide wrapper functions to avoid this.
123 */
124
125/**
126 * ixl_set_state - Set the specified state
127 * @s: the state bitmap
128 * @bit: the state to set
129 *
130 * Atomically update the state bitmap with the specified bit set.
131 */
132inline void
133ixl_set_state(volatile u32 *s, enum ixl_state bit)
134{
135	/* atomic_set_32 expects a bitmask */
136	atomic_set_32(s, BIT(bit));
137}
138
139/**
140 * ixl_clear_state - Clear the specified state
141 * @s: the state bitmap
142 * @bit: the state to clear
143 *
144 * Atomically update the state bitmap with the specified bit cleared.
145 */
146inline void
147ixl_clear_state(volatile u32 *s, enum ixl_state bit)
148{
149	/* atomic_clear_32 expects a bitmask */
150	atomic_clear_32(s, BIT(bit));
151}
152
153/**
154 * ixl_test_state - Test the specified state
155 * @s: the state bitmap
156 * @bit: the bit to test
157 *
158 * Return true if the state is set, false otherwise. Use this only if the flow
159 * does not need to update the state. If you must update the state as well,
160 * prefer ixl_testandset_state.
161 */
162inline bool
163ixl_test_state(volatile u32 *s, enum ixl_state bit)
164{
165	return !!(*s & BIT(bit));
166}
167
168/**
169 * ixl_testandset_state - Test and set the specified state
170 * @s: the state bitmap
171 * @bit: the bit to test
172 *
173 * Atomically update the state bitmap, setting the specified bit. Returns the
174 * previous value of the bit.
175 */
176inline u32
177ixl_testandset_state(volatile u32 *s, enum ixl_state bit)
178{
179	/* atomic_testandset_32 expects a bit position, as opposed to bitmask
180	expected by other atomic functions */
181	return atomic_testandset_32(s, bit);
182}
183
184MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
185
186/*
187** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
188*/
189void
190ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
191{
192	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
193	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
194	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
195
196	sbuf_printf(buf,
197	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
198	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
199	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
200	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
201	    IXL_NVM_VERSION_HI_SHIFT,
202	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
203	    IXL_NVM_VERSION_LO_SHIFT,
204	    hw->nvm.eetrack,
205	    oem_ver, oem_build, oem_patch);
206}
207
208void
209ixl_print_nvm_version(struct ixl_pf *pf)
210{
211	struct i40e_hw *hw = &pf->hw;
212	device_t dev = pf->dev;
213	struct sbuf *sbuf;
214
215	sbuf = sbuf_new_auto();
216	ixl_nvm_version_str(hw, sbuf);
217	sbuf_finish(sbuf);
218	device_printf(dev, "%s\n", sbuf_data(sbuf));
219	sbuf_delete(sbuf);
220}
221
222/**
223 * ixl_get_fw_mode - Check the state of FW
224 * @hw: device hardware structure
225 *
226 * Identify state of FW. It might be in a recovery mode
227 * which limits functionality and requires special handling
228 * from the driver.
229 *
230 * @returns FW mode (normal, recovery, unexpected EMP reset)
231 */
232static enum ixl_fw_mode
233ixl_get_fw_mode(struct ixl_pf *pf)
234{
235	struct i40e_hw *hw = &pf->hw;
236	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
237	u32 fwsts;
238
239#ifdef IXL_DEBUG
240	if (pf->recovery_mode)
241		return IXL_FW_MODE_RECOVERY;
242#endif
243	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
244
245	/* Is set and has one of expected values */
246	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
247	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
248	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
249	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
250		fw_mode = IXL_FW_MODE_RECOVERY;
251	else {
252		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
253		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
254			fw_mode = IXL_FW_MODE_UEMPR;
255	}
256	return (fw_mode);
257}
258
259/**
260 * ixl_pf_reset - Reset the PF
261 * @pf: PF structure
262 *
263 * Ensure that FW is in the right state and do the reset
264 * if needed.
265 *
266 * @returns zero on success, or an error code on failure.
267 */
268int
269ixl_pf_reset(struct ixl_pf *pf)
270{
271	struct i40e_hw *hw = &pf->hw;
272	enum i40e_status_code status;
273	enum ixl_fw_mode fw_mode;
274
275	fw_mode = ixl_get_fw_mode(pf);
276	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
277	if (fw_mode == IXL_FW_MODE_RECOVERY) {
278		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
279		/* Don't try to reset device if it's in recovery mode */
280		return (0);
281	}
282
283	status = i40e_pf_reset(hw);
284	if (status == I40E_SUCCESS)
285		return (0);
286
287	/* Check FW mode again in case it has changed while
288	 * waiting for reset to complete */
289	fw_mode = ixl_get_fw_mode(pf);
290	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
291	if (fw_mode == IXL_FW_MODE_RECOVERY) {
292		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
293		return (0);
294	}
295
296	if (fw_mode == IXL_FW_MODE_UEMPR)
297		device_printf(pf->dev,
298		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
299	else
300		device_printf(pf->dev, "PF reset failure %s\n",
301		    i40e_stat_str(hw, status));
302	return (EIO);
303}
304
305/**
306 * ixl_setup_hmc - Setup LAN Host Memory Cache
307 * @pf: PF structure
308 *
309 * Init and configure LAN Host Memory Cache
310 *
311 * @returns 0 on success, EIO on error
312 */
313int
314ixl_setup_hmc(struct ixl_pf *pf)
315{
316	struct i40e_hw *hw = &pf->hw;
317	enum i40e_status_code status;
318
319	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
320	    hw->func_caps.num_rx_qp, 0, 0);
321	if (status) {
322		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
323		    i40e_stat_str(hw, status));
324		return (EIO);
325	}
326
327	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
328	if (status) {
329		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
330		    i40e_stat_str(hw, status));
331		return (EIO);
332	}
333
334	return (0);
335}
336
337/**
338 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
339 * @pf: PF structure
340 *
341 * Shutdown Host Memory Cache if configured.
342 *
343 */
344void
345ixl_shutdown_hmc(struct ixl_pf *pf)
346{
347	struct i40e_hw *hw = &pf->hw;
348	enum i40e_status_code status;
349
350	/* HMC not configured, no need to shutdown */
351	if (hw->hmc.hmc_obj == NULL)
352		return;
353
354	status = i40e_shutdown_lan_hmc(hw);
355	if (status)
356		device_printf(pf->dev,
357		    "Shutdown LAN HMC failed with code %s\n",
358		    i40e_stat_str(hw, status));
359}
360/*
361 * Write PF ITR values to queue ITR registers.
362 */
363void
364ixl_configure_itr(struct ixl_pf *pf)
365{
366	ixl_configure_tx_itr(pf);
367	ixl_configure_rx_itr(pf);
368}
369
370/*********************************************************************
371 *
372 *  Get the hardware capabilities
373 *
374 **********************************************************************/
375
376int
377ixl_get_hw_capabilities(struct ixl_pf *pf)
378{
379	struct i40e_aqc_list_capabilities_element_resp *buf;
380	struct i40e_hw	*hw = &pf->hw;
381	device_t 	dev = pf->dev;
382	enum i40e_status_code status;
383	int len, i2c_intfc_num;
384	bool again = TRUE;
385	u16 needed;
386
387	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
388		hw->func_caps.iwarp = 0;
389		return (0);
390	}
391
392	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
393retry:
394	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
395	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
396		device_printf(dev, "Unable to allocate cap memory\n");
397                return (ENOMEM);
398	}
399
400	/* This populates the hw struct */
401        status = i40e_aq_discover_capabilities(hw, buf, len,
402	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
403	free(buf, M_IXL);
404	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
405	    (again == TRUE)) {
406		/* retry once with a larger buffer */
407		again = FALSE;
408		len = needed;
409		goto retry;
410	} else if (status != I40E_SUCCESS) {
411		device_printf(dev, "capability discovery failed; status %s, error %s\n",
412		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
413		return (ENODEV);
414	}
415
416	/*
417	 * Some devices have both MDIO and I2C; since this isn't reported
418	 * by the FW, check registers to see if an I2C interface exists.
419	 */
420	i2c_intfc_num = ixl_find_i2c_interface(pf);
421	if (i2c_intfc_num != -1)
422		pf->has_i2c = true;
423
424	/* Determine functions to use for driver I2C accesses */
425	switch (pf->i2c_access_method) {
426	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
427		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
428			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
429			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
430		} else {
431			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
432			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
433		}
434		break;
435	}
436	case IXL_I2C_ACCESS_METHOD_AQ:
437		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
438		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
439		break;
440	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
441		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
442		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
443		break;
444	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
445		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
446		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
447		break;
448	default:
449		/* Should not happen */
450		device_printf(dev, "Error setting I2C access functions\n");
451		break;
452	}
453
454	/* Keep link active by default */
455	ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
456
457	/* Print a subset of the capability information. */
458	device_printf(dev,
459	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
460	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
461	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
462	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
463	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
464	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
465	    "MDIO shared");
466
467	return (0);
468}
469
470/* For the set_advertise sysctl */
471void
472ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
473{
474	device_t dev = pf->dev;
475	int err;
476
477	/* Make sure to initialize the device to the complete list of
478	 * supported speeds on driver load, to ensure unloading and
479	 * reloading the driver will restore this value.
480	 */
481	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
482	if (err) {
483		/* Non-fatal error */
484		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
485			      __func__, err);
486		return;
487	}
488
489	pf->advertised_speed =
490	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
491}
492
493int
494ixl_teardown_hw_structs(struct ixl_pf *pf)
495{
496	enum i40e_status_code status = 0;
497	struct i40e_hw *hw = &pf->hw;
498	device_t dev = pf->dev;
499
500	/* Shutdown LAN HMC */
501	if (hw->hmc.hmc_obj) {
502		status = i40e_shutdown_lan_hmc(hw);
503		if (status) {
504			device_printf(dev,
505			    "init: LAN HMC shutdown failure; status %s\n",
506			    i40e_stat_str(hw, status));
507			goto err_out;
508		}
509	}
510
511	/* Shutdown admin queue */
512	ixl_disable_intr0(hw);
513	status = i40e_shutdown_adminq(hw);
514	if (status)
515		device_printf(dev,
516		    "init: Admin Queue shutdown failure; status %s\n",
517		    i40e_stat_str(hw, status));
518
519	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
520err_out:
521	return (status);
522}
523
524/*
525** Creates new filter with given MAC address and VLAN ID
526*/
527static struct ixl_mac_filter *
528ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
529{
530	struct ixl_mac_filter  *f;
531
532	/* create a new empty filter */
533	f = malloc(sizeof(struct ixl_mac_filter),
534	    M_IXL, M_NOWAIT | M_ZERO);
535	if (f) {
536		LIST_INSERT_HEAD(headp, f, ftle);
537		bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
538		f->vlan = vlan;
539	}
540
541	return (f);
542}
543
544/**
545 * ixl_free_filters - Free all filters in given list
546 * headp - pointer to list head
547 *
548 * Frees memory used by each entry in the list.
549 * Does not remove filters from HW.
550 */
551void
552ixl_free_filters(struct ixl_ftl_head *headp)
553{
554	struct ixl_mac_filter *f, *nf;
555
556	f = LIST_FIRST(headp);
557	while (f != NULL) {
558		nf = LIST_NEXT(f, ftle);
559		free(f, M_IXL);
560		f = nf;
561	}
562
563	LIST_INIT(headp);
564}
565
566static u_int
567ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
568{
569	struct ixl_add_maddr_arg *ama = arg;
570	struct ixl_vsi *vsi = ama->vsi;
571	const u8 *macaddr = (u8*)LLADDR(sdl);
572	struct ixl_mac_filter *f;
573
574	/* Does one already exist */
575	f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
576	if (f != NULL)
577		return (0);
578
579	f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
580	if (f == NULL) {
581		device_printf(vsi->dev, "WARNING: no filter available!!\n");
582		return (0);
583	}
584	f->flags |= IXL_FILTER_MC;
585
586	return (1);
587}
588
589/*********************************************************************
590 * 	Filter Routines
591 *
592 *	Routines for multicast and vlan filter management.
593 *
594 *********************************************************************/
595
596/**
597 * ixl_add_multi - Add multicast filters to the hardware
598 * @vsi: The VSI structure
599 *
600 * In case number of multicast filters in the IFP exceeds 127 entries,
601 * multicast promiscuous mode will be enabled and the filters will be removed
602 * from the hardware
603 */
604void
605ixl_add_multi(struct ixl_vsi *vsi)
606{
607	if_t			ifp = vsi->ifp;
608	struct i40e_hw		*hw = vsi->hw;
609	int			mcnt = 0;
610	struct ixl_add_maddr_arg cb_arg;
611	enum i40e_status_code	status;
612
613	IOCTL_DEBUGOUT("ixl_add_multi: begin");
614
615	mcnt = if_llmaddr_count(ifp);
616	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
617		status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
618		    TRUE, NULL);
619		if (status != I40E_SUCCESS)
620			if_printf(ifp, "Failed to enable multicast promiscuous "
621			    "mode, status: %s\n", i40e_stat_str(hw, status));
622		else
623			if_printf(ifp, "Enabled multicast promiscuous mode\n");
624		/* Delete all existing MC filters */
625		ixl_del_multi(vsi, true);
626		return;
627	}
628
629	cb_arg.vsi = vsi;
630	LIST_INIT(&cb_arg.to_add);
631
632	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
633	if (mcnt > 0)
634		ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);
635
636	IOCTL_DEBUGOUT("ixl_add_multi: end");
637}
638
639static u_int
640ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
641{
642	struct ixl_mac_filter *f = arg;
643
644	if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
645		return (1);
646	else
647		return (0);
648}
649
650/**
651 * ixl_dis_multi_promisc - Disable multicast promiscuous mode
652 * @vsi: The VSI structure
653 * @vsi_mcnt: Number of multicast filters in the VSI
654 *
655 * Disable multicast promiscuous mode based on number of entries in the IFP
656 * and the VSI, then re-add multicast filters.
657 *
658 */
659static void
660ixl_dis_multi_promisc(struct ixl_vsi *vsi, int vsi_mcnt)
661{
662	struct ifnet		*ifp = vsi->ifp;
663	struct i40e_hw		*hw = vsi->hw;
664	int			ifp_mcnt = 0;
665	enum i40e_status_code	status;
666
667	ifp_mcnt = if_llmaddr_count(ifp);
668	/*
669	 * Equal lists or empty ifp list mean the list has not been changed
670	 * and in such case avoid disabling multicast promiscuous mode as it
671	 * was not previously enabled. Case where multicast promiscuous mode has
672	 * been enabled is when vsi_mcnt == 0 && ifp_mcnt > 0.
673	 */
674	if (ifp_mcnt == vsi_mcnt || ifp_mcnt == 0 ||
675	    ifp_mcnt >= MAX_MULTICAST_ADDR)
676		return;
677
678	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
679	    FALSE, NULL);
680	if (status != I40E_SUCCESS) {
681		if_printf(ifp, "Failed to disable multicast promiscuous "
682		    "mode, status: %s\n", i40e_stat_str(hw, status));
683
684		return;
685	}
686
687	if_printf(ifp, "Disabled multicast promiscuous mode\n");
688
689	ixl_add_multi(vsi);
690}
691
692/**
693 * ixl_del_multi - Delete multicast filters from the hardware
694 * @vsi: The VSI structure
695 * @all: Bool to determine if all the multicast filters should be removed
696 *
697 * In case number of multicast filters in the IFP drops to 127 entries,
698 * multicast promiscuous mode will be disabled and the filters will be reapplied
699 * to the hardware.
700 */
701void
702ixl_del_multi(struct ixl_vsi *vsi, bool all)
703{
704	int			to_del_cnt = 0, vsi_mcnt = 0;
705	if_t			ifp = vsi->ifp;
706	struct ixl_mac_filter	*f, *fn;
707	struct ixl_ftl_head	to_del;
708
709	IOCTL_DEBUGOUT("ixl_del_multi: begin");
710
711	LIST_INIT(&to_del);
712	/* Search for removed multicast addresses */
713	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
714		if ((f->flags & IXL_FILTER_MC) == 0)
715			continue;
716
717		/* Count all the multicast filters in the VSI for comparison */
718		vsi_mcnt++;
719
720		if (!all && if_foreach_llmaddr(ifp, ixl_match_maddr, f) != 0)
721			continue;
722
723		LIST_REMOVE(f, ftle);
724		LIST_INSERT_HEAD(&to_del, f, ftle);
725		to_del_cnt++;
726	}
727
728	if (to_del_cnt > 0) {
729		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
730		return;
731	}
732
733	ixl_dis_multi_promisc(vsi, vsi_mcnt);
734
735	IOCTL_DEBUGOUT("ixl_del_multi: end");
736}
737
738void
739ixl_link_up_msg(struct ixl_pf *pf)
740{
741	struct i40e_hw *hw = &pf->hw;
742	if_t ifp = pf->vsi.ifp;
743	char *req_fec_string, *neg_fec_string;
744	u8 fec_abilities;
745
746	fec_abilities = hw->phy.link_info.req_fec_info;
747	/* If both RS and KR are requested, only show RS */
748	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
749		req_fec_string = ixl_fec_string[0];
750	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
751		req_fec_string = ixl_fec_string[1];
752	else
753		req_fec_string = ixl_fec_string[2];
754
755	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
756		neg_fec_string = ixl_fec_string[0];
757	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
758		neg_fec_string = ixl_fec_string[1];
759	else
760		neg_fec_string = ixl_fec_string[2];
761
762	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
763	    if_name(ifp),
764	    ixl_link_speed_string(hw->phy.link_info.link_speed),
765	    req_fec_string, neg_fec_string,
766	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
767	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
768	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
769		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
770		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
771		ixl_fc_string[1] : ixl_fc_string[0]);
772}
773
774/*
775 * Configure admin queue/misc interrupt cause registers in hardware.
776 */
777void
778ixl_configure_intr0_msix(struct ixl_pf *pf)
779{
780	struct i40e_hw *hw = &pf->hw;
781	u32 reg;
782
783	/* First set up the adminq - vector 0 */
784	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
785	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
786
787	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
788	    I40E_PFINT_ICR0_ENA_GRST_MASK |
789	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
790	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
791	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
792	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
793	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
794	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
795	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
796
797	/*
798	 * 0x7FF is the end of the queue list.
799	 * This means we won't use MSI-X vector 0 for a queue interrupt
800	 * in MSI-X mode.
801	 */
802	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
803	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
804	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
805
806	wr32(hw, I40E_PFINT_DYN_CTL0,
807	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
808	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
809
810	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
811}
812
813void
814ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
815{
816	/* Display supported media types */
817	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
818		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
819
820	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
821		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
822	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
823		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
824	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
825		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
826
827	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
828		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
829
830	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
831		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
832
833	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
834	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
835	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
836		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
837
838	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
839		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
840	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
841		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
842	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
843		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
844
845	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
846	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
847	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
848	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
849	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
850		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
851	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
852		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
853	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
854		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
855
856	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
857		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
858
859	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
860	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
861		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
862	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
863		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
864	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
865		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
866	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
867		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
868	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
869		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
870
871	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
872		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
873
874	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
875		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
876	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
877		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
878
879	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
880		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
881	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
882		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
883	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
884		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
885	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
886		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
887	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
888		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
889	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
890		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
891}
892
893/*********************************************************************
894 *
895 *  Get Firmware Switch configuration
896 *	- this will need to be more robust when more complex
897 *	  switch configurations are enabled.
898 *
899 **********************************************************************/
900int
901ixl_switch_config(struct ixl_pf *pf)
902{
903	struct i40e_hw	*hw = &pf->hw;
904	struct ixl_vsi	*vsi = &pf->vsi;
905	device_t 	dev = iflib_get_dev(vsi->ctx);
906	struct i40e_aqc_get_switch_config_resp *sw_config;
907	u8	aq_buf[I40E_AQ_LARGE_BUF];
908	int	ret;
909	u16	next = 0;
910
911	memset(&aq_buf, 0, sizeof(aq_buf));
912	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
913	ret = i40e_aq_get_switch_config(hw, sw_config,
914	    sizeof(aq_buf), &next, NULL);
915	if (ret) {
916		device_printf(dev, "aq_get_switch_config() failed, error %d,"
917		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
918		return (ret);
919	}
920	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
921		device_printf(dev,
922		    "Switch config: header reported: %d in structure, %d total\n",
923		    LE16_TO_CPU(sw_config->header.num_reported),
924		    LE16_TO_CPU(sw_config->header.num_total));
925		for (int i = 0;
926		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
927			device_printf(dev,
928			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
929			    sw_config->element[i].element_type,
930			    LE16_TO_CPU(sw_config->element[i].seid),
931			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
932			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
933		}
934	}
935	/* Simplified due to a single VSI */
936	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
937	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
938	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
939	return (ret);
940}
941
942void
943ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
944{
945	struct sysctl_oid *tree;
946	struct sysctl_oid_list *child;
947	struct sysctl_oid_list *vsi_list;
948
949	tree = device_get_sysctl_tree(vsi->dev);
950	child = SYSCTL_CHILDREN(tree);
951	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
952			CTLFLAG_RD, NULL, "VSI Number");
953
954	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
955	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
956
957	/* Copy of netstat RX errors counter for validation purposes */
958	SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
959			CTLFLAG_RD, &vsi->ierrors,
960			"RX packet errors");
961
962	if (queues_sysctls)
963		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
964}
965
966/*
967 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
968 * Writes to the ITR registers immediately.
969 */
970static int
971ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
972{
973	struct ixl_pf *pf = (struct ixl_pf *)arg1;
974	device_t dev = pf->dev;
975	int error = 0;
976	int requested_tx_itr;
977
978	requested_tx_itr = pf->tx_itr;
979	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
980	if ((error) || (req->newptr == NULL))
981		return (error);
982	if (pf->dynamic_tx_itr) {
983		device_printf(dev,
984		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
985		    return (EINVAL);
986	}
987	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
988		device_printf(dev,
989		    "Invalid TX itr value; value must be between 0 and %d\n",
990		        IXL_MAX_ITR);
991		return (EINVAL);
992	}
993
994	pf->tx_itr = requested_tx_itr;
995	ixl_configure_tx_itr(pf);
996
997	return (error);
998}
999
1000/*
1001 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1002 * Writes to the ITR registers immediately.
1003 */
1004static int
1005ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1006{
1007	struct ixl_pf *pf = (struct ixl_pf *)arg1;
1008	device_t dev = pf->dev;
1009	int error = 0;
1010	int requested_rx_itr;
1011
1012	requested_rx_itr = pf->rx_itr;
1013	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1014	if ((error) || (req->newptr == NULL))
1015		return (error);
1016	if (pf->dynamic_rx_itr) {
1017		device_printf(dev,
1018		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
1019		    return (EINVAL);
1020	}
1021	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1022		device_printf(dev,
1023		    "Invalid RX itr value; value must be between 0 and %d\n",
1024		        IXL_MAX_ITR);
1025		return (EINVAL);
1026	}
1027
1028	pf->rx_itr = requested_rx_itr;
1029	ixl_configure_rx_itr(pf);
1030
1031	return (error);
1032}
1033
1034void
1035ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1036	struct sysctl_oid_list *child,
1037	struct i40e_hw_port_stats *stats)
1038{
1039	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
1040	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
1041	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1042
1043	struct i40e_eth_stats *eth_stats = &stats->eth;
1044	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1045
1046	struct ixl_sysctl_info ctls[] =
1047	{
1048		{&stats->crc_errors, "crc_errors", "CRC Errors"},
1049		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1050		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1051		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1052		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1053		/* Packet Reception Stats */
1054		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1055		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1056		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1057		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1058		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1059		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1060		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1061		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1062		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1063		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1064		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1065		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1066		/* Packet Transmission Stats */
1067		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1068		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1069		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1070		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1071		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1072		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1073		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1074		/* Flow control */
1075		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1076		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1077		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1078		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1079		/* End */
1080		{0,0,0}
1081	};
1082
1083	struct ixl_sysctl_info *entry = ctls;
1084	while (entry->stat != 0)
1085	{
1086		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1087				CTLFLAG_RD, entry->stat,
1088				entry->description);
1089		entry++;
1090	}
1091}
1092
1093void
1094ixl_set_rss_key(struct ixl_pf *pf)
1095{
1096	struct i40e_hw *hw = &pf->hw;
1097	struct ixl_vsi *vsi = &pf->vsi;
1098	device_t	dev = pf->dev;
1099	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1100	enum i40e_status_code status;
1101
1102#ifdef RSS
1103        /* Fetch the configured RSS key */
1104        rss_getkey((uint8_t *) &rss_seed);
1105#else
1106	ixl_get_default_rss_key(rss_seed);
1107#endif
1108	/* Fill out hash function seed */
1109	if (hw->mac.type == I40E_MAC_X722) {
1110		struct i40e_aqc_get_set_rss_key_data key_data;
1111		bcopy(rss_seed, &key_data, 52);
1112		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1113		if (status)
1114			device_printf(dev,
1115			    "i40e_aq_set_rss_key status %s, error %s\n",
1116			    i40e_stat_str(hw, status),
1117			    i40e_aq_str(hw, hw->aq.asq_last_status));
1118	} else {
1119		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1120			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1121	}
1122}
1123
1124/*
1125 * Configure enabled PCTYPES for RSS.
1126 */
1127void
1128ixl_set_rss_pctypes(struct ixl_pf *pf)
1129{
1130	struct i40e_hw *hw = &pf->hw;
1131	u64		set_hena = 0, hena;
1132
1133#ifdef RSS
1134	u32		rss_hash_config;
1135
1136	rss_hash_config = rss_gethashconfig();
1137	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1138                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1139	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1140                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1141	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1142                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1143	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1144                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1145	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1146		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1147	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1148                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1149        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1150                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1151#else
1152	if (hw->mac.type == I40E_MAC_X722)
1153		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1154	else
1155		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1156#endif
1157	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1158	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1159	hena |= set_hena;
1160	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1161	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1162
1163}
1164
1165/*
1166** Setup the PF's RSS parameters.
1167*/
1168void
1169ixl_config_rss(struct ixl_pf *pf)
1170{
1171	ixl_set_rss_key(pf);
1172	ixl_set_rss_pctypes(pf);
1173	ixl_set_rss_hlut(pf);
1174}
1175
1176/*
1177 * In some firmware versions there is default MAC/VLAN filter
1178 * configured which interferes with filters managed by driver.
1179 * Make sure it's removed.
1180 */
1181void
1182ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1183{
1184	struct i40e_aqc_remove_macvlan_element_data e;
1185
1186	bzero(&e, sizeof(e));
1187	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1188	e.vlan_tag = 0;
1189	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1190	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1191
1192	bzero(&e, sizeof(e));
1193	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1194	e.vlan_tag = 0;
1195	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1196		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1197	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1198}
1199
1200/*
1201** Initialize filter list and add filters that the hardware
1202** needs to know about.
1203**
1204** Requires VSI's seid to be set before calling.
1205*/
1206void
1207ixl_init_filters(struct ixl_vsi *vsi)
1208{
1209	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1210
1211	ixl_dbg_filter(pf, "%s: start\n", __func__);
1212
1213	/* Initialize mac filter list for VSI */
1214	LIST_INIT(&vsi->ftl);
1215	vsi->num_hw_filters = 0;
1216
1217	/* Receive broadcast Ethernet frames */
1218	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1219
1220	if (IXL_VSI_IS_VF(vsi))
1221		return;
1222
1223	ixl_del_default_hw_filters(vsi);
1224
1225	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1226
1227	/*
1228	 * Prevent Tx flow control frames from being sent out by
1229	 * non-firmware transmitters.
1230	 * This affects every VSI in the PF.
1231	 */
1232#ifndef IXL_DEBUG_FC
1233	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1234#else
1235	if (pf->enable_tx_fc_filter)
1236		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1237#endif
1238}
1239
1240void
1241ixl_reconfigure_filters(struct ixl_vsi *vsi)
1242{
1243	struct i40e_hw *hw = vsi->hw;
1244	struct ixl_ftl_head tmp;
1245	int cnt;
1246
1247	/*
1248	 * The ixl_add_hw_filters function adds filters configured
1249	 * in HW to a list in VSI. Move all filters to a temporary
1250	 * list to avoid corrupting it by concatenating to itself.
1251	 */
1252	LIST_INIT(&tmp);
1253	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
1254	cnt = vsi->num_hw_filters;
1255	vsi->num_hw_filters = 0;
1256
1257	ixl_add_hw_filters(vsi, &tmp, cnt);
1258
1259	/*
1260	 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp
1261	 * will be NULL. Furthermore, the ftl of such vsi already contains
1262	 * IXL_VLAN_ANY filter so we can skip that as well.
1263	 */
1264	if (hw == NULL)
1265		return;
1266
1267	/* Filter could be removed if MAC address was changed */
1268	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1269
1270	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
1271		return;
1272	/*
1273	 * VLAN HW filtering is enabled, make sure that filters
1274	 * for all registered VLAN tags are configured
1275	 */
1276	ixl_add_vlan_filters(vsi, hw->mac.addr);
1277}
1278
1279/*
1280 * This routine adds a MAC/VLAN filter to the software filter
1281 * list, then adds that new filter to the HW if it doesn't already
1282 * exist in the SW filter list.
1283 */
1284void
1285ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1286{
1287	struct ixl_mac_filter	*f, *tmp;
1288	struct ixl_pf		*pf;
1289	device_t		dev;
1290	struct ixl_ftl_head	to_add;
1291	int			to_add_cnt;
1292
1293	pf = vsi->back;
1294	dev = pf->dev;
1295	to_add_cnt = 1;
1296
1297	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
1298	    MAC_FORMAT_ARGS(macaddr), vlan);
1299
1300	/* Does one already exist */
1301	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1302	if (f != NULL)
1303		return;
1304
1305	LIST_INIT(&to_add);
1306	f = ixl_new_filter(&to_add, macaddr, vlan);
1307	if (f == NULL) {
1308		device_printf(dev, "WARNING: no filter available!!\n");
1309		return;
1310	}
1311	if (f->vlan != IXL_VLAN_ANY)
1312		f->flags |= IXL_FILTER_VLAN;
1313	else
1314		vsi->num_macs++;
1315
1316	/*
1317	** Is this the first vlan being registered, if so we
1318	** need to remove the ANY filter that indicates we are
1319	** not in a vlan, and replace that with a 0 filter.
1320	*/
1321	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1322		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
1323		if (tmp != NULL) {
1324			struct ixl_ftl_head to_del;
1325
1326			/* Prepare new filter first to avoid removing
1327			 * VLAN_ANY filter if allocation fails */
1328			f = ixl_new_filter(&to_add, macaddr, 0);
1329			if (f == NULL) {
1330				device_printf(dev, "WARNING: no filter available!!\n");
1331				free(LIST_FIRST(&to_add), M_IXL);
1332				return;
1333			}
1334			to_add_cnt++;
1335
1336			LIST_REMOVE(tmp, ftle);
1337			LIST_INIT(&to_del);
1338			LIST_INSERT_HEAD(&to_del, tmp, ftle);
1339			ixl_del_hw_filters(vsi, &to_del, 1);
1340		}
1341	}
1342
1343	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1344}
1345
1346/**
1347 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
1348 * @vsi: pointer to VSI
1349 * @macaddr: MAC address
1350 *
1351 * Adds MAC/VLAN filter for each VLAN configured on the interface
1352 * if there is enough HW filters. Otherwise adds a single filter
1353 * for all tagged and untagged frames to allow all configured VLANs
1354 * to recieve traffic.
1355 */
1356void
1357ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1358{
1359	struct ixl_ftl_head to_add;
1360	struct ixl_mac_filter *f;
1361	int to_add_cnt = 0;
1362	int i, vlan = 0;
1363
1364	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
1365		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1366		return;
1367	}
1368	LIST_INIT(&to_add);
1369
1370	/* Add filter for untagged frames if it does not exist yet */
1371	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
1372	if (f == NULL) {
1373		f = ixl_new_filter(&to_add, macaddr, 0);
1374		if (f == NULL) {
1375			device_printf(vsi->dev, "WARNING: no filter available!!\n");
1376			return;
1377		}
1378		to_add_cnt++;
1379	}
1380
1381	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
1382		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
1383		if (vlan == -1)
1384			break;
1385
1386		/* Does one already exist */
1387		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1388		if (f != NULL)
1389			continue;
1390
1391		f = ixl_new_filter(&to_add, macaddr, vlan);
1392		if (f == NULL) {
1393			device_printf(vsi->dev, "WARNING: no filter available!!\n");
1394			ixl_free_filters(&to_add);
1395			return;
1396		}
1397		to_add_cnt++;
1398	}
1399
1400	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1401}
1402
1403void
1404ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1405{
1406	struct ixl_mac_filter *f, *tmp;
1407	struct ixl_ftl_head ftl_head;
1408	int to_del_cnt = 1;
1409
1410	ixl_dbg_filter((struct ixl_pf *)vsi->back,
1411	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1412	    MAC_FORMAT_ARGS(macaddr), vlan);
1413
1414	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1415	if (f == NULL)
1416		return;
1417
1418	LIST_REMOVE(f, ftle);
1419	LIST_INIT(&ftl_head);
1420	LIST_INSERT_HEAD(&ftl_head, f, ftle);
1421	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1422		vsi->num_macs--;
1423
1424	/* If this is not the last vlan just remove the filter */
1425	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
1426		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1427		return;
1428	}
1429
1430	/* It's the last vlan, we need to switch back to a non-vlan filter */
1431	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
1432	if (tmp != NULL) {
1433		LIST_REMOVE(tmp, ftle);
1434		LIST_INSERT_AFTER(f, tmp, ftle);
1435		to_del_cnt++;
1436	}
1437	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1438
1439	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1440}
1441
1442/**
1443 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
1444 * @vsi: VSI which filters need to be removed
1445 * @macaddr: MAC address
1446 *
1447 * Remove all MAC/VLAN filters with a given MAC address. For multicast
1448 * addresses there is always single filter for all VLANs used (IXL_VLAN_ANY)
1449 * so skip them to speed up processing. Those filters should be removed
1450 * using ixl_del_filter function.
1451 */
1452void
1453ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1454{
1455	struct ixl_mac_filter *f, *tmp;
1456	struct ixl_ftl_head to_del;
1457	int to_del_cnt = 0;
1458
1459	LIST_INIT(&to_del);
1460
1461	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
1462		if ((f->flags & IXL_FILTER_MC) != 0 ||
1463		    !ixl_ether_is_equal(f->macaddr, macaddr))
1464			continue;
1465
1466		LIST_REMOVE(f, ftle);
1467		LIST_INSERT_HEAD(&to_del, f, ftle);
1468		to_del_cnt++;
1469	}
1470
1471	ixl_dbg_filter((struct ixl_pf *)vsi->back,
1472	    "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
1473	    __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
1474	if (to_del_cnt > 0)
1475		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
1476}
1477
1478/*
1479** Find the filter with both matching mac addr and vlan id
1480*/
1481struct ixl_mac_filter *
1482ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
1483{
1484	struct ixl_mac_filter	*f;
1485
1486	LIST_FOREACH(f, headp, ftle) {
1487		if (ixl_ether_is_equal(f->macaddr, macaddr) &&
1488		    (f->vlan == vlan)) {
1489			return (f);
1490		}
1491	}
1492
1493	return (NULL);
1494}
1495
1496/*
1497** This routine takes additions to the vsi filter
1498** table and creates an Admin Queue call to create
1499** the filters in the hardware.
1500*/
1501void
1502ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
1503{
1504	struct i40e_aqc_add_macvlan_element_data *a, *b;
1505	struct ixl_mac_filter	*f, *fn;
1506	struct ixl_pf		*pf;
1507	struct i40e_hw		*hw;
1508	device_t		dev;
1509	enum i40e_status_code	status;
1510	int			j = 0;
1511
1512	pf = vsi->back;
1513	dev = vsi->dev;
1514	hw = &pf->hw;
1515
1516	ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);
1517
1518	if (cnt < 1) {
1519		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
1520		return;
1521	}
1522
1523	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1524	    M_IXL, M_NOWAIT | M_ZERO);
1525	if (a == NULL) {
1526		device_printf(dev, "add_hw_filters failed to get memory\n");
1527		return;
1528	}
1529
1530	LIST_FOREACH(f, to_add, ftle) {
1531		b = &a[j]; // a pox on fvl long names :)
1532		bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
1533		if (f->vlan == IXL_VLAN_ANY) {
1534			b->vlan_tag = 0;
1535			b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1536		} else {
1537			b->vlan_tag = f->vlan;
1538			b->flags = 0;
1539		}
1540		b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1541		/* Some FW versions do not set match method
1542		 * when adding filters fails. Initialize it with
1543		 * expected error value to allow detection which
1544		 * filters were not added */
1545		b->match_method = I40E_AQC_MM_ERR_NO_RES;
1546		ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1547		    MAC_FORMAT_ARGS(f->macaddr));
1548
1549		if (++j == cnt)
1550			break;
1551	}
1552	if (j != cnt) {
1553		/* Something went wrong */
1554		device_printf(dev,
1555		    "%s ERROR: list of filters to short expected: %d, found: %d\n",
1556		    __func__, cnt, j);
1557		ixl_free_filters(to_add);
1558		goto out_free;
1559	}
1560
1561	status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
1562	if (status == I40E_SUCCESS) {
1563		LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
1564		vsi->num_hw_filters += j;
1565		goto out_free;
1566	}
1567
1568	device_printf(dev,
1569	    "i40e_aq_add_macvlan status %s, error %s\n",
1570	    i40e_stat_str(hw, status),
1571	    i40e_aq_str(hw, hw->aq.asq_last_status));
1572	j = 0;
1573
1574	/* Verify which filters were actually configured in HW
1575	 * and add them to the list */
1576	LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
1577		LIST_REMOVE(f, ftle);
1578		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
1579			ixl_dbg_filter(pf,
1580			    "%s filter " MAC_FORMAT " VTAG: %d not added\n",
1581			    __func__,
1582			    MAC_FORMAT_ARGS(f->macaddr),
1583			    f->vlan);
1584			free(f, M_IXL);
1585		} else {
1586			LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
1587			vsi->num_hw_filters++;
1588		}
1589		j++;
1590	}
1591
1592out_free:
1593	free(a, M_IXL);
1594}
1595
1596/*
1597** This routine takes removals in the vsi filter
1598** table and creates an Admin Queue call to delete
1599** the filters in the hardware.
1600*/
1601void
1602ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
1603{
1604	struct i40e_aqc_remove_macvlan_element_data *d, *e;
1605	struct ixl_pf		*pf;
1606	struct i40e_hw		*hw;
1607	device_t		dev;
1608	struct ixl_mac_filter	*f, *f_temp;
1609	enum i40e_status_code	status;
1610	int			j = 0;
1611
1612	pf = vsi->back;
1613	hw = &pf->hw;
1614	dev = vsi->dev;
1615
1616	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
1617
1618	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
1619	    M_IXL, M_NOWAIT | M_ZERO);
1620	if (d == NULL) {
1621		device_printf(dev, "%s: failed to get memory\n", __func__);
1622		return;
1623	}
1624
1625	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
1626		e = &d[j]; // a pox on fvl long names :)
1627		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
1628		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1629		if (f->vlan == IXL_VLAN_ANY) {
1630			e->vlan_tag = 0;
1631			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1632		} else {
1633			e->vlan_tag = f->vlan;
1634		}
1635
1636		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
1637		    MAC_FORMAT_ARGS(f->macaddr));
1638
1639		/* delete entry from the list */
1640		LIST_REMOVE(f, ftle);
1641		free(f, M_IXL);
1642		if (++j == cnt)
1643			break;
1644	}
1645	if (j != cnt || !LIST_EMPTY(to_del)) {
1646		/* Something went wrong */
1647		device_printf(dev,
1648		    "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
1649		    __func__, cnt, j);
1650		ixl_free_filters(to_del);
1651		goto out_free;
1652	}
1653	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
1654	if (status) {
1655		device_printf(dev,
1656		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
1657		    __func__, i40e_stat_str(hw, status),
1658		    i40e_aq_str(hw, hw->aq.asq_last_status));
1659		for (int i = 0; i < j; i++) {
1660			if (d[i].error_code == 0)
1661				continue;
1662			device_printf(dev,
1663			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
1664			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
1665			    d[i].vlan_tag);
1666		}
1667	}
1668
1669	vsi->num_hw_filters -= j;
1670
1671out_free:
1672	free(d, M_IXL);
1673
1674	ixl_dbg_filter(pf, "%s: end\n", __func__);
1675}
1676
1677int
1678ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1679{
1680	struct i40e_hw	*hw = &pf->hw;
1681	int		error = 0;
1682	u32		reg;
1683	u16		pf_qidx;
1684
1685	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1686
1687	ixl_dbg(pf, IXL_DBG_EN_DIS,
1688	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
1689	    pf_qidx, vsi_qidx);
1690
1691	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1692
1693	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1694	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1695	    I40E_QTX_ENA_QENA_STAT_MASK;
1696	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1697	/* Verify the enable took */
1698	for (int j = 0; j < 10; j++) {
1699		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1700		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1701			break;
1702		i40e_usec_delay(10);
1703	}
1704	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1705		device_printf(pf->dev, "TX queue %d still disabled!\n",
1706		    pf_qidx);
1707		error = ETIMEDOUT;
1708	}
1709
1710	return (error);
1711}
1712
1713int
1714ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1715{
1716	struct i40e_hw	*hw = &pf->hw;
1717	int		error = 0;
1718	u32		reg;
1719	u16		pf_qidx;
1720
1721	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1722
1723	ixl_dbg(pf, IXL_DBG_EN_DIS,
1724	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1725	    pf_qidx, vsi_qidx);
1726
1727	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1728	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1729	    I40E_QRX_ENA_QENA_STAT_MASK;
1730	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1731	/* Verify the enable took */
1732	for (int j = 0; j < 10; j++) {
1733		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1734		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1735			break;
1736		i40e_usec_delay(10);
1737	}
1738	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1739		device_printf(pf->dev, "RX queue %d still disabled!\n",
1740		    pf_qidx);
1741		error = ETIMEDOUT;
1742	}
1743
1744	return (error);
1745}
1746
1747int
1748ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1749{
1750	int error = 0;
1751
1752	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1753	/* Called function already prints error message */
1754	if (error)
1755		return (error);
1756	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1757	return (error);
1758}
1759
1760/*
1761 * Returns error on first ring that is detected hung.
1762 */
1763int
1764ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1765{
1766	struct i40e_hw	*hw = &pf->hw;
1767	int		error = 0;
1768	u32		reg;
1769	u16		pf_qidx;
1770
1771	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1772
1773	ixl_dbg(pf, IXL_DBG_EN_DIS,
1774	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1775	    pf_qidx, vsi_qidx);
1776
1777	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1778	i40e_usec_delay(500);
1779
1780	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1781	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1782	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1783	/* Verify the disable took */
1784	for (int j = 0; j < 10; j++) {
1785		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1786		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1787			break;
1788		i40e_msec_delay(10);
1789	}
1790	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1791		device_printf(pf->dev, "TX queue %d still enabled!\n",
1792		    pf_qidx);
1793		error = ETIMEDOUT;
1794	}
1795
1796	return (error);
1797}
1798
1799/*
1800 * Returns error on first ring that is detected hung.
1801 */
1802int
1803ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1804{
1805	struct i40e_hw	*hw = &pf->hw;
1806	int		error = 0;
1807	u32		reg;
1808	u16		pf_qidx;
1809
1810	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1811
1812	ixl_dbg(pf, IXL_DBG_EN_DIS,
1813	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1814	    pf_qidx, vsi_qidx);
1815
1816	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1817	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1818	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1819	/* Verify the disable took */
1820	for (int j = 0; j < 10; j++) {
1821		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1822		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1823			break;
1824		i40e_msec_delay(10);
1825	}
1826	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1827		device_printf(pf->dev, "RX queue %d still enabled!\n",
1828		    pf_qidx);
1829		error = ETIMEDOUT;
1830	}
1831
1832	return (error);
1833}
1834
1835int
1836ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1837{
1838	int error = 0;
1839
1840	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1841	/* Called function already prints error message */
1842	if (error)
1843		return (error);
1844	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1845	return (error);
1846}
1847
1848static void
1849ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1850{
1851	struct i40e_hw *hw = &pf->hw;
1852	device_t dev = pf->dev;
1853	struct ixl_vf *vf;
1854	bool mdd_detected = false;
1855	bool pf_mdd_detected = false;
1856	bool vf_mdd_detected = false;
1857	u16 vf_num, queue;
1858	u8 pf_num, event;
1859	u8 pf_mdet_num, vp_mdet_num;
1860	u32 reg;
1861
1862	/* find what triggered the MDD event */
1863	reg = rd32(hw, I40E_GL_MDET_TX);
1864	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1865		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1866		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
1867		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1868		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
1869		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1870		    I40E_GL_MDET_TX_EVENT_SHIFT;
1871		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1872		    I40E_GL_MDET_TX_QUEUE_SHIFT;
1873		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1874		mdd_detected = true;
1875	}
1876
1877	if (!mdd_detected)
1878		return;
1879
1880	reg = rd32(hw, I40E_PF_MDET_TX);
1881	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1882		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1883		pf_mdet_num = hw->pf_id;
1884		pf_mdd_detected = true;
1885	}
1886
1887	/* Check if MDD was caused by a VF */
1888	for (int i = 0; i < pf->num_vfs; i++) {
1889		vf = &(pf->vfs[i]);
1890		reg = rd32(hw, I40E_VP_MDET_TX(i));
1891		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1892			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1893			vp_mdet_num = i;
1894			vf->num_mdd_events++;
1895			vf_mdd_detected = true;
1896		}
1897	}
1898
1899	/* Print out an error message */
1900	if (vf_mdd_detected && pf_mdd_detected)
1901		device_printf(dev,
1902		    "Malicious Driver Detection event %d"
1903		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1904		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1905	else if (vf_mdd_detected && !pf_mdd_detected)
1906		device_printf(dev,
1907		    "Malicious Driver Detection event %d"
1908		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1909		    event, queue, pf_num, vf_num, vp_mdet_num);
1910	else if (!vf_mdd_detected && pf_mdd_detected)
1911		device_printf(dev,
1912		    "Malicious Driver Detection event %d"
1913		    " on TX queue %d, pf number %d (PF-%d)\n",
1914		    event, queue, pf_num, pf_mdet_num);
1915	/* Theoretically shouldn't happen */
1916	else
1917		device_printf(dev,
1918		    "TX Malicious Driver Detection event (unknown)\n");
1919}
1920
1921static void
1922ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1923{
1924	struct i40e_hw *hw = &pf->hw;
1925	device_t dev = pf->dev;
1926	struct ixl_vf *vf;
1927	bool mdd_detected = false;
1928	bool pf_mdd_detected = false;
1929	bool vf_mdd_detected = false;
1930	u16 queue;
1931	u8 pf_num, event;
1932	u8 pf_mdet_num, vp_mdet_num;
1933	u32 reg;
1934
1935	/*
1936	 * GL_MDET_RX doesn't contain VF number information, unlike
1937	 * GL_MDET_TX.
1938	 */
1939	reg = rd32(hw, I40E_GL_MDET_RX);
1940	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1941		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1942		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
1943		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1944		    I40E_GL_MDET_RX_EVENT_SHIFT;
1945		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1946		    I40E_GL_MDET_RX_QUEUE_SHIFT;
1947		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1948		mdd_detected = true;
1949	}
1950
1951	if (!mdd_detected)
1952		return;
1953
1954	reg = rd32(hw, I40E_PF_MDET_RX);
1955	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1956		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1957		pf_mdet_num = hw->pf_id;
1958		pf_mdd_detected = true;
1959	}
1960
1961	/* Check if MDD was caused by a VF */
1962	for (int i = 0; i < pf->num_vfs; i++) {
1963		vf = &(pf->vfs[i]);
1964		reg = rd32(hw, I40E_VP_MDET_RX(i));
1965		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1966			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1967			vp_mdet_num = i;
1968			vf->num_mdd_events++;
1969			vf_mdd_detected = true;
1970		}
1971	}
1972
1973	/* Print out an error message */
1974	if (vf_mdd_detected && pf_mdd_detected)
1975		device_printf(dev,
1976		    "Malicious Driver Detection event %d"
1977		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1978		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1979	else if (vf_mdd_detected && !pf_mdd_detected)
1980		device_printf(dev,
1981		    "Malicious Driver Detection event %d"
1982		    " on RX queue %d, pf number %d, (VF-%d)\n",
1983		    event, queue, pf_num, vp_mdet_num);
1984	else if (!vf_mdd_detected && pf_mdd_detected)
1985		device_printf(dev,
1986		    "Malicious Driver Detection event %d"
1987		    " on RX queue %d, pf number %d (PF-%d)\n",
1988		    event, queue, pf_num, pf_mdet_num);
1989	/* Theoretically shouldn't happen */
1990	else
1991		device_printf(dev,
1992		    "RX Malicious Driver Detection event (unknown)\n");
1993}
1994
1995/**
1996 * ixl_handle_mdd_event
1997 *
1998 * Called from interrupt handler to identify possibly malicious vfs
1999 * (But also detects events from the PF, as well)
2000 **/
2001void
2002ixl_handle_mdd_event(struct ixl_pf *pf)
2003{
2004	struct i40e_hw *hw = &pf->hw;
2005	u32 reg;
2006
2007	/*
2008	 * Handle both TX/RX because it's possible they could
2009	 * both trigger in the same interrupt.
2010	 */
2011	ixl_handle_tx_mdd_event(pf);
2012	ixl_handle_rx_mdd_event(pf);
2013
2014	ixl_clear_state(&pf->state, IXL_STATE_MDD_PENDING);
2015
2016	/* re-enable mdd interrupt cause */
2017	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2018	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2019	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2020	ixl_flush(hw);
2021}
2022
2023void
2024ixl_enable_intr0(struct i40e_hw *hw)
2025{
2026	u32		reg;
2027
2028	/* Use IXL_ITR_NONE so ITR isn't updated here */
2029	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2030	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2031	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2032	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2033}
2034
2035void
2036ixl_disable_intr0(struct i40e_hw *hw)
2037{
2038	u32		reg;
2039
2040	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2041	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2042	ixl_flush(hw);
2043}
2044
2045void
2046ixl_enable_queue(struct i40e_hw *hw, int id)
2047{
2048	u32		reg;
2049
2050	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2051	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2052	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2053	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2054}
2055
2056void
2057ixl_disable_queue(struct i40e_hw *hw, int id)
2058{
2059	u32		reg;
2060
2061	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2062	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2063}
2064
2065void
2066ixl_handle_empr_reset(struct ixl_pf *pf)
2067{
2068	struct ixl_vsi	*vsi = &pf->vsi;
2069	bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING);
2070
2071	ixl_prepare_for_reset(pf, is_up);
2072	/*
2073	 * i40e_pf_reset checks the type of reset and acts
2074	 * accordingly. If EMP or Core reset was performed
2075	 * doing PF reset is not necessary and it sometimes
2076	 * fails.
2077	 */
2078	ixl_pf_reset(pf);
2079
2080	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
2081	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
2082		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
2083		device_printf(pf->dev,
2084		    "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
2085		pf->link_up = FALSE;
2086		ixl_update_link_status(pf);
2087	}
2088
2089	ixl_rebuild_hw_structs_after_reset(pf, is_up);
2090
2091	ixl_clear_state(&pf->state, IXL_STATE_RESETTING);
2092}
2093
2094void
2095ixl_update_stats_counters(struct ixl_pf *pf)
2096{
2097	struct i40e_hw	*hw = &pf->hw;
2098	struct ixl_vsi	*vsi = &pf->vsi;
2099	struct ixl_vf	*vf;
2100	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
2101
2102	struct i40e_hw_port_stats *nsd = &pf->stats;
2103	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2104
2105	/* Update hw stats */
2106	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2107			   pf->stat_offsets_loaded,
2108			   &osd->crc_errors, &nsd->crc_errors);
2109	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2110			   pf->stat_offsets_loaded,
2111			   &osd->illegal_bytes, &nsd->illegal_bytes);
2112	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
2113			   I40E_GLPRT_GORCL(hw->port),
2114			   pf->stat_offsets_loaded,
2115			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2116	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
2117			   I40E_GLPRT_GOTCL(hw->port),
2118			   pf->stat_offsets_loaded,
2119			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2120	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2121			   pf->stat_offsets_loaded,
2122			   &osd->eth.rx_discards,
2123			   &nsd->eth.rx_discards);
2124	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
2125			   I40E_GLPRT_UPRCL(hw->port),
2126			   pf->stat_offsets_loaded,
2127			   &osd->eth.rx_unicast,
2128			   &nsd->eth.rx_unicast);
2129	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
2130			   I40E_GLPRT_UPTCL(hw->port),
2131			   pf->stat_offsets_loaded,
2132			   &osd->eth.tx_unicast,
2133			   &nsd->eth.tx_unicast);
2134	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
2135			   I40E_GLPRT_MPRCL(hw->port),
2136			   pf->stat_offsets_loaded,
2137			   &osd->eth.rx_multicast,
2138			   &nsd->eth.rx_multicast);
2139	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
2140			   I40E_GLPRT_MPTCL(hw->port),
2141			   pf->stat_offsets_loaded,
2142			   &osd->eth.tx_multicast,
2143			   &nsd->eth.tx_multicast);
2144	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
2145			   I40E_GLPRT_BPRCL(hw->port),
2146			   pf->stat_offsets_loaded,
2147			   &osd->eth.rx_broadcast,
2148			   &nsd->eth.rx_broadcast);
2149	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
2150			   I40E_GLPRT_BPTCL(hw->port),
2151			   pf->stat_offsets_loaded,
2152			   &osd->eth.tx_broadcast,
2153			   &nsd->eth.tx_broadcast);
2154
2155	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2156			   pf->stat_offsets_loaded,
2157			   &osd->tx_dropped_link_down,
2158			   &nsd->tx_dropped_link_down);
2159	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2160			   pf->stat_offsets_loaded,
2161			   &osd->mac_local_faults,
2162			   &nsd->mac_local_faults);
2163	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2164			   pf->stat_offsets_loaded,
2165			   &osd->mac_remote_faults,
2166			   &nsd->mac_remote_faults);
2167	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2168			   pf->stat_offsets_loaded,
2169			   &osd->rx_length_errors,
2170			   &nsd->rx_length_errors);
2171
2172	/* Flow control (LFC) stats */
2173	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2174			   pf->stat_offsets_loaded,
2175			   &osd->link_xon_rx, &nsd->link_xon_rx);
2176	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2177			   pf->stat_offsets_loaded,
2178			   &osd->link_xon_tx, &nsd->link_xon_tx);
2179	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2180			   pf->stat_offsets_loaded,
2181			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2182	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2183			   pf->stat_offsets_loaded,
2184			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2185
2186	/*
2187	 * For watchdog management we need to know if we have been paused
2188	 * during the last interval, so capture that here.
2189	 */
2190	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2191		vsi->shared->isc_pause_frames = 1;
2192
2193	/* Packet size stats rx */
2194	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2195			   I40E_GLPRT_PRC64L(hw->port),
2196			   pf->stat_offsets_loaded,
2197			   &osd->rx_size_64, &nsd->rx_size_64);
2198	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2199			   I40E_GLPRT_PRC127L(hw->port),
2200			   pf->stat_offsets_loaded,
2201			   &osd->rx_size_127, &nsd->rx_size_127);
2202	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2203			   I40E_GLPRT_PRC255L(hw->port),
2204			   pf->stat_offsets_loaded,
2205			   &osd->rx_size_255, &nsd->rx_size_255);
2206	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2207			   I40E_GLPRT_PRC511L(hw->port),
2208			   pf->stat_offsets_loaded,
2209			   &osd->rx_size_511, &nsd->rx_size_511);
2210	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2211			   I40E_GLPRT_PRC1023L(hw->port),
2212			   pf->stat_offsets_loaded,
2213			   &osd->rx_size_1023, &nsd->rx_size_1023);
2214	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2215			   I40E_GLPRT_PRC1522L(hw->port),
2216			   pf->stat_offsets_loaded,
2217			   &osd->rx_size_1522, &nsd->rx_size_1522);
2218	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2219			   I40E_GLPRT_PRC9522L(hw->port),
2220			   pf->stat_offsets_loaded,
2221			   &osd->rx_size_big, &nsd->rx_size_big);
2222
2223	/* Packet size stats tx */
2224	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2225			   I40E_GLPRT_PTC64L(hw->port),
2226			   pf->stat_offsets_loaded,
2227			   &osd->tx_size_64, &nsd->tx_size_64);
2228	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2229			   I40E_GLPRT_PTC127L(hw->port),
2230			   pf->stat_offsets_loaded,
2231			   &osd->tx_size_127, &nsd->tx_size_127);
2232	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2233			   I40E_GLPRT_PTC255L(hw->port),
2234			   pf->stat_offsets_loaded,
2235			   &osd->tx_size_255, &nsd->tx_size_255);
2236	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2237			   I40E_GLPRT_PTC511L(hw->port),
2238			   pf->stat_offsets_loaded,
2239			   &osd->tx_size_511, &nsd->tx_size_511);
2240	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2241			   I40E_GLPRT_PTC1023L(hw->port),
2242			   pf->stat_offsets_loaded,
2243			   &osd->tx_size_1023, &nsd->tx_size_1023);
2244	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2245			   I40E_GLPRT_PTC1522L(hw->port),
2246			   pf->stat_offsets_loaded,
2247			   &osd->tx_size_1522, &nsd->tx_size_1522);
2248	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2249			   I40E_GLPRT_PTC9522L(hw->port),
2250			   pf->stat_offsets_loaded,
2251			   &osd->tx_size_big, &nsd->tx_size_big);
2252
2253	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2254			   pf->stat_offsets_loaded,
2255			   &osd->rx_undersize, &nsd->rx_undersize);
2256	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2257			   pf->stat_offsets_loaded,
2258			   &osd->rx_fragments, &nsd->rx_fragments);
2259	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2260			   pf->stat_offsets_loaded,
2261			   &osd->rx_oversize, &nsd->rx_oversize);
2262	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2263			   pf->stat_offsets_loaded,
2264			   &osd->rx_jabber, &nsd->rx_jabber);
2265	/* EEE */
2266	i40e_get_phy_lpi_status(hw, nsd);
2267
2268	i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2269			  &osd->tx_lpi_count, &nsd->tx_lpi_count,
2270			  &osd->rx_lpi_count, &nsd->rx_lpi_count);
2271
2272	pf->stat_offsets_loaded = true;
2273	/* End hw stats */
2274
2275	/* Update vsi stats */
2276	ixl_update_vsi_stats(vsi);
2277
2278	for (int i = 0; i < pf->num_vfs; i++) {
2279		vf = &pf->vfs[i];
2280		if (vf->vf_flags & VF_FLAG_ENABLED)
2281			ixl_update_eth_stats(&pf->vfs[i].vsi);
2282	}
2283}
2284
2285/**
2286 * Update VSI-specific ethernet statistics counters.
2287 **/
2288void
2289ixl_update_eth_stats(struct ixl_vsi *vsi)
2290{
2291	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2292	struct i40e_hw *hw = &pf->hw;
2293	struct i40e_eth_stats *es;
2294	struct i40e_eth_stats *oes;
2295	u16 stat_idx = vsi->info.stat_counter_idx;
2296
2297	es = &vsi->eth_stats;
2298	oes = &vsi->eth_stats_offsets;
2299
2300	/* Gather up the stats that the hw collects */
2301	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2302			   vsi->stat_offsets_loaded,
2303			   &oes->tx_errors, &es->tx_errors);
2304	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2305			   vsi->stat_offsets_loaded,
2306			   &oes->rx_discards, &es->rx_discards);
2307
2308	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2309			   I40E_GLV_GORCL(stat_idx),
2310			   vsi->stat_offsets_loaded,
2311			   &oes->rx_bytes, &es->rx_bytes);
2312	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2313			   I40E_GLV_UPRCL(stat_idx),
2314			   vsi->stat_offsets_loaded,
2315			   &oes->rx_unicast, &es->rx_unicast);
2316	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2317			   I40E_GLV_MPRCL(stat_idx),
2318			   vsi->stat_offsets_loaded,
2319			   &oes->rx_multicast, &es->rx_multicast);
2320	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2321			   I40E_GLV_BPRCL(stat_idx),
2322			   vsi->stat_offsets_loaded,
2323			   &oes->rx_broadcast, &es->rx_broadcast);
2324
2325	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2326			   I40E_GLV_GOTCL(stat_idx),
2327			   vsi->stat_offsets_loaded,
2328			   &oes->tx_bytes, &es->tx_bytes);
2329	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2330			   I40E_GLV_UPTCL(stat_idx),
2331			   vsi->stat_offsets_loaded,
2332			   &oes->tx_unicast, &es->tx_unicast);
2333	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2334			   I40E_GLV_MPTCL(stat_idx),
2335			   vsi->stat_offsets_loaded,
2336			   &oes->tx_multicast, &es->tx_multicast);
2337	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2338			   I40E_GLV_BPTCL(stat_idx),
2339			   vsi->stat_offsets_loaded,
2340			   &oes->tx_broadcast, &es->tx_broadcast);
2341	vsi->stat_offsets_loaded = true;
2342}
2343
2344void
2345ixl_update_vsi_stats(struct ixl_vsi *vsi)
2346{
2347	struct ixl_pf		*pf;
2348	struct i40e_eth_stats	*es;
2349	u64			tx_discards, csum_errs;
2350
2351	struct i40e_hw_port_stats *nsd;
2352
2353	pf = vsi->back;
2354	es = &vsi->eth_stats;
2355	nsd = &pf->stats;
2356
2357	ixl_update_eth_stats(vsi);
2358
2359	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2360
2361	csum_errs = 0;
2362	for (int i = 0; i < vsi->num_rx_queues; i++)
2363		csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2364	nsd->checksum_error = csum_errs;
2365
2366	/* Update ifnet stats */
2367	IXL_SET_IPACKETS(vsi, es->rx_unicast +
2368	                   es->rx_multicast +
2369			   es->rx_broadcast);
2370	IXL_SET_OPACKETS(vsi, es->tx_unicast +
2371	                   es->tx_multicast +
2372			   es->tx_broadcast);
2373	IXL_SET_IBYTES(vsi, es->rx_bytes);
2374	IXL_SET_OBYTES(vsi, es->tx_bytes);
2375	IXL_SET_IMCASTS(vsi, es->rx_multicast);
2376	IXL_SET_OMCASTS(vsi, es->tx_multicast);
2377
2378	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2379	    nsd->checksum_error + nsd->rx_length_errors +
2380	    nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2381	    nsd->rx_jabber);
2382	IXL_SET_OERRORS(vsi, es->tx_errors);
2383	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2384	IXL_SET_OQDROPS(vsi, tx_discards);
2385	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2386	IXL_SET_COLLISIONS(vsi, 0);
2387}
2388
2389/**
2390 * Reset all of the stats for the given pf
2391 **/
2392void
2393ixl_pf_reset_stats(struct ixl_pf *pf)
2394{
2395	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2396	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2397	pf->stat_offsets_loaded = false;
2398}
2399
2400/**
2401 * Resets all stats of the given vsi
2402 **/
2403void
2404ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2405{
2406	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2407	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2408	vsi->stat_offsets_loaded = false;
2409}
2410
2411/**
2412 * Read and update a 48 bit stat from the hw
2413 *
2414 * Since the device stats are not reset at PFReset, they likely will not
2415 * be zeroed when the driver starts.  We'll save the first values read
2416 * and use them as offsets to be subtracted from the raw values in order
2417 * to report stats that count from zero.
2418 **/
2419void
2420ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2421	bool offset_loaded, u64 *offset, u64 *stat)
2422{
2423	u64 new_data;
2424
2425	new_data = rd64(hw, loreg);
2426
2427	if (!offset_loaded)
2428		*offset = new_data;
2429	if (new_data >= *offset)
2430		*stat = new_data - *offset;
2431	else
2432		*stat = (new_data + ((u64)1 << 48)) - *offset;
2433	*stat &= 0xFFFFFFFFFFFFULL;
2434}
2435
2436/**
2437 * Read and update a 32 bit stat from the hw
2438 **/
2439void
2440ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2441	bool offset_loaded, u64 *offset, u64 *stat)
2442{
2443	u32 new_data;
2444
2445	new_data = rd32(hw, reg);
2446	if (!offset_loaded)
2447		*offset = new_data;
2448	if (new_data >= *offset)
2449		*stat = (u32)(new_data - *offset);
2450	else
2451		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2452}
2453
2454/**
2455 * Add subset of device sysctls safe to use in recovery mode
2456 */
2457void
2458ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2459{
2460	device_t dev = pf->dev;
2461
2462	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2463	struct sysctl_oid_list *ctx_list =
2464	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2465
2466	struct sysctl_oid *debug_node;
2467	struct sysctl_oid_list *debug_list;
2468
2469	SYSCTL_ADD_PROC(ctx, ctx_list,
2470	    OID_AUTO, "fw_version",
2471	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2472	    ixl_sysctl_show_fw, "A", "Firmware version");
2473
2474	/* Add sysctls meant to print debug information, but don't list them
2475	 * in "sysctl -a" output. */
2476	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2477	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2478	    "Debug Sysctls");
2479	debug_list = SYSCTL_CHILDREN(debug_node);
2480
2481	SYSCTL_ADD_UINT(ctx, debug_list,
2482	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2483	    &pf->hw.debug_mask, 0, "Shared code debug message level");
2484
2485	SYSCTL_ADD_UINT(ctx, debug_list,
2486	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2487	    &pf->dbg_mask, 0, "Non-shared code debug message level");
2488
2489	SYSCTL_ADD_PROC(ctx, debug_list,
2490	    OID_AUTO, "dump_debug_data",
2491	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2492	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2493
2494	SYSCTL_ADD_PROC(ctx, debug_list,
2495	    OID_AUTO, "do_pf_reset",
2496	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2497	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2498
2499	SYSCTL_ADD_PROC(ctx, debug_list,
2500	    OID_AUTO, "do_core_reset",
2501	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2502	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2503
2504	SYSCTL_ADD_PROC(ctx, debug_list,
2505	    OID_AUTO, "do_global_reset",
2506	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2507	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2508
2509	SYSCTL_ADD_PROC(ctx, debug_list,
2510	    OID_AUTO, "queue_interrupt_table",
2511	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2512	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2513}
2514
2515void
2516ixl_add_device_sysctls(struct ixl_pf *pf)
2517{
2518	device_t dev = pf->dev;
2519	struct i40e_hw *hw = &pf->hw;
2520
2521	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2522	struct sysctl_oid_list *ctx_list =
2523	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2524
2525	struct sysctl_oid *debug_node;
2526	struct sysctl_oid_list *debug_list;
2527
2528	struct sysctl_oid *fec_node;
2529	struct sysctl_oid_list *fec_list;
2530	struct sysctl_oid *eee_node;
2531	struct sysctl_oid_list *eee_list;
2532
2533	/* Set up sysctls */
2534	SYSCTL_ADD_PROC(ctx, ctx_list,
2535	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2536	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2537
2538	SYSCTL_ADD_PROC(ctx, ctx_list,
2539	    OID_AUTO, "advertise_speed",
2540	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2541	    ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2542
2543	SYSCTL_ADD_PROC(ctx, ctx_list,
2544	    OID_AUTO, "supported_speeds",
2545	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2546	    ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2547
2548	SYSCTL_ADD_PROC(ctx, ctx_list,
2549	    OID_AUTO, "current_speed",
2550	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2551	    ixl_sysctl_current_speed, "A", "Current Port Speed");
2552
2553	SYSCTL_ADD_PROC(ctx, ctx_list,
2554	    OID_AUTO, "fw_version",
2555	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2556	    ixl_sysctl_show_fw, "A", "Firmware version");
2557
2558	SYSCTL_ADD_PROC(ctx, ctx_list,
2559	    OID_AUTO, "unallocated_queues",
2560	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2561	    ixl_sysctl_unallocated_queues, "I",
2562	    "Queues not allocated to a PF or VF");
2563
2564	SYSCTL_ADD_PROC(ctx, ctx_list,
2565	    OID_AUTO, "tx_itr",
2566	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2567	    ixl_sysctl_pf_tx_itr, "I",
2568	    "Immediately set TX ITR value for all queues");
2569
2570	SYSCTL_ADD_PROC(ctx, ctx_list,
2571	    OID_AUTO, "rx_itr",
2572	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2573	    ixl_sysctl_pf_rx_itr, "I",
2574	    "Immediately set RX ITR value for all queues");
2575
2576	SYSCTL_ADD_INT(ctx, ctx_list,
2577	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2578	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2579
2580	SYSCTL_ADD_INT(ctx, ctx_list,
2581	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2582	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2583
2584	/* Add FEC sysctls for 25G adapters */
2585	if (i40e_is_25G_device(hw->device_id)) {
2586		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2587		    OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2588		    "FEC Sysctls");
2589		fec_list = SYSCTL_CHILDREN(fec_node);
2590
2591		SYSCTL_ADD_PROC(ctx, fec_list,
2592		    OID_AUTO, "fc_ability",
2593		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2594		    ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2595
2596		SYSCTL_ADD_PROC(ctx, fec_list,
2597		    OID_AUTO, "rs_ability",
2598		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2599		    ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2600
2601		SYSCTL_ADD_PROC(ctx, fec_list,
2602		    OID_AUTO, "fc_requested",
2603		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2604		    ixl_sysctl_fec_fc_request, "I",
2605		    "FC FEC mode requested on link");
2606
2607		SYSCTL_ADD_PROC(ctx, fec_list,
2608		    OID_AUTO, "rs_requested",
2609		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2610		    ixl_sysctl_fec_rs_request, "I",
2611		    "RS FEC mode requested on link");
2612
2613		SYSCTL_ADD_PROC(ctx, fec_list,
2614		    OID_AUTO, "auto_fec_enabled",
2615		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2616		    ixl_sysctl_fec_auto_enable, "I",
2617		    "Let FW decide FEC ability/request modes");
2618	}
2619
2620	SYSCTL_ADD_PROC(ctx, ctx_list,
2621	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2622	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2623
2624	eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2625	    OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2626	    "Energy Efficient Ethernet (EEE) Sysctls");
2627	eee_list = SYSCTL_CHILDREN(eee_node);
2628
2629	SYSCTL_ADD_PROC(ctx, eee_list,
2630	    OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2631	    pf, 0, ixl_sysctl_eee_enable, "I",
2632	    "Enable Energy Efficient Ethernet (EEE)");
2633
2634	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2635	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2636	    "TX LPI status");
2637
2638	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2639	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2640	    "RX LPI status");
2641
2642	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2643	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2644	    "TX LPI count");
2645
2646	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2647	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2648	    "RX LPI count");
2649
2650	SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
2651	    "link_active_on_if_down",
2652	    CTLTYPE_INT | CTLFLAG_RWTUN,
2653	    pf, 0, ixl_sysctl_set_link_active, "I",
2654	    IXL_SYSCTL_HELP_SET_LINK_ACTIVE);
2655
2656	/* Add sysctls meant to print debug information, but don't list them
2657	 * in "sysctl -a" output. */
2658	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2659	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2660	    "Debug Sysctls");
2661	debug_list = SYSCTL_CHILDREN(debug_node);
2662
2663	SYSCTL_ADD_UINT(ctx, debug_list,
2664	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2665	    &pf->hw.debug_mask, 0, "Shared code debug message level");
2666
2667	SYSCTL_ADD_UINT(ctx, debug_list,
2668	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2669	    &pf->dbg_mask, 0, "Non-shared code debug message level");
2670
2671	SYSCTL_ADD_PROC(ctx, debug_list,
2672	    OID_AUTO, "link_status",
2673	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2674	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2675
2676	SYSCTL_ADD_PROC(ctx, debug_list,
2677	    OID_AUTO, "phy_abilities_init",
2678	    CTLTYPE_STRING | CTLFLAG_RD,
2679	    pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");
2680
2681	SYSCTL_ADD_PROC(ctx, debug_list,
2682	    OID_AUTO, "phy_abilities",
2683	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2684	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2685
2686	SYSCTL_ADD_PROC(ctx, debug_list,
2687	    OID_AUTO, "filter_list",
2688	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2689	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2690
2691	SYSCTL_ADD_PROC(ctx, debug_list,
2692	    OID_AUTO, "hw_res_alloc",
2693	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2694	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2695
2696	SYSCTL_ADD_PROC(ctx, debug_list,
2697	    OID_AUTO, "switch_config",
2698	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2699	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2700
2701	SYSCTL_ADD_PROC(ctx, debug_list,
2702	    OID_AUTO, "switch_vlans",
2703	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2704	    pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2705
2706	SYSCTL_ADD_PROC(ctx, debug_list,
2707	    OID_AUTO, "rss_key",
2708	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2709	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2710
2711	SYSCTL_ADD_PROC(ctx, debug_list,
2712	    OID_AUTO, "rss_lut",
2713	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2714	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2715
2716	SYSCTL_ADD_PROC(ctx, debug_list,
2717	    OID_AUTO, "rss_hena",
2718	    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2719	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2720
2721	SYSCTL_ADD_PROC(ctx, debug_list,
2722	    OID_AUTO, "disable_fw_link_management",
2723	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2724	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2725
2726	SYSCTL_ADD_PROC(ctx, debug_list,
2727	    OID_AUTO, "dump_debug_data",
2728	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2729	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2730
2731	SYSCTL_ADD_PROC(ctx, debug_list,
2732	    OID_AUTO, "do_pf_reset",
2733	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2734	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2735
2736	SYSCTL_ADD_PROC(ctx, debug_list,
2737	    OID_AUTO, "do_core_reset",
2738	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2739	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2740
2741	SYSCTL_ADD_PROC(ctx, debug_list,
2742	    OID_AUTO, "do_global_reset",
2743	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2744	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2745
2746	SYSCTL_ADD_PROC(ctx, debug_list,
2747	    OID_AUTO, "queue_interrupt_table",
2748	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2749	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2750
2751	SYSCTL_ADD_PROC(ctx, debug_list,
2752	    OID_AUTO, "phy_statistics", CTLTYPE_STRING | CTLFLAG_RD,
2753	    pf, 0, ixl_sysctl_phy_statistics, "A", "PHY Statistics");
2754
2755	if (pf->has_i2c) {
2756		SYSCTL_ADD_PROC(ctx, debug_list,
2757		    OID_AUTO, "read_i2c_byte",
2758		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2759		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2760
2761		SYSCTL_ADD_PROC(ctx, debug_list,
2762		    OID_AUTO, "write_i2c_byte",
2763		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2764		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2765
2766		SYSCTL_ADD_PROC(ctx, debug_list,
2767		    OID_AUTO, "read_i2c_diag_data",
2768		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2769		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2770	}
2771}
2772
2773/*
2774 * Primarily for finding out how many queues can be assigned to VFs,
2775 * at runtime.
2776 */
2777static int
2778ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2779{
2780	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2781	int queues;
2782
2783	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2784
2785	return sysctl_handle_int(oidp, NULL, queues, req);
2786}
2787
2788static const char *
2789ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2790{
2791	const char * link_speed_str[] = {
2792		"Unknown",
2793		"100 Mbps",
2794		"1 Gbps",
2795		"10 Gbps",
2796		"40 Gbps",
2797		"20 Gbps",
2798		"25 Gbps",
2799		"2.5 Gbps",
2800		"5 Gbps"
2801	};
2802	int index;
2803
2804	switch (link_speed) {
2805	case I40E_LINK_SPEED_100MB:
2806		index = 1;
2807		break;
2808	case I40E_LINK_SPEED_1GB:
2809		index = 2;
2810		break;
2811	case I40E_LINK_SPEED_10GB:
2812		index = 3;
2813		break;
2814	case I40E_LINK_SPEED_40GB:
2815		index = 4;
2816		break;
2817	case I40E_LINK_SPEED_20GB:
2818		index = 5;
2819		break;
2820	case I40E_LINK_SPEED_25GB:
2821		index = 6;
2822		break;
2823	case I40E_LINK_SPEED_2_5GB:
2824		index = 7;
2825		break;
2826	case I40E_LINK_SPEED_5GB:
2827		index = 8;
2828		break;
2829	case I40E_LINK_SPEED_UNKNOWN:
2830	default:
2831		index = 0;
2832		break;
2833	}
2834
2835	return (link_speed_str[index]);
2836}
2837
2838int
2839ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2840{
2841	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2842	struct i40e_hw *hw = &pf->hw;
2843	int error = 0;
2844
2845	ixl_update_link_status(pf);
2846
2847	error = sysctl_handle_string(oidp,
2848	    __DECONST(void *,
2849		ixl_link_speed_string(hw->phy.link_info.link_speed)),
2850	    8, req);
2851
2852	return (error);
2853}
2854
2855/*
2856 * Converts 8-bit speeds value to and from sysctl flags and
2857 * Admin Queue flags.
2858 */
2859static u8
2860ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2861{
2862#define SPEED_MAP_SIZE 8
2863	static u16 speedmap[SPEED_MAP_SIZE] = {
2864		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
2865		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
2866		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
2867		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
2868		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
2869		(I40E_LINK_SPEED_40GB  | (0x20 << 8)),
2870		(I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2871		(I40E_LINK_SPEED_5GB   | (0x80 << 8)),
2872	};
2873	u8 retval = 0;
2874
2875	for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2876		if (to_aq)
2877			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2878		else
2879			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2880	}
2881
2882	return (retval);
2883}
2884
2885int
2886ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2887{
2888	struct i40e_hw *hw = &pf->hw;
2889	device_t dev = pf->dev;
2890	struct i40e_aq_get_phy_abilities_resp abilities;
2891	struct i40e_aq_set_phy_config config;
2892	enum i40e_status_code aq_error = 0;
2893
2894	/* Get current capability information */
2895	aq_error = i40e_aq_get_phy_capabilities(hw,
2896	    FALSE, FALSE, &abilities, NULL);
2897	if (aq_error) {
2898		device_printf(dev,
2899		    "%s: Error getting phy capabilities %d,"
2900		    " aq error: %d\n", __func__, aq_error,
2901		    hw->aq.asq_last_status);
2902		return (EIO);
2903	}
2904
2905	/* Prepare new config */
2906	bzero(&config, sizeof(config));
2907	if (from_aq)
2908		config.link_speed = speeds;
2909	else
2910		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2911	config.phy_type = abilities.phy_type;
2912	config.phy_type_ext = abilities.phy_type_ext;
2913	config.abilities = abilities.abilities
2914	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2915	config.eee_capability = abilities.eee_capability;
2916	config.eeer = abilities.eeer_val;
2917	config.low_power_ctrl = abilities.d3_lpan;
2918	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2919	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
2920
2921	/* Do aq command & restart link */
2922	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2923	if (aq_error) {
2924		device_printf(dev,
2925		    "%s: Error setting new phy config %d,"
2926		    " aq error: %d\n", __func__, aq_error,
2927		    hw->aq.asq_last_status);
2928		return (EIO);
2929	}
2930
2931	return (0);
2932}
2933
2934/*
2935** Supported link speeds
2936**	Flags:
2937**	 0x1 - 100 Mb
2938**	 0x2 - 1G
2939**	 0x4 - 10G
2940**	 0x8 - 20G
2941**	0x10 - 25G
2942**	0x20 - 40G
2943**	0x40 - 2.5G
2944**	0x80 - 5G
2945*/
2946static int
2947ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2948{
2949	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2950	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2951
2952	return sysctl_handle_int(oidp, NULL, supported, req);
2953}
2954
2955/*
2956** Control link advertise speed:
2957**	Flags:
2958**	 0x1 - advertise 100 Mb
2959**	 0x2 - advertise 1G
2960**	 0x4 - advertise 10G
2961**	 0x8 - advertise 20G
2962**	0x10 - advertise 25G
2963**	0x20 - advertise 40G
2964**	0x40 - advertise 2.5G
2965**	0x80 - advertise 5G
2966**
2967**	Set to 0 to disable link
2968*/
2969int
2970ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2971{
2972	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2973	device_t dev = pf->dev;
2974	u8 converted_speeds;
2975	int requested_ls = 0;
2976	int error = 0;
2977
2978	/* Read in new mode */
2979	requested_ls = pf->advertised_speed;
2980	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2981	if ((error) || (req->newptr == NULL))
2982		return (error);
2983	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2984		device_printf(dev, "Interface is currently in FW recovery mode. "
2985				"Setting advertise speed not supported\n");
2986		return (EINVAL);
2987	}
2988
2989	/* Error out if bits outside of possible flag range are set */
2990	if ((requested_ls & ~((u8)0xFF)) != 0) {
2991		device_printf(dev, "Input advertised speed out of range; "
2992		    "valid flags are: 0x%02x\n",
2993		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2994		return (EINVAL);
2995	}
2996
2997	/* Check if adapter supports input value */
2998	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2999	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3000		device_printf(dev, "Invalid advertised speed; "
3001		    "valid flags are: 0x%02x\n",
3002		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3003		return (EINVAL);
3004	}
3005
3006	error = ixl_set_advertised_speeds(pf, requested_ls, false);
3007	if (error)
3008		return (error);
3009
3010	pf->advertised_speed = requested_ls;
3011	ixl_update_link_status(pf);
3012	return (0);
3013}
3014
3015/*
3016 * Input: bitmap of enum i40e_aq_link_speed
3017 */
3018u64
3019ixl_max_aq_speed_to_value(u8 link_speeds)
3020{
3021	if (link_speeds & I40E_LINK_SPEED_40GB)
3022		return IF_Gbps(40);
3023	if (link_speeds & I40E_LINK_SPEED_25GB)
3024		return IF_Gbps(25);
3025	if (link_speeds & I40E_LINK_SPEED_20GB)
3026		return IF_Gbps(20);
3027	if (link_speeds & I40E_LINK_SPEED_10GB)
3028		return IF_Gbps(10);
3029	if (link_speeds & I40E_LINK_SPEED_5GB)
3030		return IF_Gbps(5);
3031	if (link_speeds & I40E_LINK_SPEED_2_5GB)
3032		return IF_Mbps(2500);
3033	if (link_speeds & I40E_LINK_SPEED_1GB)
3034		return IF_Gbps(1);
3035	if (link_speeds & I40E_LINK_SPEED_100MB)
3036		return IF_Mbps(100);
3037	else
3038		/* Minimum supported link speed */
3039		return IF_Mbps(100);
3040}
3041
3042/*
3043** Get the width and transaction speed of
3044** the bus this adapter is plugged into.
3045*/
3046void
3047ixl_get_bus_info(struct ixl_pf *pf)
3048{
3049	struct i40e_hw *hw = &pf->hw;
3050	device_t dev = pf->dev;
3051        u16 link;
3052        u32 offset, num_ports;
3053	u64 max_speed;
3054
3055	/* Some devices don't use PCIE */
3056	if (hw->mac.type == I40E_MAC_X722)
3057		return;
3058
3059        /* Read PCI Express Capabilities Link Status Register */
3060        pci_find_cap(dev, PCIY_EXPRESS, &offset);
3061        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3062
3063	/* Fill out hw struct with PCIE info */
3064	i40e_set_pci_config_data(hw, link);
3065
3066	/* Use info to print out bandwidth messages */
3067        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3068            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3069            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3070            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3071            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3072            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3073            (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3074            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3075            ("Unknown"));
3076
3077	/*
3078	 * If adapter is in slot with maximum supported speed,
3079	 * no warning message needs to be printed out.
3080	 */
3081	if (hw->bus.speed >= i40e_bus_speed_8000
3082	    && hw->bus.width >= i40e_bus_width_pcie_x8)
3083		return;
3084
3085	num_ports = bitcount32(hw->func_caps.valid_functions);
3086	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3087
3088	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3089                device_printf(dev, "PCI-Express bandwidth available"
3090                    " for this device may be insufficient for"
3091                    " optimal performance.\n");
3092                device_printf(dev, "Please move the device to a different"
3093		    " PCI-e link with more lanes and/or higher"
3094		    " transfer rate.\n");
3095        }
3096}
3097
3098static int
3099ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3100{
3101	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
3102	struct i40e_hw	*hw = &pf->hw;
3103	struct sbuf	*sbuf;
3104
3105	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3106	ixl_nvm_version_str(hw, sbuf);
3107	sbuf_finish(sbuf);
3108	sbuf_delete(sbuf);
3109
3110	return (0);
3111}
3112
3113void
3114ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3115{
3116	u8 nvma_ptr = nvma->config & 0xFF;
3117	u8 nvma_flags = (nvma->config & 0xF00) >> 8;
3118	const char * cmd_str;
3119
3120	switch (nvma->command) {
3121	case I40E_NVM_READ:
3122		if (nvma_ptr == 0xF && nvma_flags == 0xF &&
3123		    nvma->offset == 0 && nvma->data_size == 1) {
3124			device_printf(dev, "NVMUPD: Get Driver Status Command\n");
3125			return;
3126		}
3127		cmd_str = "READ ";
3128		break;
3129	case I40E_NVM_WRITE:
3130		cmd_str = "WRITE";
3131		break;
3132	default:
3133		device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
3134		return;
3135	}
3136	device_printf(dev,
3137	    "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
3138	    cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
3139}
3140
3141int
3142ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3143{
3144	struct i40e_hw *hw = &pf->hw;
3145	struct i40e_nvm_access *nvma;
3146	device_t dev = pf->dev;
3147	enum i40e_status_code status = 0;
3148	size_t nvma_size, ifd_len, exp_len;
3149	int err, perrno;
3150
3151	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3152
3153	/* Sanity checks */
3154	nvma_size = sizeof(struct i40e_nvm_access);
3155	ifd_len = ifd->ifd_len;
3156
3157	if (ifd_len < nvma_size ||
3158	    ifd->ifd_data == NULL) {
3159		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3160		    __func__);
3161		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3162		    __func__, ifd_len, nvma_size);
3163		device_printf(dev, "%s: data pointer: %p\n", __func__,
3164		    ifd->ifd_data);
3165		return (EINVAL);
3166	}
3167
3168	nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3169	err = copyin(ifd->ifd_data, nvma, ifd_len);
3170	if (err) {
3171		device_printf(dev, "%s: Cannot get request from user space\n",
3172		    __func__);
3173		free(nvma, M_IXL);
3174		return (err);
3175	}
3176
3177	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3178		ixl_print_nvm_cmd(dev, nvma);
3179
3180	if (IXL_PF_IS_RESETTING(pf)) {
3181		int count = 0;
3182		while (count++ < 100) {
3183			i40e_msec_delay(100);
3184			if (!(IXL_PF_IS_RESETTING(pf)))
3185				break;
3186		}
3187	}
3188
3189	if (IXL_PF_IS_RESETTING(pf)) {
3190		device_printf(dev,
3191		    "%s: timeout waiting for EMP reset to finish\n",
3192		    __func__);
3193		free(nvma, M_IXL);
3194		return (-EBUSY);
3195	}
3196
3197	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3198		device_printf(dev,
3199		    "%s: invalid request, data size not in supported range\n",
3200		    __func__);
3201		free(nvma, M_IXL);
3202		return (EINVAL);
3203	}
3204
3205	/*
3206	 * Older versions of the NVM update tool don't set ifd_len to the size
3207	 * of the entire buffer passed to the ioctl. Check the data_size field
3208	 * in the contained i40e_nvm_access struct and ensure everything is
3209	 * copied in from userspace.
3210	 */
3211	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3212
3213	if (ifd_len < exp_len) {
3214		ifd_len = exp_len;
3215		nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3216		err = copyin(ifd->ifd_data, nvma, ifd_len);
3217		if (err) {
3218			device_printf(dev, "%s: Cannot get request from user space\n",
3219					__func__);
3220			free(nvma, M_IXL);
3221			return (err);
3222		}
3223	}
3224
3225	// TODO: Might need a different lock here
3226	// IXL_PF_LOCK(pf);
3227	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3228	// IXL_PF_UNLOCK(pf);
3229
3230	err = copyout(nvma, ifd->ifd_data, ifd_len);
3231	free(nvma, M_IXL);
3232	if (err) {
3233		device_printf(dev, "%s: Cannot return data to user space\n",
3234				__func__);
3235		return (err);
3236	}
3237
3238	/* Let the nvmupdate report errors, show them only when debug is enabled */
3239	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3240		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3241		    i40e_stat_str(hw, status), perrno);
3242
3243	/*
3244	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3245	 * to run this ioctl again. So use -EACCES for -EPERM instead.
3246	 */
3247	if (perrno == -EPERM)
3248		return (-EACCES);
3249	else
3250		return (perrno);
3251}
3252
3253int
3254ixl_find_i2c_interface(struct ixl_pf *pf)
3255{
3256	struct i40e_hw *hw = &pf->hw;
3257	bool i2c_en, port_matched;
3258	u32 reg;
3259
3260	for (int i = 0; i < 4; i++) {
3261		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3262		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3263		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3264		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3265		    & BIT(hw->port);
3266		if (i2c_en && port_matched)
3267			return (i);
3268	}
3269
3270	return (-1);
3271}
3272
3273void
3274ixl_set_link(struct ixl_pf *pf, bool enable)
3275{
3276	struct i40e_hw *hw = &pf->hw;
3277	device_t dev = pf->dev;
3278	struct i40e_aq_get_phy_abilities_resp abilities;
3279	struct i40e_aq_set_phy_config config;
3280	enum i40e_status_code aq_error = 0;
3281	u32 phy_type, phy_type_ext;
3282
3283	/* Get initial capability information */
3284	aq_error = i40e_aq_get_phy_capabilities(hw,
3285	    FALSE, TRUE, &abilities, NULL);
3286	if (aq_error) {
3287		device_printf(dev,
3288		    "%s: Error getting phy capabilities %d,"
3289		    " aq error: %d\n", __func__, aq_error,
3290		    hw->aq.asq_last_status);
3291		return;
3292	}
3293
3294	phy_type = abilities.phy_type;
3295	phy_type_ext = abilities.phy_type_ext;
3296
3297	/* Get current capability information */
3298	aq_error = i40e_aq_get_phy_capabilities(hw,
3299	    FALSE, FALSE, &abilities, NULL);
3300	if (aq_error) {
3301		device_printf(dev,
3302		    "%s: Error getting phy capabilities %d,"
3303		    " aq error: %d\n", __func__, aq_error,
3304		    hw->aq.asq_last_status);
3305		return;
3306	}
3307
3308	/* Prepare new config */
3309	memset(&config, 0, sizeof(config));
3310	config.link_speed = abilities.link_speed;
3311	config.abilities = abilities.abilities;
3312	config.eee_capability = abilities.eee_capability;
3313	config.eeer = abilities.eeer_val;
3314	config.low_power_ctrl = abilities.d3_lpan;
3315	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
3316	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
3317	config.phy_type = 0;
3318	config.phy_type_ext = 0;
3319
3320	config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3321			I40E_AQ_PHY_FLAG_PAUSE_RX);
3322
3323	switch (pf->fc) {
3324	case I40E_FC_FULL:
3325		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3326			I40E_AQ_PHY_FLAG_PAUSE_RX;
3327		break;
3328	case I40E_FC_RX_PAUSE:
3329		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3330		break;
3331	case I40E_FC_TX_PAUSE:
3332		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3333		break;
3334	default:
3335		break;
3336	}
3337
3338	if (enable) {
3339		config.phy_type = phy_type;
3340		config.phy_type_ext = phy_type_ext;
3341
3342	}
3343
3344	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3345	if (aq_error) {
3346		device_printf(dev,
3347		    "%s: Error setting new phy config %d,"
3348		    " aq error: %d\n", __func__, aq_error,
3349		    hw->aq.asq_last_status);
3350		return;
3351	}
3352
3353	aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3354	if (aq_error) {
3355		device_printf(dev,
3356		    "%s: Error set link config %d,"
3357		    " aq error: %d\n", __func__, aq_error,
3358		    hw->aq.asq_last_status);
3359		return;
3360	}
3361}
3362
3363static char *
3364ixl_phy_type_string(u32 bit_pos, bool ext)
3365{
3366	static char * phy_types_str[32] = {
3367		"SGMII",
3368		"1000BASE-KX",
3369		"10GBASE-KX4",
3370		"10GBASE-KR",
3371		"40GBASE-KR4",
3372		"XAUI",
3373		"XFI",
3374		"SFI",
3375		"XLAUI",
3376		"XLPPI",
3377		"40GBASE-CR4",
3378		"10GBASE-CR1",
3379		"SFP+ Active DA",
3380		"QSFP+ Active DA",
3381		"Reserved (14)",
3382		"Reserved (15)",
3383		"Reserved (16)",
3384		"100BASE-TX",
3385		"1000BASE-T",
3386		"10GBASE-T",
3387		"10GBASE-SR",
3388		"10GBASE-LR",
3389		"10GBASE-SFP+Cu",
3390		"10GBASE-CR1",
3391		"40GBASE-CR4",
3392		"40GBASE-SR4",
3393		"40GBASE-LR4",
3394		"1000BASE-SX",
3395		"1000BASE-LX",
3396		"1000BASE-T Optical",
3397		"20GBASE-KR2",
3398		"Reserved (31)"
3399	};
3400	static char * ext_phy_types_str[8] = {
3401		"25GBASE-KR",
3402		"25GBASE-CR",
3403		"25GBASE-SR",
3404		"25GBASE-LR",
3405		"25GBASE-AOC",
3406		"25GBASE-ACC",
3407		"2.5GBASE-T",
3408		"5GBASE-T"
3409	};
3410
3411	if (ext && bit_pos > 7) return "Invalid_Ext";
3412	if (bit_pos > 31) return "Invalid";
3413
3414	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3415}
3416
3417/* TODO: ERJ: I don't this is necessary anymore. */
3418int
3419ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3420{
3421	device_t dev = pf->dev;
3422	struct i40e_hw *hw = &pf->hw;
3423	struct i40e_aq_desc desc;
3424	enum i40e_status_code status;
3425
3426	struct i40e_aqc_get_link_status *aq_link_status =
3427		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3428
3429	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3430	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3431	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3432	if (status) {
3433		device_printf(dev,
3434		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3435		    __func__, i40e_stat_str(hw, status),
3436		    i40e_aq_str(hw, hw->aq.asq_last_status));
3437		return (EIO);
3438	}
3439
3440	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3441	return (0);
3442}
3443
3444static char *
3445ixl_phy_type_string_ls(u8 val)
3446{
3447	if (val >= 0x1F)
3448		return ixl_phy_type_string(val - 0x1F, true);
3449	else
3450		return ixl_phy_type_string(val, false);
3451}
3452
3453static int
3454ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3455{
3456	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3457	device_t dev = pf->dev;
3458	struct sbuf *buf;
3459	int error = 0;
3460
3461	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3462	if (!buf) {
3463		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3464		return (ENOMEM);
3465	}
3466
3467	struct i40e_aqc_get_link_status link_status;
3468	error = ixl_aq_get_link_status(pf, &link_status);
3469	if (error) {
3470		sbuf_delete(buf);
3471		return (error);
3472	}
3473
3474	sbuf_printf(buf, "\n"
3475	    "PHY Type : 0x%02x<%s>\n"
3476	    "Speed    : 0x%02x\n"
3477	    "Link info: 0x%02x\n"
3478	    "AN info  : 0x%02x\n"
3479	    "Ext info : 0x%02x\n"
3480	    "Loopback : 0x%02x\n"
3481	    "Max Frame: %d\n"
3482	    "Config   : 0x%02x\n"
3483	    "Power    : 0x%02x",
3484	    link_status.phy_type,
3485	    ixl_phy_type_string_ls(link_status.phy_type),
3486	    link_status.link_speed,
3487	    link_status.link_info,
3488	    link_status.an_info,
3489	    link_status.ext_info,
3490	    link_status.loopback,
3491	    link_status.max_frame_size,
3492	    link_status.config,
3493	    link_status.power_desc);
3494
3495	error = sbuf_finish(buf);
3496	if (error)
3497		device_printf(dev, "Error finishing sbuf: %d\n", error);
3498
3499	sbuf_delete(buf);
3500	return (error);
3501}
3502
3503static int
3504ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3505{
3506	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3507	struct i40e_hw *hw = &pf->hw;
3508	device_t dev = pf->dev;
3509	enum i40e_status_code status;
3510	struct i40e_aq_get_phy_abilities_resp abilities;
3511	struct sbuf *buf;
3512	int error = 0;
3513
3514	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3515	if (!buf) {
3516		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3517		return (ENOMEM);
3518	}
3519
3520	status = i40e_aq_get_phy_capabilities(hw,
3521	    FALSE, arg2 != 0, &abilities, NULL);
3522	if (status) {
3523		device_printf(dev,
3524		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3525		    __func__, i40e_stat_str(hw, status),
3526		    i40e_aq_str(hw, hw->aq.asq_last_status));
3527		sbuf_delete(buf);
3528		return (EIO);
3529	}
3530
3531	sbuf_printf(buf, "\n"
3532	    "PHY Type : %08x",
3533	    abilities.phy_type);
3534
3535	if (abilities.phy_type != 0) {
3536		sbuf_printf(buf, "<");
3537		for (int i = 0; i < 32; i++)
3538			if ((1 << i) & abilities.phy_type)
3539				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3540		sbuf_printf(buf, ">");
3541	}
3542
3543	sbuf_printf(buf, "\nPHY Ext  : %02x",
3544	    abilities.phy_type_ext);
3545
3546	if (abilities.phy_type_ext != 0) {
3547		sbuf_printf(buf, "<");
3548		for (int i = 0; i < 4; i++)
3549			if ((1 << i) & abilities.phy_type_ext)
3550				sbuf_printf(buf, "%s,",
3551				    ixl_phy_type_string(i, true));
3552		sbuf_printf(buf, ">");
3553	}
3554
3555	sbuf_printf(buf, "\nSpeed    : %02x", abilities.link_speed);
3556	if (abilities.link_speed != 0) {
3557		u8 link_speed;
3558		sbuf_printf(buf, " <");
3559		for (int i = 0; i < 8; i++) {
3560			link_speed = (1 << i) & abilities.link_speed;
3561			if (link_speed)
3562				sbuf_printf(buf, "%s, ",
3563				    ixl_link_speed_string(link_speed));
3564		}
3565		sbuf_printf(buf, ">");
3566	}
3567
3568	sbuf_printf(buf, "\n"
3569	    "Abilities: %02x\n"
3570	    "EEE cap  : %04x\n"
3571	    "EEER reg : %08x\n"
3572	    "D3 Lpan  : %02x\n"
3573	    "ID       : %02x %02x %02x %02x\n"
3574	    "ModType  : %02x %02x %02x\n"
3575	    "ModType E: %01x\n"
3576	    "FEC Cfg  : %02x\n"
3577	    "Ext CC   : %02x",
3578	    abilities.abilities, abilities.eee_capability,
3579	    abilities.eeer_val, abilities.d3_lpan,
3580	    abilities.phy_id[0], abilities.phy_id[1],
3581	    abilities.phy_id[2], abilities.phy_id[3],
3582	    abilities.module_type[0], abilities.module_type[1],
3583	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3584	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3585	    abilities.ext_comp_code);
3586
3587	error = sbuf_finish(buf);
3588	if (error)
3589		device_printf(dev, "Error finishing sbuf: %d\n", error);
3590
3591	sbuf_delete(buf);
3592	return (error);
3593}
3594
3595static int
3596ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS)
3597{
3598	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3599	struct i40e_hw *hw = &pf->hw;
3600	device_t dev = pf->dev;
3601	struct sbuf *buf;
3602	int error = 0;
3603
3604	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3605	if (buf == NULL) {
3606		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3607		return (ENOMEM);
3608	}
3609
3610	if (hw->mac.type == I40E_MAC_X722) {
3611		sbuf_printf(buf, "\n"
3612		    "PCS Link Control Register:                          unavailable\n"
3613		    "PCS Link Status 1:                                  unavailable\n"
3614		    "PCS Link Status 2:                                  unavailable\n"
3615		    "XGMII FIFO Status:                                  unavailable\n"
3616		    "Auto-Negotiation (AN) Status:                       unavailable\n"
3617		    "KR PCS Status:                                      unavailable\n"
3618		    "KR FEC Status 1 ��� FEC Correctable Blocks Counter:   unavailable\n"
3619		    "KR FEC Status 2 ��� FEC Uncorrectable Blocks Counter: unavailable"
3620		);
3621	} else {
3622		sbuf_printf(buf, "\n"
3623		    "PCS Link Control Register:                          %#010X\n"
3624		    "PCS Link Status 1:                                  %#010X\n"
3625		    "PCS Link Status 2:                                  %#010X\n"
3626		    "XGMII FIFO Status:                                  %#010X\n"
3627		    "Auto-Negotiation (AN) Status:                       %#010X\n"
3628		    "KR PCS Status:                                      %#010X\n"
3629		    "KR FEC Status 1 ��� FEC Correctable Blocks Counter:   %#010X\n"
3630		    "KR FEC Status 2 ��� FEC Uncorrectable Blocks Counter: %#010X",
3631		    rd32(hw, I40E_PRTMAC_PCS_LINK_CTRL),
3632		    rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS1(0)),
3633		    rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS2),
3634		    rd32(hw, I40E_PRTMAC_PCS_XGMII_FIFO_STATUS),
3635		    rd32(hw, I40E_PRTMAC_PCS_AN_LP_STATUS),
3636		    rd32(hw, I40E_PRTMAC_PCS_KR_STATUS),
3637		    rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS1),
3638		    rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS2)
3639		);
3640	}
3641
3642	error = sbuf_finish(buf);
3643	if (error)
3644		device_printf(dev, "Error finishing sbuf: %d\n", error);
3645
3646	sbuf_delete(buf);
3647	return (error);
3648}
3649
3650static int
3651ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3652{
3653	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3654	struct ixl_vsi *vsi = &pf->vsi;
3655	struct ixl_mac_filter *f;
3656	device_t dev = pf->dev;
3657	int error = 0, ftl_len = 0, ftl_counter = 0;
3658
3659	struct sbuf *buf;
3660
3661	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3662	if (!buf) {
3663		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3664		return (ENOMEM);
3665	}
3666
3667	sbuf_printf(buf, "\n");
3668
3669	/* Print MAC filters */
3670	sbuf_printf(buf, "PF Filters:\n");
3671	LIST_FOREACH(f, &vsi->ftl, ftle)
3672		ftl_len++;
3673
3674	if (ftl_len < 1)
3675		sbuf_printf(buf, "(none)\n");
3676	else {
3677		LIST_FOREACH(f, &vsi->ftl, ftle) {
3678			sbuf_printf(buf,
3679			    MAC_FORMAT ", vlan %4d, flags %#06x",
3680			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3681			/* don't print '\n' for last entry */
3682			if (++ftl_counter != ftl_len)
3683				sbuf_printf(buf, "\n");
3684		}
3685	}
3686
3687#ifdef PCI_IOV
3688	/* TODO: Give each VF its own filter list sysctl */
3689	struct ixl_vf *vf;
3690	if (pf->num_vfs > 0) {
3691		sbuf_printf(buf, "\n\n");
3692		for (int i = 0; i < pf->num_vfs; i++) {
3693			vf = &pf->vfs[i];
3694			if (!(vf->vf_flags & VF_FLAG_ENABLED))
3695				continue;
3696
3697			vsi = &vf->vsi;
3698			ftl_len = 0, ftl_counter = 0;
3699			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3700			LIST_FOREACH(f, &vsi->ftl, ftle)
3701				ftl_len++;
3702
3703			if (ftl_len < 1)
3704				sbuf_printf(buf, "(none)\n");
3705			else {
3706				LIST_FOREACH(f, &vsi->ftl, ftle) {
3707					sbuf_printf(buf,
3708					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
3709					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3710				}
3711			}
3712		}
3713	}
3714#endif
3715
3716	error = sbuf_finish(buf);
3717	if (error)
3718		device_printf(dev, "Error finishing sbuf: %d\n", error);
3719	sbuf_delete(buf);
3720
3721	return (error);
3722}
3723
3724#define IXL_SW_RES_SIZE 0x14
3725int
3726ixl_res_alloc_cmp(const void *a, const void *b)
3727{
3728	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3729	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3730	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3731
3732	return ((int)one->resource_type - (int)two->resource_type);
3733}
3734
3735/*
3736 * Longest string length: 25
3737 */
3738const char *
3739ixl_switch_res_type_string(u8 type)
3740{
3741	static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3742		"VEB",
3743		"VSI",
3744		"Perfect Match MAC address",
3745		"S-tag",
3746		"(Reserved)",
3747		"Multicast hash entry",
3748		"Unicast hash entry",
3749		"VLAN",
3750		"VSI List entry",
3751		"(Reserved)",
3752		"VLAN Statistic Pool",
3753		"Mirror Rule",
3754		"Queue Set",
3755		"Inner VLAN Forward filter",
3756		"(Reserved)",
3757		"Inner MAC",
3758		"IP",
3759		"GRE/VN1 Key",
3760		"VN2 Key",
3761		"Tunneling Port"
3762	};
3763
3764	if (type < IXL_SW_RES_SIZE)
3765		return ixl_switch_res_type_strings[type];
3766	else
3767		return "(Reserved)";
3768}
3769
3770static int
3771ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3772{
3773	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3774	struct i40e_hw *hw = &pf->hw;
3775	device_t dev = pf->dev;
3776	struct sbuf *buf;
3777	enum i40e_status_code status;
3778	int error = 0;
3779
3780	u8 num_entries;
3781	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3782
3783	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3784	if (!buf) {
3785		device_printf(dev, "Could not allocate sbuf for output.\n");
3786		return (ENOMEM);
3787	}
3788
3789	bzero(resp, sizeof(resp));
3790	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3791				resp,
3792				IXL_SW_RES_SIZE,
3793				NULL);
3794	if (status) {
3795		device_printf(dev,
3796		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3797		    __func__, i40e_stat_str(hw, status),
3798		    i40e_aq_str(hw, hw->aq.asq_last_status));
3799		sbuf_delete(buf);
3800		return (error);
3801	}
3802
3803	/* Sort entries by type for display */
3804	qsort(resp, num_entries,
3805	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3806	    &ixl_res_alloc_cmp);
3807
3808	sbuf_cat(buf, "\n");
3809	sbuf_printf(buf, "# of entries: %d\n", num_entries);
3810	sbuf_printf(buf,
3811	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
3812	    "                          | (this)     | (all) | (this) | (all)       \n");
3813	for (int i = 0; i < num_entries; i++) {
3814		sbuf_printf(buf,
3815		    "%25s | %10d   %5d   %6d   %12d",
3816		    ixl_switch_res_type_string(resp[i].resource_type),
3817		    resp[i].guaranteed,
3818		    resp[i].total,
3819		    resp[i].used,
3820		    resp[i].total_unalloced);
3821		if (i < num_entries - 1)
3822			sbuf_cat(buf, "\n");
3823	}
3824
3825	error = sbuf_finish(buf);
3826	if (error)
3827		device_printf(dev, "Error finishing sbuf: %d\n", error);
3828
3829	sbuf_delete(buf);
3830	return (error);
3831}
3832
3833enum ixl_sw_seid_offset {
3834	IXL_SW_SEID_EMP = 1,
3835	IXL_SW_SEID_MAC_START = 2,
3836	IXL_SW_SEID_MAC_END = 5,
3837	IXL_SW_SEID_PF_START = 16,
3838	IXL_SW_SEID_PF_END = 31,
3839	IXL_SW_SEID_VF_START = 32,
3840	IXL_SW_SEID_VF_END = 159,
3841};
3842
3843/*
3844 * Caller must init and delete sbuf; this function will clear and
3845 * finish it for caller.
3846 *
3847 * Note: The SEID argument only applies for elements defined by FW at
3848 * power-on; these include the EMP, Ports, PFs and VFs.
3849 */
3850static char *
3851ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3852{
3853	sbuf_clear(s);
3854
3855	/* If SEID is in certain ranges, then we can infer the
3856	 * mapping of SEID to switch element.
3857	 */
3858	if (seid == IXL_SW_SEID_EMP) {
3859		sbuf_cat(s, "EMP");
3860		goto out;
3861	} else if (seid >= IXL_SW_SEID_MAC_START &&
3862	    seid <= IXL_SW_SEID_MAC_END) {
3863		sbuf_printf(s, "MAC  %2d",
3864		    seid - IXL_SW_SEID_MAC_START);
3865		goto out;
3866	} else if (seid >= IXL_SW_SEID_PF_START &&
3867	    seid <= IXL_SW_SEID_PF_END) {
3868		sbuf_printf(s, "PF  %3d",
3869		    seid - IXL_SW_SEID_PF_START);
3870		goto out;
3871	} else if (seid >= IXL_SW_SEID_VF_START &&
3872	    seid <= IXL_SW_SEID_VF_END) {
3873		sbuf_printf(s, "VF  %3d",
3874		    seid - IXL_SW_SEID_VF_START);
3875		goto out;
3876	}
3877
3878	switch (element_type) {
3879	case I40E_AQ_SW_ELEM_TYPE_BMC:
3880		sbuf_cat(s, "BMC");
3881		break;
3882	case I40E_AQ_SW_ELEM_TYPE_PV:
3883		sbuf_cat(s, "PV");
3884		break;
3885	case I40E_AQ_SW_ELEM_TYPE_VEB:
3886		sbuf_cat(s, "VEB");
3887		break;
3888	case I40E_AQ_SW_ELEM_TYPE_PA:
3889		sbuf_cat(s, "PA");
3890		break;
3891	case I40E_AQ_SW_ELEM_TYPE_VSI:
3892		sbuf_printf(s, "VSI");
3893		break;
3894	default:
3895		sbuf_cat(s, "?");
3896		break;
3897	}
3898
3899out:
3900	sbuf_finish(s);
3901	return sbuf_data(s);
3902}
3903
3904static int
3905ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3906{
3907	const struct i40e_aqc_switch_config_element_resp *one, *two;
3908	one = (const struct i40e_aqc_switch_config_element_resp *)a;
3909	two = (const struct i40e_aqc_switch_config_element_resp *)b;
3910
3911	return ((int)one->seid - (int)two->seid);
3912}
3913
3914static int
3915ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3916{
3917	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3918	struct i40e_hw *hw = &pf->hw;
3919	device_t dev = pf->dev;
3920	struct sbuf *buf;
3921	struct sbuf *nmbuf;
3922	enum i40e_status_code status;
3923	int error = 0;
3924	u16 next = 0;
3925	u8 aq_buf[I40E_AQ_LARGE_BUF];
3926
3927	struct i40e_aqc_switch_config_element_resp *elem;
3928	struct i40e_aqc_get_switch_config_resp *sw_config;
3929	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3930
3931	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3932	if (!buf) {
3933		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3934		return (ENOMEM);
3935	}
3936
3937	status = i40e_aq_get_switch_config(hw, sw_config,
3938	    sizeof(aq_buf), &next, NULL);
3939	if (status) {
3940		device_printf(dev,
3941		    "%s: aq_get_switch_config() error %s, aq error %s\n",
3942		    __func__, i40e_stat_str(hw, status),
3943		    i40e_aq_str(hw, hw->aq.asq_last_status));
3944		sbuf_delete(buf);
3945		return error;
3946	}
3947	if (next)
3948		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3949		    __func__, next);
3950
3951	nmbuf = sbuf_new_auto();
3952	if (!nmbuf) {
3953		device_printf(dev, "Could not allocate sbuf for name output.\n");
3954		sbuf_delete(buf);
3955		return (ENOMEM);
3956	}
3957
3958	/* Sort entries by SEID for display */
3959	qsort(sw_config->element, sw_config->header.num_reported,
3960	    sizeof(struct i40e_aqc_switch_config_element_resp),
3961	    &ixl_sw_cfg_elem_seid_cmp);
3962
3963	sbuf_cat(buf, "\n");
3964	/* Assuming <= 255 elements in switch */
3965	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3966	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3967	/* Exclude:
3968	 * Revision -- all elements are revision 1 for now
3969	 */
3970	sbuf_printf(buf,
3971	    "SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
3972	    "                |                 |                 | (uplink)\n");
3973	for (int i = 0; i < sw_config->header.num_reported; i++) {
3974		elem = &sw_config->element[i];
3975
3976		// "%4d (%8s) | %8s   %8s   %#8x",
3977		sbuf_printf(buf, "%4d", elem->seid);
3978		sbuf_cat(buf, " ");
3979		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3980		    elem->element_type, elem->seid));
3981		sbuf_cat(buf, " | ");
3982		sbuf_printf(buf, "%4d", elem->uplink_seid);
3983		sbuf_cat(buf, " ");
3984		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3985		    0, elem->uplink_seid));
3986		sbuf_cat(buf, " | ");
3987		sbuf_printf(buf, "%4d", elem->downlink_seid);
3988		sbuf_cat(buf, " ");
3989		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3990		    0, elem->downlink_seid));
3991		sbuf_cat(buf, " | ");
3992		sbuf_printf(buf, "%8d", elem->connection_type);
3993		if (i < sw_config->header.num_reported - 1)
3994			sbuf_cat(buf, "\n");
3995	}
3996	sbuf_delete(nmbuf);
3997
3998	error = sbuf_finish(buf);
3999	if (error)
4000		device_printf(dev, "Error finishing sbuf: %d\n", error);
4001
4002	sbuf_delete(buf);
4003
4004	return (error);
4005}
4006
4007static int
4008ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
4009{
4010	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4011	struct i40e_hw *hw = &pf->hw;
4012	device_t dev = pf->dev;
4013	int requested_vlan = -1;
4014	enum i40e_status_code status = 0;
4015	int error = 0;
4016
4017	error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
4018	if ((error) || (req->newptr == NULL))
4019	    return (error);
4020
4021	if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
4022		device_printf(dev, "Flags disallow setting of vlans\n");
4023		return (ENODEV);
4024	}
4025
4026	hw->switch_tag = requested_vlan;
4027	device_printf(dev,
4028	    "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
4029	    hw->switch_tag, hw->first_tag, hw->second_tag);
4030	status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
4031	if (status) {
4032		device_printf(dev,
4033		    "%s: aq_set_switch_config() error %s, aq error %s\n",
4034		    __func__, i40e_stat_str(hw, status),
4035		    i40e_aq_str(hw, hw->aq.asq_last_status));
4036		return (status);
4037	}
4038	return (0);
4039}
4040
4041static int
4042ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4043{
4044	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4045	struct i40e_hw *hw = &pf->hw;
4046	device_t dev = pf->dev;
4047	struct sbuf *buf;
4048	int error = 0;
4049	enum i40e_status_code status;
4050	u32 reg;
4051
4052	struct i40e_aqc_get_set_rss_key_data key_data;
4053
4054	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4055	if (!buf) {
4056		device_printf(dev, "Could not allocate sbuf for output.\n");
4057		return (ENOMEM);
4058	}
4059
4060	bzero(&key_data, sizeof(key_data));
4061
4062	sbuf_cat(buf, "\n");
4063	if (hw->mac.type == I40E_MAC_X722) {
4064		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4065		if (status)
4066			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4067			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4068	} else {
4069		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4070			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4071			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4072		}
4073	}
4074
4075	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4076
4077	error = sbuf_finish(buf);
4078	if (error)
4079		device_printf(dev, "Error finishing sbuf: %d\n", error);
4080	sbuf_delete(buf);
4081
4082	return (error);
4083}
4084
4085static void
4086ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4087{
4088	int i, j, k, width;
4089	char c;
4090
4091	if (length < 1 || buf == NULL) return;
4092
4093	int byte_stride = 16;
4094	int lines = length / byte_stride;
4095	int rem = length % byte_stride;
4096	if (rem > 0)
4097		lines++;
4098
4099	for (i = 0; i < lines; i++) {
4100		width = (rem > 0 && i == lines - 1)
4101		    ? rem : byte_stride;
4102
4103		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4104
4105		for (j = 0; j < width; j++)
4106			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4107
4108		if (width < byte_stride) {
4109			for (k = 0; k < (byte_stride - width); k++)
4110				sbuf_printf(sb, "   ");
4111		}
4112
4113		if (!text) {
4114			sbuf_printf(sb, "\n");
4115			continue;
4116		}
4117
4118		for (j = 0; j < width; j++) {
4119			c = (char)buf[i * byte_stride + j];
4120			if (c < 32 || c > 126)
4121				sbuf_printf(sb, ".");
4122			else
4123				sbuf_printf(sb, "%c", c);
4124
4125			if (j == width - 1)
4126				sbuf_printf(sb, "\n");
4127		}
4128	}
4129}
4130
4131static int
4132ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4133{
4134	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4135	struct i40e_hw *hw = &pf->hw;
4136	device_t dev = pf->dev;
4137	struct sbuf *buf;
4138	int error = 0;
4139	enum i40e_status_code status;
4140	u8 hlut[512];
4141	u32 reg;
4142
4143	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4144	if (!buf) {
4145		device_printf(dev, "Could not allocate sbuf for output.\n");
4146		return (ENOMEM);
4147	}
4148
4149	bzero(hlut, sizeof(hlut));
4150	sbuf_cat(buf, "\n");
4151	if (hw->mac.type == I40E_MAC_X722) {
4152		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4153		if (status)
4154			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4155			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4156	} else {
4157		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4158			reg = rd32(hw, I40E_PFQF_HLUT(i));
4159			bcopy(&reg, &hlut[i << 2], 4);
4160		}
4161	}
4162	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4163
4164	error = sbuf_finish(buf);
4165	if (error)
4166		device_printf(dev, "Error finishing sbuf: %d\n", error);
4167	sbuf_delete(buf);
4168
4169	return (error);
4170}
4171
4172static int
4173ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4174{
4175	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4176	struct i40e_hw *hw = &pf->hw;
4177	u64 hena;
4178
4179	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4180	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4181
4182	return sysctl_handle_long(oidp, NULL, hena, req);
4183}
4184
4185/*
4186 * Sysctl to disable firmware's link management
4187 *
4188 * 1 - Disable link management on this port
4189 * 0 - Re-enable link management
4190 *
4191 * On normal NVMs, firmware manages link by default.
4192 */
4193static int
4194ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4195{
4196	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4197	struct i40e_hw *hw = &pf->hw;
4198	device_t dev = pf->dev;
4199	int requested_mode = -1;
4200	enum i40e_status_code status = 0;
4201	int error = 0;
4202
4203	/* Read in new mode */
4204	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4205	if ((error) || (req->newptr == NULL))
4206		return (error);
4207	/* Check for sane value */
4208	if (requested_mode < 0 || requested_mode > 1) {
4209		device_printf(dev, "Valid modes are 0 or 1\n");
4210		return (EINVAL);
4211	}
4212
4213	/* Set new mode */
4214	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4215	if (status) {
4216		device_printf(dev,
4217		    "%s: Error setting new phy debug mode %s,"
4218		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4219		    i40e_aq_str(hw, hw->aq.asq_last_status));
4220		return (EIO);
4221	}
4222
4223	return (0);
4224}
4225
4226/*
4227 * Read some diagnostic data from a (Q)SFP+ module
4228 *
4229 *             SFP A2   QSFP Lower Page
4230 * Temperature 96-97	22-23
4231 * Vcc         98-99    26-27
4232 * TX power    102-103  34-35..40-41
4233 * RX power    104-105  50-51..56-57
4234 */
4235static int
4236ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4237{
4238	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4239	device_t dev = pf->dev;
4240	struct sbuf *sbuf;
4241	int error = 0;
4242	u8 output;
4243
4244	if (req->oldptr == NULL) {
4245		error = SYSCTL_OUT(req, 0, 128);
4246		return (0);
4247	}
4248
4249	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4250	if (error) {
4251		device_printf(dev, "Error reading from i2c\n");
4252		return (error);
4253	}
4254
4255	/* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4256	if (output == 0x3) {
4257		/*
4258		 * Check for:
4259		 * - Internally calibrated data
4260		 * - Diagnostic monitoring is implemented
4261		 */
4262		pf->read_i2c_byte(pf, 92, 0xA0, &output);
4263		if (!(output & 0x60)) {
4264			device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4265			return (0);
4266		}
4267
4268		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4269
4270		for (u8 offset = 96; offset < 100; offset++) {
4271			pf->read_i2c_byte(pf, offset, 0xA2, &output);
4272			sbuf_printf(sbuf, "%02X ", output);
4273		}
4274		for (u8 offset = 102; offset < 106; offset++) {
4275			pf->read_i2c_byte(pf, offset, 0xA2, &output);
4276			sbuf_printf(sbuf, "%02X ", output);
4277		}
4278	} else if (output == 0xD || output == 0x11) {
4279		/*
4280		 * QSFP+ modules are always internally calibrated, and must indicate
4281		 * what types of diagnostic monitoring are implemented
4282		 */
4283		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4284
4285		for (u8 offset = 22; offset < 24; offset++) {
4286			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4287			sbuf_printf(sbuf, "%02X ", output);
4288		}
4289		for (u8 offset = 26; offset < 28; offset++) {
4290			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4291			sbuf_printf(sbuf, "%02X ", output);
4292		}
4293		/* Read the data from the first lane */
4294		for (u8 offset = 34; offset < 36; offset++) {
4295			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4296			sbuf_printf(sbuf, "%02X ", output);
4297		}
4298		for (u8 offset = 50; offset < 52; offset++) {
4299			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4300			sbuf_printf(sbuf, "%02X ", output);
4301		}
4302	} else {
4303		device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4304		return (0);
4305	}
4306
4307	sbuf_finish(sbuf);
4308	sbuf_delete(sbuf);
4309
4310	return (0);
4311}
4312
4313/*
4314 * Sysctl to read a byte from I2C bus.
4315 *
4316 * Input: 32-bit value:
4317 * 	bits 0-7:   device address (0xA0 or 0xA2)
4318 * 	bits 8-15:  offset (0-255)
4319 *	bits 16-31: unused
4320 * Output: 8-bit value read
4321 */
4322static int
4323ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4324{
4325	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4326	device_t dev = pf->dev;
4327	int input = -1, error = 0;
4328	u8 dev_addr, offset, output;
4329
4330	/* Read in I2C read parameters */
4331	error = sysctl_handle_int(oidp, &input, 0, req);
4332	if ((error) || (req->newptr == NULL))
4333		return (error);
4334	/* Validate device address */
4335	dev_addr = input & 0xFF;
4336	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4337		return (EINVAL);
4338	}
4339	offset = (input >> 8) & 0xFF;
4340
4341	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4342	if (error)
4343		return (error);
4344
4345	device_printf(dev, "%02X\n", output);
4346	return (0);
4347}
4348
4349/*
4350 * Sysctl to write a byte to the I2C bus.
4351 *
4352 * Input: 32-bit value:
4353 * 	bits 0-7:   device address (0xA0 or 0xA2)
4354 * 	bits 8-15:  offset (0-255)
4355 *	bits 16-23: value to write
4356 *	bits 24-31: unused
4357 * Output: 8-bit value written
4358 */
4359static int
4360ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4361{
4362	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4363	device_t dev = pf->dev;
4364	int input = -1, error = 0;
4365	u8 dev_addr, offset, value;
4366
4367	/* Read in I2C write parameters */
4368	error = sysctl_handle_int(oidp, &input, 0, req);
4369	if ((error) || (req->newptr == NULL))
4370		return (error);
4371	/* Validate device address */
4372	dev_addr = input & 0xFF;
4373	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4374		return (EINVAL);
4375	}
4376	offset = (input >> 8) & 0xFF;
4377	value = (input >> 16) & 0xFF;
4378
4379	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4380	if (error)
4381		return (error);
4382
4383	device_printf(dev, "%02X written\n", value);
4384	return (0);
4385}
4386
4387static int
4388ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4389    u8 bit_pos, int *is_set)
4390{
4391	device_t dev = pf->dev;
4392	struct i40e_hw *hw = &pf->hw;
4393	enum i40e_status_code status;
4394
4395	if (IXL_PF_IN_RECOVERY_MODE(pf))
4396		return (EIO);
4397
4398	status = i40e_aq_get_phy_capabilities(hw,
4399	    FALSE, FALSE, abilities, NULL);
4400	if (status) {
4401		device_printf(dev,
4402		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4403		    __func__, i40e_stat_str(hw, status),
4404		    i40e_aq_str(hw, hw->aq.asq_last_status));
4405		return (EIO);
4406	}
4407
4408	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4409	return (0);
4410}
4411
4412static int
4413ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4414    u8 bit_pos, int set)
4415{
4416	device_t dev = pf->dev;
4417	struct i40e_hw *hw = &pf->hw;
4418	struct i40e_aq_set_phy_config config;
4419	enum i40e_status_code status;
4420
4421	/* Set new PHY config */
4422	memset(&config, 0, sizeof(config));
4423	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4424	if (set)
4425		config.fec_config |= bit_pos;
4426	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4427		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4428		config.phy_type = abilities->phy_type;
4429		config.phy_type_ext = abilities->phy_type_ext;
4430		config.link_speed = abilities->link_speed;
4431		config.eee_capability = abilities->eee_capability;
4432		config.eeer = abilities->eeer_val;
4433		config.low_power_ctrl = abilities->d3_lpan;
4434		status = i40e_aq_set_phy_config(hw, &config, NULL);
4435
4436		if (status) {
4437			device_printf(dev,
4438			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4439			    __func__, i40e_stat_str(hw, status),
4440			    i40e_aq_str(hw, hw->aq.asq_last_status));
4441			return (EIO);
4442		}
4443	}
4444
4445	return (0);
4446}
4447
4448static int
4449ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4450{
4451	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4452	int mode, error = 0;
4453
4454	struct i40e_aq_get_phy_abilities_resp abilities;
4455	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4456	if (error)
4457		return (error);
4458	/* Read in new mode */
4459	error = sysctl_handle_int(oidp, &mode, 0, req);
4460	if ((error) || (req->newptr == NULL))
4461		return (error);
4462
4463	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4464}
4465
4466static int
4467ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4468{
4469	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4470	int mode, error = 0;
4471
4472	struct i40e_aq_get_phy_abilities_resp abilities;
4473	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4474	if (error)
4475		return (error);
4476	/* Read in new mode */
4477	error = sysctl_handle_int(oidp, &mode, 0, req);
4478	if ((error) || (req->newptr == NULL))
4479		return (error);
4480
4481	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4482}
4483
4484static int
4485ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4486{
4487	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4488	int mode, error = 0;
4489
4490	struct i40e_aq_get_phy_abilities_resp abilities;
4491	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4492	if (error)
4493		return (error);
4494	/* Read in new mode */
4495	error = sysctl_handle_int(oidp, &mode, 0, req);
4496	if ((error) || (req->newptr == NULL))
4497		return (error);
4498
4499	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4500}
4501
4502static int
4503ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4504{
4505	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4506	int mode, error = 0;
4507
4508	struct i40e_aq_get_phy_abilities_resp abilities;
4509	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4510	if (error)
4511		return (error);
4512	/* Read in new mode */
4513	error = sysctl_handle_int(oidp, &mode, 0, req);
4514	if ((error) || (req->newptr == NULL))
4515		return (error);
4516
4517	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4518}
4519
4520static int
4521ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4522{
4523	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4524	int mode, error = 0;
4525
4526	struct i40e_aq_get_phy_abilities_resp abilities;
4527	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4528	if (error)
4529		return (error);
4530	/* Read in new mode */
4531	error = sysctl_handle_int(oidp, &mode, 0, req);
4532	if ((error) || (req->newptr == NULL))
4533		return (error);
4534
4535	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4536}
4537
4538static int
4539ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4540{
4541	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4542	struct i40e_hw *hw = &pf->hw;
4543	device_t dev = pf->dev;
4544	struct sbuf *buf;
4545	int error = 0;
4546	enum i40e_status_code status;
4547
4548	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4549	if (!buf) {
4550		device_printf(dev, "Could not allocate sbuf for output.\n");
4551		return (ENOMEM);
4552	}
4553
4554	u8 *final_buff;
4555	/* This amount is only necessary if reading the entire cluster into memory */
4556#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4557	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4558	if (final_buff == NULL) {
4559		device_printf(dev, "Could not allocate memory for output.\n");
4560		goto out;
4561	}
4562	int final_buff_len = 0;
4563
4564	u8 cluster_id = 1;
4565	bool more = true;
4566
4567	u8 dump_buf[4096];
4568	u16 curr_buff_size = 4096;
4569	u8 curr_next_table = 0;
4570	u32 curr_next_index = 0;
4571
4572	u16 ret_buff_size;
4573	u8 ret_next_table;
4574	u32 ret_next_index;
4575
4576	sbuf_cat(buf, "\n");
4577
4578	while (more) {
4579		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4580		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4581		if (status) {
4582			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4583			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4584			goto free_out;
4585		}
4586
4587		/* copy info out of temp buffer */
4588		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4589		final_buff_len += ret_buff_size;
4590
4591		if (ret_next_table != curr_next_table) {
4592			/* We're done with the current table; we can dump out read data. */
4593			sbuf_printf(buf, "%d:", curr_next_table);
4594			int bytes_printed = 0;
4595			while (bytes_printed <= final_buff_len) {
4596				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4597				bytes_printed += 16;
4598			}
4599				sbuf_cat(buf, "\n");
4600
4601			/* The entire cluster has been read; we're finished */
4602			if (ret_next_table == 0xFF)
4603				break;
4604
4605			/* Otherwise clear the output buffer and continue reading */
4606			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4607			final_buff_len = 0;
4608		}
4609
4610		if (ret_next_index == 0xFFFFFFFF)
4611			ret_next_index = 0;
4612
4613		bzero(dump_buf, sizeof(dump_buf));
4614		curr_next_table = ret_next_table;
4615		curr_next_index = ret_next_index;
4616	}
4617
4618free_out:
4619	free(final_buff, M_IXL);
4620out:
4621	error = sbuf_finish(buf);
4622	if (error)
4623		device_printf(dev, "Error finishing sbuf: %d\n", error);
4624	sbuf_delete(buf);
4625
4626	return (error);
4627}
4628
4629static int
4630ixl_start_fw_lldp(struct ixl_pf *pf)
4631{
4632	struct i40e_hw *hw = &pf->hw;
4633	enum i40e_status_code status;
4634
4635	status = i40e_aq_start_lldp(hw, false, NULL);
4636	if (status != I40E_SUCCESS) {
4637		switch (hw->aq.asq_last_status) {
4638		case I40E_AQ_RC_EEXIST:
4639			device_printf(pf->dev,
4640			    "FW LLDP agent is already running\n");
4641			break;
4642		case I40E_AQ_RC_EPERM:
4643			device_printf(pf->dev,
4644			    "Device configuration forbids SW from starting "
4645			    "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4646			    "attribute to \"Enabled\" to use this sysctl\n");
4647			return (EINVAL);
4648		default:
4649			device_printf(pf->dev,
4650			    "Starting FW LLDP agent failed: error: %s, %s\n",
4651			    i40e_stat_str(hw, status),
4652			    i40e_aq_str(hw, hw->aq.asq_last_status));
4653			return (EINVAL);
4654		}
4655	}
4656
4657	ixl_clear_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4658	return (0);
4659}
4660
4661static int
4662ixl_stop_fw_lldp(struct ixl_pf *pf)
4663{
4664	struct i40e_hw *hw = &pf->hw;
4665	device_t dev = pf->dev;
4666	enum i40e_status_code status;
4667
4668	if (hw->func_caps.npar_enable != 0) {
4669		device_printf(dev,
4670		    "Disabling FW LLDP agent is not supported on this device\n");
4671		return (EINVAL);
4672	}
4673
4674	if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4675		device_printf(dev,
4676		    "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4677		return (EINVAL);
4678	}
4679
4680	status = i40e_aq_stop_lldp(hw, true, false, NULL);
4681	if (status != I40E_SUCCESS) {
4682		if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4683			device_printf(dev,
4684			    "Disabling FW LLDP agent failed: error: %s, %s\n",
4685			    i40e_stat_str(hw, status),
4686			    i40e_aq_str(hw, hw->aq.asq_last_status));
4687			return (EINVAL);
4688		}
4689
4690		device_printf(dev, "FW LLDP agent is already stopped\n");
4691	}
4692
4693	i40e_aq_set_dcb_parameters(hw, true, NULL);
4694	ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4695	return (0);
4696}
4697
4698static int
4699ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4700{
4701	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4702	int state, new_state, error = 0;
4703
4704	state = new_state = !ixl_test_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4705
4706	/* Read in new mode */
4707	error = sysctl_handle_int(oidp, &new_state, 0, req);
4708	if ((error) || (req->newptr == NULL))
4709		return (error);
4710
4711	/* Already in requested state */
4712	if (new_state == state)
4713		return (error);
4714
4715	if (new_state == 0)
4716		return ixl_stop_fw_lldp(pf);
4717
4718	return ixl_start_fw_lldp(pf);
4719}
4720
4721static int
4722ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4723{
4724	struct ixl_pf         *pf = (struct ixl_pf *)arg1;
4725	int                   state, new_state;
4726	int                   sysctl_handle_status = 0;
4727	enum i40e_status_code cmd_status;
4728
4729	/* Init states' values */
4730	state = new_state = ixl_test_state(&pf->state, IXL_STATE_EEE_ENABLED);
4731
4732	/* Get requested mode */
4733	sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4734	if ((sysctl_handle_status) || (req->newptr == NULL))
4735		return (sysctl_handle_status);
4736
4737	/* Check if state has changed */
4738	if (new_state == state)
4739		return (0);
4740
4741	/* Set new state */
4742	cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4743
4744	/* Save new state or report error */
4745	if (!cmd_status) {
4746		if (new_state == 0)
4747			ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);
4748		else
4749			ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
4750	} else if (cmd_status == I40E_ERR_CONFIG)
4751		return (EPERM);
4752	else
4753		return (EIO);
4754
4755	return (0);
4756}
4757
4758static int
4759ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4760{
4761	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4762	int error, state;
4763
4764	state = ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4765
4766	error = sysctl_handle_int(oidp, &state, 0, req);
4767	if ((error) || (req->newptr == NULL))
4768		return (error);
4769
4770	if (state == 0)
4771		ixl_clear_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4772	else
4773		ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4774
4775	return (0);
4776}
4777
4778
4779int
4780ixl_attach_get_link_status(struct ixl_pf *pf)
4781{
4782	struct i40e_hw *hw = &pf->hw;
4783	device_t dev = pf->dev;
4784	enum i40e_status_code status;
4785
4786	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4787	    (hw->aq.fw_maj_ver < 4)) {
4788		i40e_msec_delay(75);
4789		status = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4790		if (status != I40E_SUCCESS) {
4791			device_printf(dev,
4792			    "%s link restart failed status: %s, aq_err=%s\n",
4793			    __func__, i40e_stat_str(hw, status),
4794			    i40e_aq_str(hw, hw->aq.asq_last_status));
4795			return (EINVAL);
4796		}
4797	}
4798
4799	/* Determine link state */
4800	hw->phy.get_link_info = TRUE;
4801	status = i40e_get_link_status(hw, &pf->link_up);
4802	if (status != I40E_SUCCESS) {
4803		device_printf(dev,
4804		    "%s get link status, status: %s aq_err=%s\n",
4805		    __func__, i40e_stat_str(hw, status),
4806		    i40e_aq_str(hw, hw->aq.asq_last_status));
4807		/*
4808		 * Most probably FW has not finished configuring PHY.
4809		 * Retry periodically in a timer callback.
4810		 */
4811		ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING);
4812		pf->link_poll_start = getsbinuptime();
4813		return (EAGAIN);
4814	}
4815 	ixl_dbg_link(pf, "%s link_up: %d\n", __func__, pf->link_up);
4816
4817	/* Flow Control mode not set by user, read current FW settings */
4818	if (pf->fc == -1)
4819		pf->fc = hw->fc.current_mode;
4820
4821	return (0);
4822}
4823
4824static int
4825ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4826{
4827	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4828	int requested = 0, error = 0;
4829
4830	/* Read in new mode */
4831	error = sysctl_handle_int(oidp, &requested, 0, req);
4832	if ((error) || (req->newptr == NULL))
4833		return (error);
4834
4835	/* Initiate the PF reset later in the admin task */
4836	ixl_set_state(&pf->state, IXL_STATE_PF_RESET_REQ);
4837
4838	return (error);
4839}
4840
4841static int
4842ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4843{
4844	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4845	struct i40e_hw *hw = &pf->hw;
4846	int requested = 0, error = 0;
4847
4848	/* Read in new mode */
4849	error = sysctl_handle_int(oidp, &requested, 0, req);
4850	if ((error) || (req->newptr == NULL))
4851		return (error);
4852
4853	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4854
4855	return (error);
4856}
4857
4858static int
4859ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4860{
4861	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4862	struct i40e_hw *hw = &pf->hw;
4863	int requested = 0, error = 0;
4864
4865	/* Read in new mode */
4866	error = sysctl_handle_int(oidp, &requested, 0, req);
4867	if ((error) || (req->newptr == NULL))
4868		return (error);
4869
4870	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4871
4872	return (error);
4873}
4874
4875/*
4876 * Print out mapping of TX queue indexes and Rx queue indexes
4877 * to MSI-X vectors.
4878 */
4879static int
4880ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4881{
4882	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4883	struct ixl_vsi *vsi = &pf->vsi;
4884	device_t dev = pf->dev;
4885	struct sbuf *buf;
4886	int error = 0;
4887
4888	struct ixl_rx_queue *rx_que = vsi->rx_queues;
4889	struct ixl_tx_queue *tx_que = vsi->tx_queues;
4890
4891	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4892	if (!buf) {
4893		device_printf(dev, "Could not allocate sbuf for output.\n");
4894		return (ENOMEM);
4895	}
4896
4897	sbuf_cat(buf, "\n");
4898	for (int i = 0; i < vsi->num_rx_queues; i++) {
4899		rx_que = &vsi->rx_queues[i];
4900		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4901	}
4902	for (int i = 0; i < vsi->num_tx_queues; i++) {
4903		tx_que = &vsi->tx_queues[i];
4904		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4905	}
4906
4907	error = sbuf_finish(buf);
4908	if (error)
4909		device_printf(dev, "Error finishing sbuf: %d\n", error);
4910	sbuf_delete(buf);
4911
4912	return (error);
4913}
4914