1/******************************************************************************
2
3  Copyright (c) 2013-2018, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD$*/
34
35
36#include "ixl_pf.h"
37
38#ifdef PCI_IOV
39#include "ixl_pf_iov.h"
40#endif
41
42#ifdef IXL_IW
43#include "ixl_iw.h"
44#include "ixl_iw_int.h"
45#endif
46
47static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
48static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
50static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
51static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
52static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
53static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
54
55/* Sysctls */
56static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
57static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
58static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
59static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
60static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
61static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
62static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
63
64static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
65static int	ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);
66
67/* Debug Sysctls */
68static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
69static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
70static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
71static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
72static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
73static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
74static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
75static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
76static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
77static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
78static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
79static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
80static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
81static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
82static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
83static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
84static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
85static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
86static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
87static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
88
89/* Debug Sysctls */
90static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
91static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
92static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
93static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
94#ifdef IXL_DEBUG
95static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
96static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
97#endif
98
99#ifdef IXL_IW
100extern int ixl_enable_iwarp;
101extern int ixl_limit_iwarp_msix;
102#endif
103
104static const char * const ixl_fc_string[6] = {
105	"None",
106	"Rx",
107	"Tx",
108	"Full",
109	"Priority",
110	"Default"
111};
112
113static char *ixl_fec_string[3] = {
114       "CL108 RS-FEC",
115       "CL74 FC-FEC/BASE-R",
116       "None"
117};
118
119MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
120
121/*
122** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
123*/
124void
125ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
126{
127	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
128	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
129	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
130
131	sbuf_printf(buf,
132	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
133	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
134	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
135	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
136	    IXL_NVM_VERSION_HI_SHIFT,
137	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
138	    IXL_NVM_VERSION_LO_SHIFT,
139	    hw->nvm.eetrack,
140	    oem_ver, oem_build, oem_patch);
141}
142
143void
144ixl_print_nvm_version(struct ixl_pf *pf)
145{
146	struct i40e_hw *hw = &pf->hw;
147	device_t dev = pf->dev;
148	struct sbuf *sbuf;
149
150	sbuf = sbuf_new_auto();
151	ixl_nvm_version_str(hw, sbuf);
152	sbuf_finish(sbuf);
153	device_printf(dev, "%s\n", sbuf_data(sbuf));
154	sbuf_delete(sbuf);
155}
156
157/**
158 * ixl_get_fw_mode - Check the state of FW
159 * @hw: device hardware structure
160 *
161 * Identify state of FW. It might be in a recovery mode
162 * which limits functionality and requires special handling
163 * from the driver.
164 *
165 * @returns FW mode (normal, recovery, unexpected EMP reset)
166 */
167static enum ixl_fw_mode
168ixl_get_fw_mode(struct ixl_pf *pf)
169{
170	struct i40e_hw *hw = &pf->hw;
171	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
172	u32 fwsts;
173
174#ifdef IXL_DEBUG
175	if (pf->recovery_mode)
176		return IXL_FW_MODE_RECOVERY;
177#endif
178	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
179
180	/* Is set and has one of expected values */
181	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
182	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
183	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
184	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
185		fw_mode = IXL_FW_MODE_RECOVERY;
186	else {
187		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
188		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
189			fw_mode = IXL_FW_MODE_UEMPR;
190	}
191	return (fw_mode);
192}
193
194/**
195 * ixl_pf_reset - Reset the PF
196 * @pf: PF structure
197 *
198 * Ensure that FW is in the right state and do the reset
199 * if needed.
200 *
201 * @returns zero on success, or an error code on failure.
202 */
203int
204ixl_pf_reset(struct ixl_pf *pf)
205{
206	struct i40e_hw *hw = &pf->hw;
207	enum i40e_status_code status;
208	enum ixl_fw_mode fw_mode;
209
210	fw_mode = ixl_get_fw_mode(pf);
211	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
212	if (fw_mode == IXL_FW_MODE_RECOVERY) {
213		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
214		/* Don't try to reset device if it's in recovery mode */
215		return (0);
216	}
217
218	status = i40e_pf_reset(hw);
219	if (status == I40E_SUCCESS)
220		return (0);
221
222	/* Check FW mode again in case it has changed while
223	 * waiting for reset to complete */
224	fw_mode = ixl_get_fw_mode(pf);
225	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
226	if (fw_mode == IXL_FW_MODE_RECOVERY) {
227		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
228		return (0);
229	}
230
231	if (fw_mode == IXL_FW_MODE_UEMPR)
232		device_printf(pf->dev,
233		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
234	else
235		device_printf(pf->dev, "PF reset failure %s\n",
236		    i40e_stat_str(hw, status));
237	return (EIO);
238}
239
240/**
241 * ixl_setup_hmc - Setup LAN Host Memory Cache
242 * @pf: PF structure
243 *
244 * Init and configure LAN Host Memory Cache
245 *
246 * @returns 0 on success, EIO on error
247 */
248int
249ixl_setup_hmc(struct ixl_pf *pf)
250{
251	struct i40e_hw *hw = &pf->hw;
252	enum i40e_status_code status;
253
254	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
255	    hw->func_caps.num_rx_qp, 0, 0);
256	if (status) {
257		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
258		    i40e_stat_str(hw, status));
259		return (EIO);
260	}
261
262	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
263	if (status) {
264		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
265		    i40e_stat_str(hw, status));
266		return (EIO);
267	}
268
269	return (0);
270}
271
272/**
273 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
274 * @pf: PF structure
275 *
276 * Shutdown Host Memory Cache if configured.
277 *
278 */
279void
280ixl_shutdown_hmc(struct ixl_pf *pf)
281{
282	struct i40e_hw *hw = &pf->hw;
283	enum i40e_status_code status;
284
285	/* HMC not configured, no need to shutdown */
286	if (hw->hmc.hmc_obj == NULL)
287		return;
288
289	status = i40e_shutdown_lan_hmc(hw);
290	if (status)
291		device_printf(pf->dev,
292		    "Shutdown LAN HMC failed with code %s\n",
293		    i40e_stat_str(hw, status));
294}
295/*
296 * Write PF ITR values to queue ITR registers.
297 */
298void
299ixl_configure_itr(struct ixl_pf *pf)
300{
301	ixl_configure_tx_itr(pf);
302	ixl_configure_rx_itr(pf);
303}
304
305/*********************************************************************
306 *
307 *  Get the hardware capabilities
308 *
309 **********************************************************************/
310
311int
312ixl_get_hw_capabilities(struct ixl_pf *pf)
313{
314	struct i40e_aqc_list_capabilities_element_resp *buf;
315	struct i40e_hw	*hw = &pf->hw;
316	device_t 	dev = pf->dev;
317	enum i40e_status_code status;
318	int len, i2c_intfc_num;
319	bool again = TRUE;
320	u16 needed;
321
322	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
323		hw->func_caps.iwarp = 0;
324		return (0);
325	}
326
327	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
328retry:
329	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
330	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
331		device_printf(dev, "Unable to allocate cap memory\n");
332                return (ENOMEM);
333	}
334
335	/* This populates the hw struct */
336        status = i40e_aq_discover_capabilities(hw, buf, len,
337	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
338	free(buf, M_IXL);
339	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
340	    (again == TRUE)) {
341		/* retry once with a larger buffer */
342		again = FALSE;
343		len = needed;
344		goto retry;
345	} else if (status != I40E_SUCCESS) {
346		device_printf(dev, "capability discovery failed; status %s, error %s\n",
347		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
348		return (ENODEV);
349	}
350
351	/*
352	 * Some devices have both MDIO and I2C; since this isn't reported
353	 * by the FW, check registers to see if an I2C interface exists.
354	 */
355	i2c_intfc_num = ixl_find_i2c_interface(pf);
356	if (i2c_intfc_num != -1)
357		pf->has_i2c = true;
358
359	/* Determine functions to use for driver I2C accesses */
360	switch (pf->i2c_access_method) {
361	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
362		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
363			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
364			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
365		} else {
366			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
367			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
368		}
369		break;
370	}
371	case IXL_I2C_ACCESS_METHOD_AQ:
372		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
373		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
374		break;
375	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
376		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
377		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
378		break;
379	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
380		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
381		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
382		break;
383	default:
384		/* Should not happen */
385		device_printf(dev, "Error setting I2C access functions\n");
386		break;
387	}
388
389	/* Keep link active by default */
390	atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
391
392	/* Print a subset of the capability information. */
393	device_printf(dev,
394	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
395	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
396	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
397	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
398	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
399	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
400	    "MDIO shared");
401
402	return (0);
403}
404
405/* For the set_advertise sysctl */
406void
407ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
408{
409	device_t dev = pf->dev;
410	int err;
411
412	/* Make sure to initialize the device to the complete list of
413	 * supported speeds on driver load, to ensure unloading and
414	 * reloading the driver will restore this value.
415	 */
416	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
417	if (err) {
418		/* Non-fatal error */
419		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
420			      __func__, err);
421		return;
422	}
423
424	pf->advertised_speed =
425	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
426}
427
428int
429ixl_teardown_hw_structs(struct ixl_pf *pf)
430{
431	enum i40e_status_code status = 0;
432	struct i40e_hw *hw = &pf->hw;
433	device_t dev = pf->dev;
434
435	/* Shutdown LAN HMC */
436	if (hw->hmc.hmc_obj) {
437		status = i40e_shutdown_lan_hmc(hw);
438		if (status) {
439			device_printf(dev,
440			    "init: LAN HMC shutdown failure; status %s\n",
441			    i40e_stat_str(hw, status));
442			goto err_out;
443		}
444	}
445
446	/* Shutdown admin queue */
447	ixl_disable_intr0(hw);
448	status = i40e_shutdown_adminq(hw);
449	if (status)
450		device_printf(dev,
451		    "init: Admin Queue shutdown failure; status %s\n",
452		    i40e_stat_str(hw, status));
453
454	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
455err_out:
456	return (status);
457}
458
459/*
460** Creates new filter with given MAC address and VLAN ID
461*/
462static struct ixl_mac_filter *
463ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
464{
465	struct ixl_mac_filter  *f;
466
467	/* create a new empty filter */
468	f = malloc(sizeof(struct ixl_mac_filter),
469	    M_IXL, M_NOWAIT | M_ZERO);
470	if (f) {
471		LIST_INSERT_HEAD(headp, f, ftle);
472		bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
473		f->vlan = vlan;
474	}
475
476	return (f);
477}
478
479/**
480 * ixl_free_filters - Free all filters in given list
481 * headp - pointer to list head
482 *
483 * Frees memory used by each entry in the list.
484 * Does not remove filters from HW.
485 */
486void
487ixl_free_filters(struct ixl_ftl_head *headp)
488{
489	struct ixl_mac_filter *f, *nf;
490
491	f = LIST_FIRST(headp);
492	while (f != NULL) {
493		nf = LIST_NEXT(f, ftle);
494		free(f, M_IXL);
495		f = nf;
496	}
497
498	LIST_INIT(headp);
499}
500
501static u_int
502ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
503{
504	struct ixl_add_maddr_arg *ama = arg;
505	struct ixl_vsi *vsi = ama->vsi;
506	const u8 *macaddr = (u8*)LLADDR(sdl);
507	struct ixl_mac_filter *f;
508
509	/* Does one already exist */
510	f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
511	if (f != NULL)
512		return (0);
513
514	f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
515	if (f == NULL) {
516		device_printf(vsi->dev, "WARNING: no filter available!!\n");
517		return (0);
518	}
519	f->flags |= IXL_FILTER_MC;
520
521	return (1);
522}
523
524/*********************************************************************
525 * 	Filter Routines
526 *
527 *	Routines for multicast and vlan filter management.
528 *
529 *********************************************************************/
530void
531ixl_add_multi(struct ixl_vsi *vsi)
532{
533	struct ifnet		*ifp = vsi->ifp;
534	struct i40e_hw		*hw = vsi->hw;
535	int			mcnt = 0;
536	struct ixl_add_maddr_arg cb_arg;
537
538	IOCTL_DEBUGOUT("ixl_add_multi: begin");
539
540	mcnt = if_llmaddr_count(ifp);
541	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
542		i40e_aq_set_vsi_multicast_promiscuous(hw,
543		    vsi->seid, TRUE, NULL);
544		/* delete all existing MC filters */
545		ixl_del_multi(vsi, true);
546		return;
547	}
548
549	cb_arg.vsi = vsi;
550	LIST_INIT(&cb_arg.to_add);
551
552	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
553	if (mcnt > 0)
554		ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);
555
556	IOCTL_DEBUGOUT("ixl_add_multi: end");
557}
558
559static u_int
560ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
561{
562	struct ixl_mac_filter *f = arg;
563
564	if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
565		return (1);
566	else
567		return (0);
568}
569
570void
571ixl_del_multi(struct ixl_vsi *vsi, bool all)
572{
573	struct ixl_ftl_head	to_del;
574	struct ifnet		*ifp = vsi->ifp;
575	struct ixl_mac_filter	*f, *fn;
576	int			mcnt = 0;
577
578	IOCTL_DEBUGOUT("ixl_del_multi: begin");
579
580	LIST_INIT(&to_del);
581	/* Search for removed multicast addresses */
582	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
583		if ((f->flags & IXL_FILTER_MC) == 0 ||
584		    (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)))
585			continue;
586
587		LIST_REMOVE(f, ftle);
588		LIST_INSERT_HEAD(&to_del, f, ftle);
589		mcnt++;
590	}
591
592	if (mcnt > 0)
593		ixl_del_hw_filters(vsi, &to_del, mcnt);
594}
595
596void
597ixl_link_up_msg(struct ixl_pf *pf)
598{
599	struct i40e_hw *hw = &pf->hw;
600	struct ifnet *ifp = pf->vsi.ifp;
601	char *req_fec_string, *neg_fec_string;
602	u8 fec_abilities;
603
604	fec_abilities = hw->phy.link_info.req_fec_info;
605	/* If both RS and KR are requested, only show RS */
606	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
607		req_fec_string = ixl_fec_string[0];
608	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
609		req_fec_string = ixl_fec_string[1];
610	else
611		req_fec_string = ixl_fec_string[2];
612
613	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
614		neg_fec_string = ixl_fec_string[0];
615	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
616		neg_fec_string = ixl_fec_string[1];
617	else
618		neg_fec_string = ixl_fec_string[2];
619
620	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
621	    ifp->if_xname,
622	    ixl_link_speed_string(hw->phy.link_info.link_speed),
623	    req_fec_string, neg_fec_string,
624	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
625	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
626	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
627		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
628		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
629		ixl_fc_string[1] : ixl_fc_string[0]);
630}
631
632/*
633 * Configure admin queue/misc interrupt cause registers in hardware.
634 */
635void
636ixl_configure_intr0_msix(struct ixl_pf *pf)
637{
638	struct i40e_hw *hw = &pf->hw;
639	u32 reg;
640
641	/* First set up the adminq - vector 0 */
642	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
643	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
644
645	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
646	    I40E_PFINT_ICR0_ENA_GRST_MASK |
647	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
648	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
649	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
650	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
651	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
652	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
653	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
654
655	/*
656	 * 0x7FF is the end of the queue list.
657	 * This means we won't use MSI-X vector 0 for a queue interrupt
658	 * in MSI-X mode.
659	 */
660	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
661	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
662	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
663
664	wr32(hw, I40E_PFINT_DYN_CTL0,
665	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
666	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
667
668	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
669}
670
671void
672ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
673{
674	/* Display supported media types */
675	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
676		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
677
678	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
679		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
680	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
681		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
682	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
683		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
684
685	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
686		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
687
688	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
689		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
690
691	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
692	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
693	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
694		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
695
696	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
697		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
698	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
699		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
700	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
701		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
702
703	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
704	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
705	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
706	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
707	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
708		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
709	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
710		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
711	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
712		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
713
714	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
715		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
716
717	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
718	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
719		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
720	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
721		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
722	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
723		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
724	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
725		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
726	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
727		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
728
729	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
730		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
731
732	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
733		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
734	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
735		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
736
737	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
738		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
739	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
740		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
741	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
742		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
743	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
744		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
745	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
746		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
747	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
748		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
749}
750
751/*********************************************************************
752 *
753 *  Get Firmware Switch configuration
754 *	- this will need to be more robust when more complex
755 *	  switch configurations are enabled.
756 *
757 **********************************************************************/
758int
759ixl_switch_config(struct ixl_pf *pf)
760{
761	struct i40e_hw	*hw = &pf->hw;
762	struct ixl_vsi	*vsi = &pf->vsi;
763	device_t 	dev = iflib_get_dev(vsi->ctx);
764	struct i40e_aqc_get_switch_config_resp *sw_config;
765	u8	aq_buf[I40E_AQ_LARGE_BUF];
766	int	ret;
767	u16	next = 0;
768
769	memset(&aq_buf, 0, sizeof(aq_buf));
770	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
771	ret = i40e_aq_get_switch_config(hw, sw_config,
772	    sizeof(aq_buf), &next, NULL);
773	if (ret) {
774		device_printf(dev, "aq_get_switch_config() failed, error %d,"
775		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
776		return (ret);
777	}
778	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
779		device_printf(dev,
780		    "Switch config: header reported: %d in structure, %d total\n",
781		    LE16_TO_CPU(sw_config->header.num_reported),
782		    LE16_TO_CPU(sw_config->header.num_total));
783		for (int i = 0;
784		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
785			device_printf(dev,
786			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
787			    sw_config->element[i].element_type,
788			    LE16_TO_CPU(sw_config->element[i].seid),
789			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
790			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
791		}
792	}
793	/* Simplified due to a single VSI */
794	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
795	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
796	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
797	return (ret);
798}
799
800void
801ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
802{
803	struct sysctl_oid *tree;
804	struct sysctl_oid_list *child;
805	struct sysctl_oid_list *vsi_list;
806
807	tree = device_get_sysctl_tree(vsi->dev);
808	child = SYSCTL_CHILDREN(tree);
809	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
810			CTLFLAG_RD, NULL, "VSI Number");
811
812	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
813	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
814
815	/* Copy of netstat RX errors counter for validation purposes */
816	SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
817			CTLFLAG_RD, &vsi->ierrors,
818			"RX packet errors");
819
820	if (queues_sysctls)
821		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
822}
823
824/*
825 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
826 * Writes to the ITR registers immediately.
827 */
828static int
829ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
830{
831	struct ixl_pf *pf = (struct ixl_pf *)arg1;
832	device_t dev = pf->dev;
833	int error = 0;
834	int requested_tx_itr;
835
836	requested_tx_itr = pf->tx_itr;
837	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
838	if ((error) || (req->newptr == NULL))
839		return (error);
840	if (pf->dynamic_tx_itr) {
841		device_printf(dev,
842		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
843		    return (EINVAL);
844	}
845	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
846		device_printf(dev,
847		    "Invalid TX itr value; value must be between 0 and %d\n",
848		        IXL_MAX_ITR);
849		return (EINVAL);
850	}
851
852	pf->tx_itr = requested_tx_itr;
853	ixl_configure_tx_itr(pf);
854
855	return (error);
856}
857
858/*
859 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
860 * Writes to the ITR registers immediately.
861 */
862static int
863ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
864{
865	struct ixl_pf *pf = (struct ixl_pf *)arg1;
866	device_t dev = pf->dev;
867	int error = 0;
868	int requested_rx_itr;
869
870	requested_rx_itr = pf->rx_itr;
871	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
872	if ((error) || (req->newptr == NULL))
873		return (error);
874	if (pf->dynamic_rx_itr) {
875		device_printf(dev,
876		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
877		    return (EINVAL);
878	}
879	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
880		device_printf(dev,
881		    "Invalid RX itr value; value must be between 0 and %d\n",
882		        IXL_MAX_ITR);
883		return (EINVAL);
884	}
885
886	pf->rx_itr = requested_rx_itr;
887	ixl_configure_rx_itr(pf);
888
889	return (error);
890}
891
892void
893ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
894	struct sysctl_oid_list *child,
895	struct i40e_hw_port_stats *stats)
896{
897	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
898	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
899	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
900
901	struct i40e_eth_stats *eth_stats = &stats->eth;
902	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
903
904	struct ixl_sysctl_info ctls[] =
905	{
906		{&stats->crc_errors, "crc_errors", "CRC Errors"},
907		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
908		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
909		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
910		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
911		/* Packet Reception Stats */
912		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
913		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
914		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
915		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
916		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
917		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
918		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
919		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
920		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
921		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
922		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
923		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
924		/* Packet Transmission Stats */
925		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
926		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
927		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
928		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
929		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
930		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
931		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
932		/* Flow control */
933		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
934		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
935		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
936		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
937		/* End */
938		{0,0,0}
939	};
940
941	struct ixl_sysctl_info *entry = ctls;
942	while (entry->stat != 0)
943	{
944		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
945				CTLFLAG_RD, entry->stat,
946				entry->description);
947		entry++;
948	}
949}
950
951void
952ixl_set_rss_key(struct ixl_pf *pf)
953{
954	struct i40e_hw *hw = &pf->hw;
955	struct ixl_vsi *vsi = &pf->vsi;
956	device_t	dev = pf->dev;
957	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
958	enum i40e_status_code status;
959
960#ifdef RSS
961        /* Fetch the configured RSS key */
962        rss_getkey((uint8_t *) &rss_seed);
963#else
964	ixl_get_default_rss_key(rss_seed);
965#endif
966	/* Fill out hash function seed */
967	if (hw->mac.type == I40E_MAC_X722) {
968		struct i40e_aqc_get_set_rss_key_data key_data;
969		bcopy(rss_seed, &key_data, 52);
970		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
971		if (status)
972			device_printf(dev,
973			    "i40e_aq_set_rss_key status %s, error %s\n",
974			    i40e_stat_str(hw, status),
975			    i40e_aq_str(hw, hw->aq.asq_last_status));
976	} else {
977		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
978			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
979	}
980}
981
982/*
983 * Configure enabled PCTYPES for RSS.
984 */
985void
986ixl_set_rss_pctypes(struct ixl_pf *pf)
987{
988	struct i40e_hw *hw = &pf->hw;
989	u64		set_hena = 0, hena;
990
991#ifdef RSS
992	u32		rss_hash_config;
993
994	rss_hash_config = rss_gethashconfig();
995	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
996                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
997	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
998                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
999	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1000                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1001	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1002                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1003	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1004		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1005	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1006                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1007        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1008                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1009#else
1010	if (hw->mac.type == I40E_MAC_X722)
1011		set_hena = IXL_DEFAULT_RSS_HENA_X722;
1012	else
1013		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1014#endif
1015	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1016	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1017	hena |= set_hena;
1018	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1019	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1020
1021}
1022
1023/*
1024** Setup the PF's RSS parameters.
1025*/
1026void
1027ixl_config_rss(struct ixl_pf *pf)
1028{
1029	ixl_set_rss_key(pf);
1030	ixl_set_rss_pctypes(pf);
1031	ixl_set_rss_hlut(pf);
1032}
1033
1034/*
1035 * In some firmware versions there is default MAC/VLAN filter
1036 * configured which interferes with filters managed by driver.
1037 * Make sure it's removed.
1038 */
1039void
1040ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1041{
1042	struct i40e_aqc_remove_macvlan_element_data e;
1043
1044	bzero(&e, sizeof(e));
1045	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1046	e.vlan_tag = 0;
1047	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1048	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1049
1050	bzero(&e, sizeof(e));
1051	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1052	e.vlan_tag = 0;
1053	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1054		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1055	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1056}
1057
1058/*
1059** Initialize filter list and add filters that the hardware
1060** needs to know about.
1061**
1062** Requires VSI's seid to be set before calling.
1063*/
1064void
1065ixl_init_filters(struct ixl_vsi *vsi)
1066{
1067	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1068
1069	ixl_dbg_filter(pf, "%s: start\n", __func__);
1070
1071	/* Initialize mac filter list for VSI */
1072	LIST_INIT(&vsi->ftl);
1073	vsi->num_hw_filters = 0;
1074
1075	/* Receive broadcast Ethernet frames */
1076	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1077
1078	if (IXL_VSI_IS_VF(vsi))
1079		return;
1080
1081	ixl_del_default_hw_filters(vsi);
1082
1083	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1084
1085	/*
1086	 * Prevent Tx flow control frames from being sent out by
1087	 * non-firmware transmitters.
1088	 * This affects every VSI in the PF.
1089	 */
1090#ifndef IXL_DEBUG_FC
1091	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1092#else
1093	if (pf->enable_tx_fc_filter)
1094		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1095#endif
1096}
1097
1098void
1099ixl_reconfigure_filters(struct ixl_vsi *vsi)
1100{
1101	struct i40e_hw *hw = vsi->hw;
1102	struct ixl_ftl_head tmp;
1103	int cnt;
1104
1105	/*
1106	 * The ixl_add_hw_filters function adds filters configured
1107	 * in HW to a list in VSI. Move all filters to a temporary
1108	 * list to avoid corrupting it by concatenating to itself.
1109	 */
1110	LIST_INIT(&tmp);
1111	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
1112	cnt = vsi->num_hw_filters;
1113	vsi->num_hw_filters = 0;
1114
1115	ixl_add_hw_filters(vsi, &tmp, cnt);
1116
1117	/* Filter could be removed if MAC address was changed */
1118	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1119
1120	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
1121		return;
1122	/*
1123	 * VLAN HW filtering is enabled, make sure that filters
1124	 * for all registered VLAN tags are configured
1125	 */
1126	ixl_add_vlan_filters(vsi, hw->mac.addr);
1127}
1128
1129/*
1130 * This routine adds a MAC/VLAN filter to the software filter
1131 * list, then adds that new filter to the HW if it doesn't already
1132 * exist in the SW filter list.
1133 */
1134void
1135ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1136{
1137	struct ixl_mac_filter	*f, *tmp;
1138	struct ixl_pf		*pf;
1139	device_t		dev;
1140	struct ixl_ftl_head	to_add;
1141	int			to_add_cnt;
1142
1143	pf = vsi->back;
1144	dev = pf->dev;
1145	to_add_cnt = 1;
1146
1147	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
1148	    MAC_FORMAT_ARGS(macaddr), vlan);
1149
1150	/* Does one already exist */
1151	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1152	if (f != NULL)
1153		return;
1154
1155	LIST_INIT(&to_add);
1156	f = ixl_new_filter(&to_add, macaddr, vlan);
1157	if (f == NULL) {
1158		device_printf(dev, "WARNING: no filter available!!\n");
1159		return;
1160	}
1161	if (f->vlan != IXL_VLAN_ANY)
1162		f->flags |= IXL_FILTER_VLAN;
1163	else
1164		vsi->num_macs++;
1165
1166	/*
1167	** Is this the first vlan being registered, if so we
1168	** need to remove the ANY filter that indicates we are
1169	** not in a vlan, and replace that with a 0 filter.
1170	*/
1171	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1172		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
1173		if (tmp != NULL) {
1174			struct ixl_ftl_head to_del;
1175
1176			/* Prepare new filter first to avoid removing
1177			 * VLAN_ANY filter if allocation fails */
1178			f = ixl_new_filter(&to_add, macaddr, 0);
1179			if (f == NULL) {
1180				device_printf(dev, "WARNING: no filter available!!\n");
1181				free(LIST_FIRST(&to_add), M_IXL);
1182				return;
1183			}
1184			to_add_cnt++;
1185
1186			LIST_REMOVE(tmp, ftle);
1187			LIST_INIT(&to_del);
1188			LIST_INSERT_HEAD(&to_del, tmp, ftle);
1189			ixl_del_hw_filters(vsi, &to_del, 1);
1190		}
1191	}
1192
1193	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1194}
1195
1196/**
1197 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
1198 * @vsi: pointer to VSI
1199 * @macaddr: MAC address
1200 *
1201 * Adds MAC/VLAN filter for each VLAN configured on the interface
1202 * if there is enough HW filters. Otherwise adds a single filter
1203 * for all tagged and untagged frames to allow all configured VLANs
1204 * to recieve traffic.
1205 */
1206void
1207ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1208{
1209	struct ixl_ftl_head to_add;
1210	struct ixl_mac_filter *f;
1211	int to_add_cnt = 0;
1212	int i, vlan = 0;
1213
1214	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
1215		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1216		return;
1217	}
1218	LIST_INIT(&to_add);
1219
1220	/* Add filter for untagged frames if it does not exist yet */
1221	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
1222	if (f == NULL) {
1223		f = ixl_new_filter(&to_add, macaddr, 0);
1224		if (f == NULL) {
1225			device_printf(vsi->dev, "WARNING: no filter available!!\n");
1226			return;
1227		}
1228		to_add_cnt++;
1229	}
1230
1231	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
1232		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
1233		if (vlan == -1)
1234			break;
1235
1236		/* Does one already exist */
1237		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1238		if (f != NULL)
1239			continue;
1240
1241		f = ixl_new_filter(&to_add, macaddr, vlan);
1242		if (f == NULL) {
1243			device_printf(vsi->dev, "WARNING: no filter available!!\n");
1244			ixl_free_filters(&to_add);
1245			return;
1246		}
1247		to_add_cnt++;
1248	}
1249
1250	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1251}
1252
1253void
1254ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1255{
1256	struct ixl_mac_filter *f, *tmp;
1257	struct ixl_ftl_head ftl_head;
1258	int to_del_cnt = 1;
1259
1260	ixl_dbg_filter((struct ixl_pf *)vsi->back,
1261	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1262	    MAC_FORMAT_ARGS(macaddr), vlan);
1263
1264	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1265	if (f == NULL)
1266		return;
1267
1268	LIST_REMOVE(f, ftle);
1269	LIST_INIT(&ftl_head);
1270	LIST_INSERT_HEAD(&ftl_head, f, ftle);
1271	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1272		vsi->num_macs--;
1273
1274	/* If this is not the last vlan just remove the filter */
1275	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
1276		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1277		return;
1278	}
1279
1280	/* It's the last vlan, we need to switch back to a non-vlan filter */
1281	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
1282	if (tmp != NULL) {
1283		LIST_REMOVE(tmp, ftle);
1284		LIST_INSERT_AFTER(f, tmp, ftle);
1285		to_del_cnt++;
1286	}
1287	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1288
1289	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1290}
1291
1292/**
1293 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
1294 * @vsi: VSI which filters need to be removed
1295 * @macaddr: MAC address
1296 *
1297 * Remove all MAC/VLAN filters with a given MAC address. For multicast
1298 * addresses there is always single filter for all VLANs used (IXL_VLAN_ANY)
1299 * so skip them to speed up processing. Those filters should be removed
1300 * using ixl_del_filter function.
1301 */
1302void
1303ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1304{
1305	struct ixl_mac_filter *f, *tmp;
1306	struct ixl_ftl_head to_del;
1307	int to_del_cnt = 0;
1308
1309	LIST_INIT(&to_del);
1310
1311	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
1312		if ((f->flags & IXL_FILTER_MC) != 0 ||
1313		    !ixl_ether_is_equal(f->macaddr, macaddr))
1314			continue;
1315
1316		LIST_REMOVE(f, ftle);
1317		LIST_INSERT_HEAD(&to_del, f, ftle);
1318		to_del_cnt++;
1319	}
1320
1321	ixl_dbg_filter((struct ixl_pf *)vsi->back,
1322	    "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
1323	    __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
1324	if (to_del_cnt > 0)
1325		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
1326}
1327
1328/*
1329** Find the filter with both matching mac addr and vlan id
1330*/
1331struct ixl_mac_filter *
1332ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
1333{
1334	struct ixl_mac_filter	*f;
1335
1336	LIST_FOREACH(f, headp, ftle) {
1337		if (ixl_ether_is_equal(f->macaddr, macaddr) &&
1338		    (f->vlan == vlan)) {
1339			return (f);
1340		}
1341	}
1342
1343	return (NULL);
1344}
1345
1346/*
1347** This routine takes additions to the vsi filter
1348** table and creates an Admin Queue call to create
1349** the filters in the hardware.
1350*/
1351void
1352ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
1353{
1354	struct i40e_aqc_add_macvlan_element_data *a, *b;
1355	struct ixl_mac_filter	*f, *fn;
1356	struct ixl_pf		*pf;
1357	struct i40e_hw		*hw;
1358	device_t		dev;
1359	enum i40e_status_code	status;
1360	int			j = 0;
1361
1362	pf = vsi->back;
1363	dev = vsi->dev;
1364	hw = &pf->hw;
1365
1366	ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);
1367
1368	if (cnt < 1) {
1369		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
1370		return;
1371	}
1372
1373	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1374	    M_IXL, M_NOWAIT | M_ZERO);
1375	if (a == NULL) {
1376		device_printf(dev, "add_hw_filters failed to get memory\n");
1377		return;
1378	}
1379
1380	LIST_FOREACH(f, to_add, ftle) {
1381		b = &a[j]; // a pox on fvl long names :)
1382		bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
1383		if (f->vlan == IXL_VLAN_ANY) {
1384			b->vlan_tag = 0;
1385			b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1386		} else {
1387			b->vlan_tag = f->vlan;
1388			b->flags = 0;
1389		}
1390		b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1391		ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1392		    MAC_FORMAT_ARGS(f->macaddr));
1393
1394		if (++j == cnt)
1395			break;
1396	}
1397	if (j != cnt) {
1398		/* Something went wrong */
1399		device_printf(dev,
1400		    "%s ERROR: list of filters to short expected: %d, found: %d\n",
1401		    __func__, cnt, j);
1402		ixl_free_filters(to_add);
1403		goto out_free;
1404	}
1405
1406	status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
1407	if (status == I40E_SUCCESS) {
1408		LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
1409		vsi->num_hw_filters += j;
1410		goto out_free;
1411	}
1412
1413	device_printf(dev,
1414	    "i40e_aq_add_macvlan status %s, error %s\n",
1415	    i40e_stat_str(hw, status),
1416	    i40e_aq_str(hw, hw->aq.asq_last_status));
1417	j = 0;
1418
1419	/* Verify which filters were actually configured in HW
1420	 * and add them to the list */
1421	LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
1422		LIST_REMOVE(f, ftle);
1423		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
1424			ixl_dbg_filter(pf,
1425			    "%s filter " MAC_FORMAT " VTAG: %d not added\n",
1426			    __func__,
1427			    MAC_FORMAT_ARGS(f->macaddr),
1428			    f->vlan);
1429			free(f, M_IXL);
1430		} else {
1431			LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
1432			vsi->num_hw_filters++;
1433		}
1434		j++;
1435	}
1436
1437out_free:
1438	free(a, M_IXL);
1439}
1440
1441/*
1442** This routine takes removals in the vsi filter
1443** table and creates an Admin Queue call to delete
1444** the filters in the hardware.
1445*/
1446void
1447ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
1448{
1449	struct i40e_aqc_remove_macvlan_element_data *d, *e;
1450	struct ixl_pf		*pf;
1451	struct i40e_hw		*hw;
1452	device_t		dev;
1453	struct ixl_mac_filter	*f, *f_temp;
1454	enum i40e_status_code	status;
1455	int			j = 0;
1456
1457	pf = vsi->back;
1458	hw = &pf->hw;
1459	dev = vsi->dev;
1460
1461	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
1462
1463	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
1464	    M_IXL, M_NOWAIT | M_ZERO);
1465	if (d == NULL) {
1466		device_printf(dev, "%s: failed to get memory\n", __func__);
1467		return;
1468	}
1469
1470	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
1471		e = &d[j]; // a pox on fvl long names :)
1472		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
1473		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1474		if (f->vlan == IXL_VLAN_ANY) {
1475			e->vlan_tag = 0;
1476			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1477		} else {
1478			e->vlan_tag = f->vlan;
1479		}
1480
1481		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
1482		    MAC_FORMAT_ARGS(f->macaddr));
1483
1484		/* delete entry from the list */
1485		LIST_REMOVE(f, ftle);
1486		free(f, M_IXL);
1487		if (++j == cnt)
1488			break;
1489	}
1490	if (j != cnt || !LIST_EMPTY(to_del)) {
1491		/* Something went wrong */
1492		device_printf(dev,
1493		    "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
1494		    __func__, cnt, j);
1495		ixl_free_filters(to_del);
1496		goto out_free;
1497	}
1498	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
1499	if (status) {
1500		device_printf(dev,
1501		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
1502		    __func__, i40e_stat_str(hw, status),
1503		    i40e_aq_str(hw, hw->aq.asq_last_status));
1504		for (int i = 0; i < j; i++) {
1505			if (d[i].error_code == 0)
1506				continue;
1507			device_printf(dev,
1508			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
1509			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
1510			    d[i].vlan_tag);
1511		}
1512	}
1513
1514	vsi->num_hw_filters -= j;
1515
1516out_free:
1517	free(d, M_IXL);
1518
1519	ixl_dbg_filter(pf, "%s: end\n", __func__);
1520}
1521
1522int
1523ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1524{
1525	struct i40e_hw	*hw = &pf->hw;
1526	int		error = 0;
1527	u32		reg;
1528	u16		pf_qidx;
1529
1530	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1531
1532	ixl_dbg(pf, IXL_DBG_EN_DIS,
1533	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
1534	    pf_qidx, vsi_qidx);
1535
1536	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1537
1538	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1539	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1540	    I40E_QTX_ENA_QENA_STAT_MASK;
1541	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1542	/* Verify the enable took */
1543	for (int j = 0; j < 10; j++) {
1544		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1545		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1546			break;
1547		i40e_usec_delay(10);
1548	}
1549	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1550		device_printf(pf->dev, "TX queue %d still disabled!\n",
1551		    pf_qidx);
1552		error = ETIMEDOUT;
1553	}
1554
1555	return (error);
1556}
1557
1558int
1559ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1560{
1561	struct i40e_hw	*hw = &pf->hw;
1562	int		error = 0;
1563	u32		reg;
1564	u16		pf_qidx;
1565
1566	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1567
1568	ixl_dbg(pf, IXL_DBG_EN_DIS,
1569	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1570	    pf_qidx, vsi_qidx);
1571
1572	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1573	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1574	    I40E_QRX_ENA_QENA_STAT_MASK;
1575	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1576	/* Verify the enable took */
1577	for (int j = 0; j < 10; j++) {
1578		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1579		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1580			break;
1581		i40e_usec_delay(10);
1582	}
1583	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1584		device_printf(pf->dev, "RX queue %d still disabled!\n",
1585		    pf_qidx);
1586		error = ETIMEDOUT;
1587	}
1588
1589	return (error);
1590}
1591
1592int
1593ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1594{
1595	int error = 0;
1596
1597	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1598	/* Called function already prints error message */
1599	if (error)
1600		return (error);
1601	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1602	return (error);
1603}
1604
1605/*
1606 * Returns error on first ring that is detected hung.
1607 */
1608int
1609ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1610{
1611	struct i40e_hw	*hw = &pf->hw;
1612	int		error = 0;
1613	u32		reg;
1614	u16		pf_qidx;
1615
1616	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1617
1618	ixl_dbg(pf, IXL_DBG_EN_DIS,
1619	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1620	    pf_qidx, vsi_qidx);
1621
1622	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1623	i40e_usec_delay(500);
1624
1625	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1626	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1627	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1628	/* Verify the disable took */
1629	for (int j = 0; j < 10; j++) {
1630		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1631		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1632			break;
1633		i40e_msec_delay(10);
1634	}
1635	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1636		device_printf(pf->dev, "TX queue %d still enabled!\n",
1637		    pf_qidx);
1638		error = ETIMEDOUT;
1639	}
1640
1641	return (error);
1642}
1643
1644/*
1645 * Returns error on first ring that is detected hung.
1646 */
1647int
1648ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1649{
1650	struct i40e_hw	*hw = &pf->hw;
1651	int		error = 0;
1652	u32		reg;
1653	u16		pf_qidx;
1654
1655	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1656
1657	ixl_dbg(pf, IXL_DBG_EN_DIS,
1658	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1659	    pf_qidx, vsi_qidx);
1660
1661	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1662	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1663	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1664	/* Verify the disable took */
1665	for (int j = 0; j < 10; j++) {
1666		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1667		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1668			break;
1669		i40e_msec_delay(10);
1670	}
1671	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1672		device_printf(pf->dev, "RX queue %d still enabled!\n",
1673		    pf_qidx);
1674		error = ETIMEDOUT;
1675	}
1676
1677	return (error);
1678}
1679
1680int
1681ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1682{
1683	int error = 0;
1684
1685	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1686	/* Called function already prints error message */
1687	if (error)
1688		return (error);
1689	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1690	return (error);
1691}
1692
1693static void
1694ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1695{
1696	struct i40e_hw *hw = &pf->hw;
1697	device_t dev = pf->dev;
1698	struct ixl_vf *vf;
1699	bool mdd_detected = false;
1700	bool pf_mdd_detected = false;
1701	bool vf_mdd_detected = false;
1702	u16 vf_num, queue;
1703	u8 pf_num, event;
1704	u8 pf_mdet_num, vp_mdet_num;
1705	u32 reg;
1706
1707	/* find what triggered the MDD event */
1708	reg = rd32(hw, I40E_GL_MDET_TX);
1709	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1710		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1711		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
1712		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1713		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
1714		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1715		    I40E_GL_MDET_TX_EVENT_SHIFT;
1716		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1717		    I40E_GL_MDET_TX_QUEUE_SHIFT;
1718		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1719		mdd_detected = true;
1720	}
1721
1722	if (!mdd_detected)
1723		return;
1724
1725	reg = rd32(hw, I40E_PF_MDET_TX);
1726	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1727		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1728		pf_mdet_num = hw->pf_id;
1729		pf_mdd_detected = true;
1730	}
1731
1732	/* Check if MDD was caused by a VF */
1733	for (int i = 0; i < pf->num_vfs; i++) {
1734		vf = &(pf->vfs[i]);
1735		reg = rd32(hw, I40E_VP_MDET_TX(i));
1736		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1737			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1738			vp_mdet_num = i;
1739			vf->num_mdd_events++;
1740			vf_mdd_detected = true;
1741		}
1742	}
1743
1744	/* Print out an error message */
1745	if (vf_mdd_detected && pf_mdd_detected)
1746		device_printf(dev,
1747		    "Malicious Driver Detection event %d"
1748		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1749		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1750	else if (vf_mdd_detected && !pf_mdd_detected)
1751		device_printf(dev,
1752		    "Malicious Driver Detection event %d"
1753		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1754		    event, queue, pf_num, vf_num, vp_mdet_num);
1755	else if (!vf_mdd_detected && pf_mdd_detected)
1756		device_printf(dev,
1757		    "Malicious Driver Detection event %d"
1758		    " on TX queue %d, pf number %d (PF-%d)\n",
1759		    event, queue, pf_num, pf_mdet_num);
1760	/* Theoretically shouldn't happen */
1761	else
1762		device_printf(dev,
1763		    "TX Malicious Driver Detection event (unknown)\n");
1764}
1765
1766static void
1767ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1768{
1769	struct i40e_hw *hw = &pf->hw;
1770	device_t dev = pf->dev;
1771	struct ixl_vf *vf;
1772	bool mdd_detected = false;
1773	bool pf_mdd_detected = false;
1774	bool vf_mdd_detected = false;
1775	u16 queue;
1776	u8 pf_num, event;
1777	u8 pf_mdet_num, vp_mdet_num;
1778	u32 reg;
1779
1780	/*
1781	 * GL_MDET_RX doesn't contain VF number information, unlike
1782	 * GL_MDET_TX.
1783	 */
1784	reg = rd32(hw, I40E_GL_MDET_RX);
1785	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1786		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1787		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
1788		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1789		    I40E_GL_MDET_RX_EVENT_SHIFT;
1790		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1791		    I40E_GL_MDET_RX_QUEUE_SHIFT;
1792		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1793		mdd_detected = true;
1794	}
1795
1796	if (!mdd_detected)
1797		return;
1798
1799	reg = rd32(hw, I40E_PF_MDET_RX);
1800	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1801		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1802		pf_mdet_num = hw->pf_id;
1803		pf_mdd_detected = true;
1804	}
1805
1806	/* Check if MDD was caused by a VF */
1807	for (int i = 0; i < pf->num_vfs; i++) {
1808		vf = &(pf->vfs[i]);
1809		reg = rd32(hw, I40E_VP_MDET_RX(i));
1810		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1811			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1812			vp_mdet_num = i;
1813			vf->num_mdd_events++;
1814			vf_mdd_detected = true;
1815		}
1816	}
1817
1818	/* Print out an error message */
1819	if (vf_mdd_detected && pf_mdd_detected)
1820		device_printf(dev,
1821		    "Malicious Driver Detection event %d"
1822		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1823		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1824	else if (vf_mdd_detected && !pf_mdd_detected)
1825		device_printf(dev,
1826		    "Malicious Driver Detection event %d"
1827		    " on RX queue %d, pf number %d, (VF-%d)\n",
1828		    event, queue, pf_num, vp_mdet_num);
1829	else if (!vf_mdd_detected && pf_mdd_detected)
1830		device_printf(dev,
1831		    "Malicious Driver Detection event %d"
1832		    " on RX queue %d, pf number %d (PF-%d)\n",
1833		    event, queue, pf_num, pf_mdet_num);
1834	/* Theoretically shouldn't happen */
1835	else
1836		device_printf(dev,
1837		    "RX Malicious Driver Detection event (unknown)\n");
1838}
1839
1840/**
1841 * ixl_handle_mdd_event
1842 *
1843 * Called from interrupt handler to identify possibly malicious vfs
1844 * (But also detects events from the PF, as well)
1845 **/
1846void
1847ixl_handle_mdd_event(struct ixl_pf *pf)
1848{
1849	struct i40e_hw *hw = &pf->hw;
1850	u32 reg;
1851
1852	/*
1853	 * Handle both TX/RX because it's possible they could
1854	 * both trigger in the same interrupt.
1855	 */
1856	ixl_handle_tx_mdd_event(pf);
1857	ixl_handle_rx_mdd_event(pf);
1858
1859	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
1860
1861	/* re-enable mdd interrupt cause */
1862	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1863	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1864	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1865	ixl_flush(hw);
1866}
1867
1868void
1869ixl_enable_intr0(struct i40e_hw *hw)
1870{
1871	u32		reg;
1872
1873	/* Use IXL_ITR_NONE so ITR isn't updated here */
1874	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1875	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1876	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1877	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1878}
1879
1880void
1881ixl_disable_intr0(struct i40e_hw *hw)
1882{
1883	u32		reg;
1884
1885	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1886	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1887	ixl_flush(hw);
1888}
1889
1890void
1891ixl_enable_queue(struct i40e_hw *hw, int id)
1892{
1893	u32		reg;
1894
1895	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1896	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1897	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1898	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1899}
1900
1901void
1902ixl_disable_queue(struct i40e_hw *hw, int id)
1903{
1904	u32		reg;
1905
1906	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1907	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1908}
1909
1910void
1911ixl_handle_empr_reset(struct ixl_pf *pf)
1912{
1913	struct ixl_vsi	*vsi = &pf->vsi;
1914	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
1915
1916	ixl_prepare_for_reset(pf, is_up);
1917	/*
1918	 * i40e_pf_reset checks the type of reset and acts
1919	 * accordingly. If EMP or Core reset was performed
1920	 * doing PF reset is not necessary and it sometimes
1921	 * fails.
1922	 */
1923	ixl_pf_reset(pf);
1924
1925	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
1926	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
1927		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
1928		device_printf(pf->dev,
1929		    "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
1930		pf->link_up = FALSE;
1931		ixl_update_link_status(pf);
1932	}
1933
1934	ixl_rebuild_hw_structs_after_reset(pf, is_up);
1935
1936	atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING);
1937}
1938
1939void
1940ixl_update_stats_counters(struct ixl_pf *pf)
1941{
1942	struct i40e_hw	*hw = &pf->hw;
1943	struct ixl_vsi	*vsi = &pf->vsi;
1944	struct ixl_vf	*vf;
1945	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
1946
1947	struct i40e_hw_port_stats *nsd = &pf->stats;
1948	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
1949
1950	/* Update hw stats */
1951	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1952			   pf->stat_offsets_loaded,
1953			   &osd->crc_errors, &nsd->crc_errors);
1954	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1955			   pf->stat_offsets_loaded,
1956			   &osd->illegal_bytes, &nsd->illegal_bytes);
1957	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
1958			   I40E_GLPRT_GORCL(hw->port),
1959			   pf->stat_offsets_loaded,
1960			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
1961	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
1962			   I40E_GLPRT_GOTCL(hw->port),
1963			   pf->stat_offsets_loaded,
1964			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
1965	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
1966			   pf->stat_offsets_loaded,
1967			   &osd->eth.rx_discards,
1968			   &nsd->eth.rx_discards);
1969	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
1970			   I40E_GLPRT_UPRCL(hw->port),
1971			   pf->stat_offsets_loaded,
1972			   &osd->eth.rx_unicast,
1973			   &nsd->eth.rx_unicast);
1974	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1975			   I40E_GLPRT_UPTCL(hw->port),
1976			   pf->stat_offsets_loaded,
1977			   &osd->eth.tx_unicast,
1978			   &nsd->eth.tx_unicast);
1979	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
1980			   I40E_GLPRT_MPRCL(hw->port),
1981			   pf->stat_offsets_loaded,
1982			   &osd->eth.rx_multicast,
1983			   &nsd->eth.rx_multicast);
1984	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1985			   I40E_GLPRT_MPTCL(hw->port),
1986			   pf->stat_offsets_loaded,
1987			   &osd->eth.tx_multicast,
1988			   &nsd->eth.tx_multicast);
1989	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
1990			   I40E_GLPRT_BPRCL(hw->port),
1991			   pf->stat_offsets_loaded,
1992			   &osd->eth.rx_broadcast,
1993			   &nsd->eth.rx_broadcast);
1994	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
1995			   I40E_GLPRT_BPTCL(hw->port),
1996			   pf->stat_offsets_loaded,
1997			   &osd->eth.tx_broadcast,
1998			   &nsd->eth.tx_broadcast);
1999
2000	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2001			   pf->stat_offsets_loaded,
2002			   &osd->tx_dropped_link_down,
2003			   &nsd->tx_dropped_link_down);
2004	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2005			   pf->stat_offsets_loaded,
2006			   &osd->mac_local_faults,
2007			   &nsd->mac_local_faults);
2008	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2009			   pf->stat_offsets_loaded,
2010			   &osd->mac_remote_faults,
2011			   &nsd->mac_remote_faults);
2012	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2013			   pf->stat_offsets_loaded,
2014			   &osd->rx_length_errors,
2015			   &nsd->rx_length_errors);
2016
2017	/* Flow control (LFC) stats */
2018	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2019			   pf->stat_offsets_loaded,
2020			   &osd->link_xon_rx, &nsd->link_xon_rx);
2021	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2022			   pf->stat_offsets_loaded,
2023			   &osd->link_xon_tx, &nsd->link_xon_tx);
2024	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2025			   pf->stat_offsets_loaded,
2026			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
2027	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2028			   pf->stat_offsets_loaded,
2029			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
2030
2031	/*
2032	 * For watchdog management we need to know if we have been paused
2033	 * during the last interval, so capture that here.
2034	 */
2035	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2036		vsi->shared->isc_pause_frames = 1;
2037
2038	/* Packet size stats rx */
2039	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2040			   I40E_GLPRT_PRC64L(hw->port),
2041			   pf->stat_offsets_loaded,
2042			   &osd->rx_size_64, &nsd->rx_size_64);
2043	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2044			   I40E_GLPRT_PRC127L(hw->port),
2045			   pf->stat_offsets_loaded,
2046			   &osd->rx_size_127, &nsd->rx_size_127);
2047	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2048			   I40E_GLPRT_PRC255L(hw->port),
2049			   pf->stat_offsets_loaded,
2050			   &osd->rx_size_255, &nsd->rx_size_255);
2051	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2052			   I40E_GLPRT_PRC511L(hw->port),
2053			   pf->stat_offsets_loaded,
2054			   &osd->rx_size_511, &nsd->rx_size_511);
2055	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2056			   I40E_GLPRT_PRC1023L(hw->port),
2057			   pf->stat_offsets_loaded,
2058			   &osd->rx_size_1023, &nsd->rx_size_1023);
2059	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2060			   I40E_GLPRT_PRC1522L(hw->port),
2061			   pf->stat_offsets_loaded,
2062			   &osd->rx_size_1522, &nsd->rx_size_1522);
2063	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2064			   I40E_GLPRT_PRC9522L(hw->port),
2065			   pf->stat_offsets_loaded,
2066			   &osd->rx_size_big, &nsd->rx_size_big);
2067
2068	/* Packet size stats tx */
2069	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2070			   I40E_GLPRT_PTC64L(hw->port),
2071			   pf->stat_offsets_loaded,
2072			   &osd->tx_size_64, &nsd->tx_size_64);
2073	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2074			   I40E_GLPRT_PTC127L(hw->port),
2075			   pf->stat_offsets_loaded,
2076			   &osd->tx_size_127, &nsd->tx_size_127);
2077	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2078			   I40E_GLPRT_PTC255L(hw->port),
2079			   pf->stat_offsets_loaded,
2080			   &osd->tx_size_255, &nsd->tx_size_255);
2081	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2082			   I40E_GLPRT_PTC511L(hw->port),
2083			   pf->stat_offsets_loaded,
2084			   &osd->tx_size_511, &nsd->tx_size_511);
2085	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2086			   I40E_GLPRT_PTC1023L(hw->port),
2087			   pf->stat_offsets_loaded,
2088			   &osd->tx_size_1023, &nsd->tx_size_1023);
2089	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2090			   I40E_GLPRT_PTC1522L(hw->port),
2091			   pf->stat_offsets_loaded,
2092			   &osd->tx_size_1522, &nsd->tx_size_1522);
2093	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2094			   I40E_GLPRT_PTC9522L(hw->port),
2095			   pf->stat_offsets_loaded,
2096			   &osd->tx_size_big, &nsd->tx_size_big);
2097
2098	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2099			   pf->stat_offsets_loaded,
2100			   &osd->rx_undersize, &nsd->rx_undersize);
2101	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2102			   pf->stat_offsets_loaded,
2103			   &osd->rx_fragments, &nsd->rx_fragments);
2104	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2105			   pf->stat_offsets_loaded,
2106			   &osd->rx_oversize, &nsd->rx_oversize);
2107	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2108			   pf->stat_offsets_loaded,
2109			   &osd->rx_jabber, &nsd->rx_jabber);
2110	/* EEE */
2111	i40e_get_phy_lpi_status(hw, nsd);
2112
2113	i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2114			  &osd->tx_lpi_count, &nsd->tx_lpi_count,
2115			  &osd->rx_lpi_count, &nsd->rx_lpi_count);
2116
2117	pf->stat_offsets_loaded = true;
2118	/* End hw stats */
2119
2120	/* Update vsi stats */
2121	ixl_update_vsi_stats(vsi);
2122
2123	for (int i = 0; i < pf->num_vfs; i++) {
2124		vf = &pf->vfs[i];
2125		if (vf->vf_flags & VF_FLAG_ENABLED)
2126			ixl_update_eth_stats(&pf->vfs[i].vsi);
2127	}
2128}
2129
2130/**
2131 * Update VSI-specific ethernet statistics counters.
2132 **/
2133void
2134ixl_update_eth_stats(struct ixl_vsi *vsi)
2135{
2136	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2137	struct i40e_hw *hw = &pf->hw;
2138	struct i40e_eth_stats *es;
2139	struct i40e_eth_stats *oes;
2140	u16 stat_idx = vsi->info.stat_counter_idx;
2141
2142	es = &vsi->eth_stats;
2143	oes = &vsi->eth_stats_offsets;
2144
2145	/* Gather up the stats that the hw collects */
2146	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2147			   vsi->stat_offsets_loaded,
2148			   &oes->tx_errors, &es->tx_errors);
2149	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2150			   vsi->stat_offsets_loaded,
2151			   &oes->rx_discards, &es->rx_discards);
2152
2153	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2154			   I40E_GLV_GORCL(stat_idx),
2155			   vsi->stat_offsets_loaded,
2156			   &oes->rx_bytes, &es->rx_bytes);
2157	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2158			   I40E_GLV_UPRCL(stat_idx),
2159			   vsi->stat_offsets_loaded,
2160			   &oes->rx_unicast, &es->rx_unicast);
2161	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2162			   I40E_GLV_MPRCL(stat_idx),
2163			   vsi->stat_offsets_loaded,
2164			   &oes->rx_multicast, &es->rx_multicast);
2165	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2166			   I40E_GLV_BPRCL(stat_idx),
2167			   vsi->stat_offsets_loaded,
2168			   &oes->rx_broadcast, &es->rx_broadcast);
2169
2170	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2171			   I40E_GLV_GOTCL(stat_idx),
2172			   vsi->stat_offsets_loaded,
2173			   &oes->tx_bytes, &es->tx_bytes);
2174	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2175			   I40E_GLV_UPTCL(stat_idx),
2176			   vsi->stat_offsets_loaded,
2177			   &oes->tx_unicast, &es->tx_unicast);
2178	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2179			   I40E_GLV_MPTCL(stat_idx),
2180			   vsi->stat_offsets_loaded,
2181			   &oes->tx_multicast, &es->tx_multicast);
2182	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2183			   I40E_GLV_BPTCL(stat_idx),
2184			   vsi->stat_offsets_loaded,
2185			   &oes->tx_broadcast, &es->tx_broadcast);
2186	vsi->stat_offsets_loaded = true;
2187}
2188
2189void
2190ixl_update_vsi_stats(struct ixl_vsi *vsi)
2191{
2192	struct ixl_pf		*pf;
2193	struct ifnet		*ifp;
2194	struct i40e_eth_stats	*es;
2195	u64			tx_discards, csum_errs;
2196
2197	struct i40e_hw_port_stats *nsd;
2198
2199	pf = vsi->back;
2200	ifp = vsi->ifp;
2201	es = &vsi->eth_stats;
2202	nsd = &pf->stats;
2203
2204	ixl_update_eth_stats(vsi);
2205
2206	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2207
2208	csum_errs = 0;
2209	for (int i = 0; i < vsi->num_rx_queues; i++)
2210		csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2211	nsd->checksum_error = csum_errs;
2212
2213	/* Update ifnet stats */
2214	IXL_SET_IPACKETS(vsi, es->rx_unicast +
2215	                   es->rx_multicast +
2216			   es->rx_broadcast);
2217	IXL_SET_OPACKETS(vsi, es->tx_unicast +
2218	                   es->tx_multicast +
2219			   es->tx_broadcast);
2220	IXL_SET_IBYTES(vsi, es->rx_bytes);
2221	IXL_SET_OBYTES(vsi, es->tx_bytes);
2222	IXL_SET_IMCASTS(vsi, es->rx_multicast);
2223	IXL_SET_OMCASTS(vsi, es->tx_multicast);
2224
2225	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2226	    nsd->checksum_error + nsd->rx_length_errors +
2227	    nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2228	    nsd->rx_jabber);
2229	IXL_SET_OERRORS(vsi, es->tx_errors);
2230	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2231	IXL_SET_OQDROPS(vsi, tx_discards);
2232	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2233	IXL_SET_COLLISIONS(vsi, 0);
2234}
2235
2236/**
2237 * Reset all of the stats for the given pf
2238 **/
2239void
2240ixl_pf_reset_stats(struct ixl_pf *pf)
2241{
2242	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2243	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2244	pf->stat_offsets_loaded = false;
2245}
2246
2247/**
2248 * Resets all stats of the given vsi
2249 **/
2250void
2251ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2252{
2253	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2254	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2255	vsi->stat_offsets_loaded = false;
2256}
2257
2258/**
2259 * Read and update a 48 bit stat from the hw
2260 *
2261 * Since the device stats are not reset at PFReset, they likely will not
2262 * be zeroed when the driver starts.  We'll save the first values read
2263 * and use them as offsets to be subtracted from the raw values in order
2264 * to report stats that count from zero.
2265 **/
2266void
2267ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2268	bool offset_loaded, u64 *offset, u64 *stat)
2269{
2270	u64 new_data;
2271
2272#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
2273	new_data = rd64(hw, loreg);
2274#else
2275	/*
2276	 * Use two rd32's instead of one rd64; FreeBSD versions before
2277	 * 10 don't support 64-bit bus reads/writes.
2278	 */
2279	new_data = rd32(hw, loreg);
2280	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
2281#endif
2282
2283	if (!offset_loaded)
2284		*offset = new_data;
2285	if (new_data >= *offset)
2286		*stat = new_data - *offset;
2287	else
2288		*stat = (new_data + ((u64)1 << 48)) - *offset;
2289	*stat &= 0xFFFFFFFFFFFFULL;
2290}
2291
2292/**
2293 * Read and update a 32 bit stat from the hw
2294 **/
2295void
2296ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2297	bool offset_loaded, u64 *offset, u64 *stat)
2298{
2299	u32 new_data;
2300
2301	new_data = rd32(hw, reg);
2302	if (!offset_loaded)
2303		*offset = new_data;
2304	if (new_data >= *offset)
2305		*stat = (u32)(new_data - *offset);
2306	else
2307		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2308}
2309
2310/**
2311 * Add subset of device sysctls safe to use in recovery mode
2312 */
2313void
2314ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2315{
2316	device_t dev = pf->dev;
2317
2318	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2319	struct sysctl_oid_list *ctx_list =
2320	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2321
2322	struct sysctl_oid *debug_node;
2323	struct sysctl_oid_list *debug_list;
2324
2325	SYSCTL_ADD_PROC(ctx, ctx_list,
2326	    OID_AUTO, "fw_version",
2327	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2328	    ixl_sysctl_show_fw, "A", "Firmware version");
2329
2330	/* Add sysctls meant to print debug information, but don't list them
2331	 * in "sysctl -a" output. */
2332	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2333	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2334	    "Debug Sysctls");
2335	debug_list = SYSCTL_CHILDREN(debug_node);
2336
2337	SYSCTL_ADD_UINT(ctx, debug_list,
2338	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2339	    &pf->hw.debug_mask, 0, "Shared code debug message level");
2340
2341	SYSCTL_ADD_UINT(ctx, debug_list,
2342	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2343	    &pf->dbg_mask, 0, "Non-shared code debug message level");
2344
2345	SYSCTL_ADD_PROC(ctx, debug_list,
2346	    OID_AUTO, "dump_debug_data",
2347	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2348	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2349
2350	SYSCTL_ADD_PROC(ctx, debug_list,
2351	    OID_AUTO, "do_pf_reset",
2352	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2353	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2354
2355	SYSCTL_ADD_PROC(ctx, debug_list,
2356	    OID_AUTO, "do_core_reset",
2357	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2358	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2359
2360	SYSCTL_ADD_PROC(ctx, debug_list,
2361	    OID_AUTO, "do_global_reset",
2362	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2363	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2364
2365	SYSCTL_ADD_PROC(ctx, debug_list,
2366	    OID_AUTO, "queue_interrupt_table",
2367	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2368	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2369}
2370
2371void
2372ixl_add_device_sysctls(struct ixl_pf *pf)
2373{
2374	device_t dev = pf->dev;
2375	struct i40e_hw *hw = &pf->hw;
2376
2377	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2378	struct sysctl_oid_list *ctx_list =
2379	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2380
2381	struct sysctl_oid *debug_node;
2382	struct sysctl_oid_list *debug_list;
2383
2384	struct sysctl_oid *fec_node;
2385	struct sysctl_oid_list *fec_list;
2386	struct sysctl_oid *eee_node;
2387	struct sysctl_oid_list *eee_list;
2388
2389	/* Set up sysctls */
2390	SYSCTL_ADD_PROC(ctx, ctx_list,
2391	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2392	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2393
2394	SYSCTL_ADD_PROC(ctx, ctx_list,
2395	    OID_AUTO, "advertise_speed",
2396	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2397	    ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2398
2399	SYSCTL_ADD_PROC(ctx, ctx_list,
2400	    OID_AUTO, "supported_speeds",
2401	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2402	    ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2403
2404	SYSCTL_ADD_PROC(ctx, ctx_list,
2405	    OID_AUTO, "current_speed",
2406	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2407	    ixl_sysctl_current_speed, "A", "Current Port Speed");
2408
2409	SYSCTL_ADD_PROC(ctx, ctx_list,
2410	    OID_AUTO, "fw_version",
2411	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2412	    ixl_sysctl_show_fw, "A", "Firmware version");
2413
2414	SYSCTL_ADD_PROC(ctx, ctx_list,
2415	    OID_AUTO, "unallocated_queues",
2416	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2417	    ixl_sysctl_unallocated_queues, "I",
2418	    "Queues not allocated to a PF or VF");
2419
2420	SYSCTL_ADD_PROC(ctx, ctx_list,
2421	    OID_AUTO, "tx_itr",
2422	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2423	    ixl_sysctl_pf_tx_itr, "I",
2424	    "Immediately set TX ITR value for all queues");
2425
2426	SYSCTL_ADD_PROC(ctx, ctx_list,
2427	    OID_AUTO, "rx_itr",
2428	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2429	    ixl_sysctl_pf_rx_itr, "I",
2430	    "Immediately set RX ITR value for all queues");
2431
2432	SYSCTL_ADD_INT(ctx, ctx_list,
2433	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2434	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2435
2436	SYSCTL_ADD_INT(ctx, ctx_list,
2437	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2438	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2439
2440	/* Add FEC sysctls for 25G adapters */
2441	if (i40e_is_25G_device(hw->device_id)) {
2442		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2443		    OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2444		    "FEC Sysctls");
2445		fec_list = SYSCTL_CHILDREN(fec_node);
2446
2447		SYSCTL_ADD_PROC(ctx, fec_list,
2448		    OID_AUTO, "fc_ability",
2449		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2450		    ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2451
2452		SYSCTL_ADD_PROC(ctx, fec_list,
2453		    OID_AUTO, "rs_ability",
2454		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2455		    ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2456
2457		SYSCTL_ADD_PROC(ctx, fec_list,
2458		    OID_AUTO, "fc_requested",
2459		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2460		    ixl_sysctl_fec_fc_request, "I",
2461		    "FC FEC mode requested on link");
2462
2463		SYSCTL_ADD_PROC(ctx, fec_list,
2464		    OID_AUTO, "rs_requested",
2465		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2466		    ixl_sysctl_fec_rs_request, "I",
2467		    "RS FEC mode requested on link");
2468
2469		SYSCTL_ADD_PROC(ctx, fec_list,
2470		    OID_AUTO, "auto_fec_enabled",
2471		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2472		    ixl_sysctl_fec_auto_enable, "I",
2473		    "Let FW decide FEC ability/request modes");
2474	}
2475
2476	SYSCTL_ADD_PROC(ctx, ctx_list,
2477	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2478	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2479
2480	eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2481	    OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2482	    "Energy Efficient Ethernet (EEE) Sysctls");
2483	eee_list = SYSCTL_CHILDREN(eee_node);
2484
2485	SYSCTL_ADD_PROC(ctx, eee_list,
2486	    OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2487	    pf, 0, ixl_sysctl_eee_enable, "I",
2488	    "Enable Energy Efficient Ethernet (EEE)");
2489
2490	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2491	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2492	    "TX LPI status");
2493
2494	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2495	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2496	    "RX LPI status");
2497
2498	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2499	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2500	    "TX LPI count");
2501
2502	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2503	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2504	    "RX LPI count");
2505
2506	SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
2507	    "link_active_on_if_down",
2508	    CTLTYPE_INT | CTLFLAG_RWTUN,
2509	    pf, 0, ixl_sysctl_set_link_active, "I",
2510	    IXL_SYSCTL_HELP_SET_LINK_ACTIVE);
2511
2512	/* Add sysctls meant to print debug information, but don't list them
2513	 * in "sysctl -a" output. */
2514	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2515	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2516	    "Debug Sysctls");
2517	debug_list = SYSCTL_CHILDREN(debug_node);
2518
2519	SYSCTL_ADD_UINT(ctx, debug_list,
2520	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2521	    &pf->hw.debug_mask, 0, "Shared code debug message level");
2522
2523	SYSCTL_ADD_UINT(ctx, debug_list,
2524	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2525	    &pf->dbg_mask, 0, "Non-shared code debug message level");
2526
2527	SYSCTL_ADD_PROC(ctx, debug_list,
2528	    OID_AUTO, "link_status",
2529	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2530	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2531
2532	SYSCTL_ADD_PROC(ctx, debug_list,
2533	    OID_AUTO, "phy_abilities_init",
2534	    CTLTYPE_STRING | CTLFLAG_RD,
2535	    pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");
2536
2537	SYSCTL_ADD_PROC(ctx, debug_list,
2538	    OID_AUTO, "phy_abilities",
2539	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2540	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2541
2542	SYSCTL_ADD_PROC(ctx, debug_list,
2543	    OID_AUTO, "filter_list",
2544	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2545	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2546
2547	SYSCTL_ADD_PROC(ctx, debug_list,
2548	    OID_AUTO, "hw_res_alloc",
2549	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2550	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2551
2552	SYSCTL_ADD_PROC(ctx, debug_list,
2553	    OID_AUTO, "switch_config",
2554	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2555	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2556
2557	SYSCTL_ADD_PROC(ctx, debug_list,
2558	    OID_AUTO, "switch_vlans",
2559	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2560	    pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2561
2562	SYSCTL_ADD_PROC(ctx, debug_list,
2563	    OID_AUTO, "rss_key",
2564	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2565	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2566
2567	SYSCTL_ADD_PROC(ctx, debug_list,
2568	    OID_AUTO, "rss_lut",
2569	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2570	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2571
2572	SYSCTL_ADD_PROC(ctx, debug_list,
2573	    OID_AUTO, "rss_hena",
2574	    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2575	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2576
2577	SYSCTL_ADD_PROC(ctx, debug_list,
2578	    OID_AUTO, "disable_fw_link_management",
2579	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2580	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2581
2582	SYSCTL_ADD_PROC(ctx, debug_list,
2583	    OID_AUTO, "dump_debug_data",
2584	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2585	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2586
2587	SYSCTL_ADD_PROC(ctx, debug_list,
2588	    OID_AUTO, "do_pf_reset",
2589	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2590	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2591
2592	SYSCTL_ADD_PROC(ctx, debug_list,
2593	    OID_AUTO, "do_core_reset",
2594	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2595	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2596
2597	SYSCTL_ADD_PROC(ctx, debug_list,
2598	    OID_AUTO, "do_global_reset",
2599	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2600	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2601
2602	SYSCTL_ADD_PROC(ctx, debug_list,
2603	    OID_AUTO, "queue_interrupt_table",
2604	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2605	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2606
2607	if (pf->has_i2c) {
2608		SYSCTL_ADD_PROC(ctx, debug_list,
2609		    OID_AUTO, "read_i2c_byte",
2610		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2611		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2612
2613		SYSCTL_ADD_PROC(ctx, debug_list,
2614		    OID_AUTO, "write_i2c_byte",
2615		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2616		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2617
2618		SYSCTL_ADD_PROC(ctx, debug_list,
2619		    OID_AUTO, "read_i2c_diag_data",
2620		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2621		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2622	}
2623}
2624
2625/*
2626 * Primarily for finding out how many queues can be assigned to VFs,
2627 * at runtime.
2628 */
2629static int
2630ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2631{
2632	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2633	int queues;
2634
2635	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2636
2637	return sysctl_handle_int(oidp, NULL, queues, req);
2638}
2639
2640static const char *
2641ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2642{
2643	const char * link_speed_str[] = {
2644		"Unknown",
2645		"100 Mbps",
2646		"1 Gbps",
2647		"10 Gbps",
2648		"40 Gbps",
2649		"20 Gbps",
2650		"25 Gbps",
2651		"2.5 Gbps",
2652		"5 Gbps"
2653	};
2654	int index;
2655
2656	switch (link_speed) {
2657	case I40E_LINK_SPEED_100MB:
2658		index = 1;
2659		break;
2660	case I40E_LINK_SPEED_1GB:
2661		index = 2;
2662		break;
2663	case I40E_LINK_SPEED_10GB:
2664		index = 3;
2665		break;
2666	case I40E_LINK_SPEED_40GB:
2667		index = 4;
2668		break;
2669	case I40E_LINK_SPEED_20GB:
2670		index = 5;
2671		break;
2672	case I40E_LINK_SPEED_25GB:
2673		index = 6;
2674		break;
2675	case I40E_LINK_SPEED_2_5GB:
2676		index = 7;
2677		break;
2678	case I40E_LINK_SPEED_5GB:
2679		index = 8;
2680		break;
2681	case I40E_LINK_SPEED_UNKNOWN:
2682	default:
2683		index = 0;
2684		break;
2685	}
2686
2687	return (link_speed_str[index]);
2688}
2689
2690int
2691ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2692{
2693	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2694	struct i40e_hw *hw = &pf->hw;
2695	int error = 0;
2696
2697	ixl_update_link_status(pf);
2698
2699	error = sysctl_handle_string(oidp,
2700	    __DECONST(void *,
2701		ixl_link_speed_string(hw->phy.link_info.link_speed)),
2702	    8, req);
2703
2704	return (error);
2705}
2706
2707/*
2708 * Converts 8-bit speeds value to and from sysctl flags and
2709 * Admin Queue flags.
2710 */
2711static u8
2712ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2713{
2714#define SPEED_MAP_SIZE 8
2715	static u16 speedmap[SPEED_MAP_SIZE] = {
2716		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
2717		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
2718		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
2719		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
2720		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
2721		(I40E_LINK_SPEED_40GB  | (0x20 << 8)),
2722		(I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2723		(I40E_LINK_SPEED_5GB   | (0x80 << 8)),
2724	};
2725	u8 retval = 0;
2726
2727	for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2728		if (to_aq)
2729			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2730		else
2731			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2732	}
2733
2734	return (retval);
2735}
2736
2737int
2738ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2739{
2740	struct i40e_hw *hw = &pf->hw;
2741	device_t dev = pf->dev;
2742	struct i40e_aq_get_phy_abilities_resp abilities;
2743	struct i40e_aq_set_phy_config config;
2744	enum i40e_status_code aq_error = 0;
2745
2746	/* Get current capability information */
2747	aq_error = i40e_aq_get_phy_capabilities(hw,
2748	    FALSE, FALSE, &abilities, NULL);
2749	if (aq_error) {
2750		device_printf(dev,
2751		    "%s: Error getting phy capabilities %d,"
2752		    " aq error: %d\n", __func__, aq_error,
2753		    hw->aq.asq_last_status);
2754		return (EIO);
2755	}
2756
2757	/* Prepare new config */
2758	bzero(&config, sizeof(config));
2759	if (from_aq)
2760		config.link_speed = speeds;
2761	else
2762		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2763	config.phy_type = abilities.phy_type;
2764	config.phy_type_ext = abilities.phy_type_ext;
2765	config.abilities = abilities.abilities
2766	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2767	config.eee_capability = abilities.eee_capability;
2768	config.eeer = abilities.eeer_val;
2769	config.low_power_ctrl = abilities.d3_lpan;
2770	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2771	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
2772
2773	/* Do aq command & restart link */
2774	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2775	if (aq_error) {
2776		device_printf(dev,
2777		    "%s: Error setting new phy config %d,"
2778		    " aq error: %d\n", __func__, aq_error,
2779		    hw->aq.asq_last_status);
2780		return (EIO);
2781	}
2782
2783	return (0);
2784}
2785
2786/*
2787** Supported link speeds
2788**	Flags:
2789**	 0x1 - 100 Mb
2790**	 0x2 - 1G
2791**	 0x4 - 10G
2792**	 0x8 - 20G
2793**	0x10 - 25G
2794**	0x20 - 40G
2795**	0x40 - 2.5G
2796**	0x80 - 5G
2797*/
2798static int
2799ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2800{
2801	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2802	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2803
2804	return sysctl_handle_int(oidp, NULL, supported, req);
2805}
2806
2807/*
2808** Control link advertise speed:
2809**	Flags:
2810**	 0x1 - advertise 100 Mb
2811**	 0x2 - advertise 1G
2812**	 0x4 - advertise 10G
2813**	 0x8 - advertise 20G
2814**	0x10 - advertise 25G
2815**	0x20 - advertise 40G
2816**	0x40 - advertise 2.5G
2817**	0x80 - advertise 5G
2818**
2819**	Set to 0 to disable link
2820*/
2821int
2822ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2823{
2824	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2825	device_t dev = pf->dev;
2826	u8 converted_speeds;
2827	int requested_ls = 0;
2828	int error = 0;
2829
2830	/* Read in new mode */
2831	requested_ls = pf->advertised_speed;
2832	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2833	if ((error) || (req->newptr == NULL))
2834		return (error);
2835	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2836		device_printf(dev, "Interface is currently in FW recovery mode. "
2837				"Setting advertise speed not supported\n");
2838		return (EINVAL);
2839	}
2840
2841	/* Error out if bits outside of possible flag range are set */
2842	if ((requested_ls & ~((u8)0xFF)) != 0) {
2843		device_printf(dev, "Input advertised speed out of range; "
2844		    "valid flags are: 0x%02x\n",
2845		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2846		return (EINVAL);
2847	}
2848
2849	/* Check if adapter supports input value */
2850	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2851	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2852		device_printf(dev, "Invalid advertised speed; "
2853		    "valid flags are: 0x%02x\n",
2854		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2855		return (EINVAL);
2856	}
2857
2858	error = ixl_set_advertised_speeds(pf, requested_ls, false);
2859	if (error)
2860		return (error);
2861
2862	pf->advertised_speed = requested_ls;
2863	ixl_update_link_status(pf);
2864	return (0);
2865}
2866
2867/*
2868 * Input: bitmap of enum i40e_aq_link_speed
2869 */
2870u64
2871ixl_max_aq_speed_to_value(u8 link_speeds)
2872{
2873	if (link_speeds & I40E_LINK_SPEED_40GB)
2874		return IF_Gbps(40);
2875	if (link_speeds & I40E_LINK_SPEED_25GB)
2876		return IF_Gbps(25);
2877	if (link_speeds & I40E_LINK_SPEED_20GB)
2878		return IF_Gbps(20);
2879	if (link_speeds & I40E_LINK_SPEED_10GB)
2880		return IF_Gbps(10);
2881	if (link_speeds & I40E_LINK_SPEED_5GB)
2882		return IF_Gbps(5);
2883	if (link_speeds & I40E_LINK_SPEED_2_5GB)
2884		return IF_Mbps(2500);
2885	if (link_speeds & I40E_LINK_SPEED_1GB)
2886		return IF_Gbps(1);
2887	if (link_speeds & I40E_LINK_SPEED_100MB)
2888		return IF_Mbps(100);
2889	else
2890		/* Minimum supported link speed */
2891		return IF_Mbps(100);
2892}
2893
2894/*
2895** Get the width and transaction speed of
2896** the bus this adapter is plugged into.
2897*/
2898void
2899ixl_get_bus_info(struct ixl_pf *pf)
2900{
2901	struct i40e_hw *hw = &pf->hw;
2902	device_t dev = pf->dev;
2903        u16 link;
2904        u32 offset, num_ports;
2905	u64 max_speed;
2906
2907	/* Some devices don't use PCIE */
2908	if (hw->mac.type == I40E_MAC_X722)
2909		return;
2910
2911        /* Read PCI Express Capabilities Link Status Register */
2912        pci_find_cap(dev, PCIY_EXPRESS, &offset);
2913        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2914
2915	/* Fill out hw struct with PCIE info */
2916	i40e_set_pci_config_data(hw, link);
2917
2918	/* Use info to print out bandwidth messages */
2919        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
2920            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
2921            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
2922            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
2923            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2924            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2925            (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2926            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2927            ("Unknown"));
2928
2929	/*
2930	 * If adapter is in slot with maximum supported speed,
2931	 * no warning message needs to be printed out.
2932	 */
2933	if (hw->bus.speed >= i40e_bus_speed_8000
2934	    && hw->bus.width >= i40e_bus_width_pcie_x8)
2935		return;
2936
2937	num_ports = bitcount32(hw->func_caps.valid_functions);
2938	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
2939
2940	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
2941                device_printf(dev, "PCI-Express bandwidth available"
2942                    " for this device may be insufficient for"
2943                    " optimal performance.\n");
2944                device_printf(dev, "Please move the device to a different"
2945		    " PCI-e link with more lanes and/or higher"
2946		    " transfer rate.\n");
2947        }
2948}
2949
2950static int
2951ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
2952{
2953	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
2954	struct i40e_hw	*hw = &pf->hw;
2955	struct sbuf	*sbuf;
2956
2957	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2958	ixl_nvm_version_str(hw, sbuf);
2959	sbuf_finish(sbuf);
2960	sbuf_delete(sbuf);
2961
2962	return (0);
2963}
2964
2965void
2966ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
2967{
2968	u8 nvma_ptr = nvma->config & 0xFF;
2969	u8 nvma_flags = (nvma->config & 0xF00) >> 8;
2970	const char * cmd_str;
2971
2972	switch (nvma->command) {
2973	case I40E_NVM_READ:
2974		if (nvma_ptr == 0xF && nvma_flags == 0xF &&
2975		    nvma->offset == 0 && nvma->data_size == 1) {
2976			device_printf(dev, "NVMUPD: Get Driver Status Command\n");
2977			return;
2978		}
2979		cmd_str = "READ ";
2980		break;
2981	case I40E_NVM_WRITE:
2982		cmd_str = "WRITE";
2983		break;
2984	default:
2985		device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
2986		return;
2987	}
2988	device_printf(dev,
2989	    "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
2990	    cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
2991}
2992
2993int
2994ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
2995{
2996	struct i40e_hw *hw = &pf->hw;
2997	struct i40e_nvm_access *nvma;
2998	device_t dev = pf->dev;
2999	enum i40e_status_code status = 0;
3000	size_t nvma_size, ifd_len, exp_len;
3001	int err, perrno;
3002
3003	DEBUGFUNC("ixl_handle_nvmupd_cmd");
3004
3005	/* Sanity checks */
3006	nvma_size = sizeof(struct i40e_nvm_access);
3007	ifd_len = ifd->ifd_len;
3008
3009	if (ifd_len < nvma_size ||
3010	    ifd->ifd_data == NULL) {
3011		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3012		    __func__);
3013		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3014		    __func__, ifd_len, nvma_size);
3015		device_printf(dev, "%s: data pointer: %p\n", __func__,
3016		    ifd->ifd_data);
3017		return (EINVAL);
3018	}
3019
3020	nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3021	err = copyin(ifd->ifd_data, nvma, ifd_len);
3022	if (err) {
3023		device_printf(dev, "%s: Cannot get request from user space\n",
3024		    __func__);
3025		free(nvma, M_IXL);
3026		return (err);
3027	}
3028
3029	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3030		ixl_print_nvm_cmd(dev, nvma);
3031
3032	if (IXL_PF_IS_RESETTING(pf)) {
3033		int count = 0;
3034		while (count++ < 100) {
3035			i40e_msec_delay(100);
3036			if (!(IXL_PF_IS_RESETTING(pf)))
3037				break;
3038		}
3039	}
3040
3041	if (IXL_PF_IS_RESETTING(pf)) {
3042		device_printf(dev,
3043		    "%s: timeout waiting for EMP reset to finish\n",
3044		    __func__);
3045		free(nvma, M_IXL);
3046		return (-EBUSY);
3047	}
3048
3049	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3050		device_printf(dev,
3051		    "%s: invalid request, data size not in supported range\n",
3052		    __func__);
3053		free(nvma, M_IXL);
3054		return (EINVAL);
3055	}
3056
3057	/*
3058	 * Older versions of the NVM update tool don't set ifd_len to the size
3059	 * of the entire buffer passed to the ioctl. Check the data_size field
3060	 * in the contained i40e_nvm_access struct and ensure everything is
3061	 * copied in from userspace.
3062	 */
3063	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3064
3065	if (ifd_len < exp_len) {
3066		ifd_len = exp_len;
3067		nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3068		err = copyin(ifd->ifd_data, nvma, ifd_len);
3069		if (err) {
3070			device_printf(dev, "%s: Cannot get request from user space\n",
3071					__func__);
3072			free(nvma, M_IXL);
3073			return (err);
3074		}
3075	}
3076
3077	// TODO: Might need a different lock here
3078	// IXL_PF_LOCK(pf);
3079	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3080	// IXL_PF_UNLOCK(pf);
3081
3082	err = copyout(nvma, ifd->ifd_data, ifd_len);
3083	free(nvma, M_IXL);
3084	if (err) {
3085		device_printf(dev, "%s: Cannot return data to user space\n",
3086				__func__);
3087		return (err);
3088	}
3089
3090	/* Let the nvmupdate report errors, show them only when debug is enabled */
3091	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3092		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3093		    i40e_stat_str(hw, status), perrno);
3094
3095	/*
3096	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3097	 * to run this ioctl again. So use -EACCES for -EPERM instead.
3098	 */
3099	if (perrno == -EPERM)
3100		return (-EACCES);
3101	else
3102		return (perrno);
3103}
3104
3105int
3106ixl_find_i2c_interface(struct ixl_pf *pf)
3107{
3108	struct i40e_hw *hw = &pf->hw;
3109	bool i2c_en, port_matched;
3110	u32 reg;
3111
3112	for (int i = 0; i < 4; i++) {
3113		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3114		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3115		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3116		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3117		    & BIT(hw->port);
3118		if (i2c_en && port_matched)
3119			return (i);
3120	}
3121
3122	return (-1);
3123}
3124
3125void
3126ixl_set_link(struct ixl_pf *pf, bool enable)
3127{
3128	struct i40e_hw *hw = &pf->hw;
3129	device_t dev = pf->dev;
3130	struct i40e_aq_get_phy_abilities_resp abilities;
3131	struct i40e_aq_set_phy_config config;
3132	enum i40e_status_code aq_error = 0;
3133	u32 phy_type, phy_type_ext;
3134
3135	/* Get initial capability information */
3136	aq_error = i40e_aq_get_phy_capabilities(hw,
3137	    FALSE, TRUE, &abilities, NULL);
3138	if (aq_error) {
3139		device_printf(dev,
3140		    "%s: Error getting phy capabilities %d,"
3141		    " aq error: %d\n", __func__, aq_error,
3142		    hw->aq.asq_last_status);
3143		return;
3144	}
3145
3146	phy_type = abilities.phy_type;
3147	phy_type_ext = abilities.phy_type_ext;
3148
3149	/* Get current capability information */
3150	aq_error = i40e_aq_get_phy_capabilities(hw,
3151	    FALSE, FALSE, &abilities, NULL);
3152	if (aq_error) {
3153		device_printf(dev,
3154		    "%s: Error getting phy capabilities %d,"
3155		    " aq error: %d\n", __func__, aq_error,
3156		    hw->aq.asq_last_status);
3157		return;
3158	}
3159
3160	/* Prepare new config */
3161	memset(&config, 0, sizeof(config));
3162	config.link_speed = abilities.link_speed;
3163	config.abilities = abilities.abilities;
3164	config.eee_capability = abilities.eee_capability;
3165	config.eeer = abilities.eeer_val;
3166	config.low_power_ctrl = abilities.d3_lpan;
3167	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
3168	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
3169	config.phy_type = 0;
3170	config.phy_type_ext = 0;
3171
3172	if (enable) {
3173		config.phy_type = phy_type;
3174		config.phy_type_ext = phy_type_ext;
3175
3176		config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3177		    I40E_AQ_PHY_FLAG_PAUSE_RX);
3178
3179		switch (pf->fc) {
3180		case I40E_FC_FULL:
3181			config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3182			    I40E_AQ_PHY_FLAG_PAUSE_RX;
3183			break;
3184		case I40E_FC_RX_PAUSE:
3185			config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3186			break;
3187		case I40E_FC_TX_PAUSE:
3188			config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3189			break;
3190		default:
3191			break;
3192		}
3193	}
3194
3195	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3196	if (aq_error) {
3197		device_printf(dev,
3198		    "%s: Error setting new phy config %d,"
3199		    " aq error: %d\n", __func__, aq_error,
3200		    hw->aq.asq_last_status);
3201		return;
3202	}
3203
3204	aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3205	if (aq_error) {
3206		device_printf(dev,
3207		    "%s: Error set link config %d,"
3208		    " aq error: %d\n", __func__, aq_error,
3209		    hw->aq.asq_last_status);
3210		return;
3211	}
3212}
3213
3214static char *
3215ixl_phy_type_string(u32 bit_pos, bool ext)
3216{
3217	static char * phy_types_str[32] = {
3218		"SGMII",
3219		"1000BASE-KX",
3220		"10GBASE-KX4",
3221		"10GBASE-KR",
3222		"40GBASE-KR4",
3223		"XAUI",
3224		"XFI",
3225		"SFI",
3226		"XLAUI",
3227		"XLPPI",
3228		"40GBASE-CR4",
3229		"10GBASE-CR1",
3230		"SFP+ Active DA",
3231		"QSFP+ Active DA",
3232		"Reserved (14)",
3233		"Reserved (15)",
3234		"Reserved (16)",
3235		"100BASE-TX",
3236		"1000BASE-T",
3237		"10GBASE-T",
3238		"10GBASE-SR",
3239		"10GBASE-LR",
3240		"10GBASE-SFP+Cu",
3241		"10GBASE-CR1",
3242		"40GBASE-CR4",
3243		"40GBASE-SR4",
3244		"40GBASE-LR4",
3245		"1000BASE-SX",
3246		"1000BASE-LX",
3247		"1000BASE-T Optical",
3248		"20GBASE-KR2",
3249		"Reserved (31)"
3250	};
3251	static char * ext_phy_types_str[8] = {
3252		"25GBASE-KR",
3253		"25GBASE-CR",
3254		"25GBASE-SR",
3255		"25GBASE-LR",
3256		"25GBASE-AOC",
3257		"25GBASE-ACC",
3258		"2.5GBASE-T",
3259		"5GBASE-T"
3260	};
3261
3262	if (ext && bit_pos > 7) return "Invalid_Ext";
3263	if (bit_pos > 31) return "Invalid";
3264
3265	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3266}
3267
3268/* TODO: ERJ: I don't this is necessary anymore. */
3269int
3270ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3271{
3272	device_t dev = pf->dev;
3273	struct i40e_hw *hw = &pf->hw;
3274	struct i40e_aq_desc desc;
3275	enum i40e_status_code status;
3276
3277	struct i40e_aqc_get_link_status *aq_link_status =
3278		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3279
3280	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3281	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3282	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3283	if (status) {
3284		device_printf(dev,
3285		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3286		    __func__, i40e_stat_str(hw, status),
3287		    i40e_aq_str(hw, hw->aq.asq_last_status));
3288		return (EIO);
3289	}
3290
3291	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3292	return (0);
3293}
3294
3295static char *
3296ixl_phy_type_string_ls(u8 val)
3297{
3298	if (val >= 0x1F)
3299		return ixl_phy_type_string(val - 0x1F, true);
3300	else
3301		return ixl_phy_type_string(val, false);
3302}
3303
3304static int
3305ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3306{
3307	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3308	device_t dev = pf->dev;
3309	struct sbuf *buf;
3310	int error = 0;
3311
3312	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3313	if (!buf) {
3314		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3315		return (ENOMEM);
3316	}
3317
3318	struct i40e_aqc_get_link_status link_status;
3319	error = ixl_aq_get_link_status(pf, &link_status);
3320	if (error) {
3321		sbuf_delete(buf);
3322		return (error);
3323	}
3324
3325	sbuf_printf(buf, "\n"
3326	    "PHY Type : 0x%02x<%s>\n"
3327	    "Speed    : 0x%02x\n"
3328	    "Link info: 0x%02x\n"
3329	    "AN info  : 0x%02x\n"
3330	    "Ext info : 0x%02x\n"
3331	    "Loopback : 0x%02x\n"
3332	    "Max Frame: %d\n"
3333	    "Config   : 0x%02x\n"
3334	    "Power    : 0x%02x",
3335	    link_status.phy_type,
3336	    ixl_phy_type_string_ls(link_status.phy_type),
3337	    link_status.link_speed,
3338	    link_status.link_info,
3339	    link_status.an_info,
3340	    link_status.ext_info,
3341	    link_status.loopback,
3342	    link_status.max_frame_size,
3343	    link_status.config,
3344	    link_status.power_desc);
3345
3346	error = sbuf_finish(buf);
3347	if (error)
3348		device_printf(dev, "Error finishing sbuf: %d\n", error);
3349
3350	sbuf_delete(buf);
3351	return (error);
3352}
3353
3354static int
3355ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3356{
3357	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3358	struct i40e_hw *hw = &pf->hw;
3359	device_t dev = pf->dev;
3360	enum i40e_status_code status;
3361	struct i40e_aq_get_phy_abilities_resp abilities;
3362	struct sbuf *buf;
3363	int error = 0;
3364
3365	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3366	if (!buf) {
3367		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3368		return (ENOMEM);
3369	}
3370
3371	status = i40e_aq_get_phy_capabilities(hw,
3372	    FALSE, arg2 != 0, &abilities, NULL);
3373	if (status) {
3374		device_printf(dev,
3375		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3376		    __func__, i40e_stat_str(hw, status),
3377		    i40e_aq_str(hw, hw->aq.asq_last_status));
3378		sbuf_delete(buf);
3379		return (EIO);
3380	}
3381
3382	sbuf_printf(buf, "\n"
3383	    "PHY Type : %08x",
3384	    abilities.phy_type);
3385
3386	if (abilities.phy_type != 0) {
3387		sbuf_printf(buf, "<");
3388		for (int i = 0; i < 32; i++)
3389			if ((1 << i) & abilities.phy_type)
3390				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3391		sbuf_printf(buf, ">");
3392	}
3393
3394	sbuf_printf(buf, "\nPHY Ext  : %02x",
3395	    abilities.phy_type_ext);
3396
3397	if (abilities.phy_type_ext != 0) {
3398		sbuf_printf(buf, "<");
3399		for (int i = 0; i < 4; i++)
3400			if ((1 << i) & abilities.phy_type_ext)
3401				sbuf_printf(buf, "%s,",
3402				    ixl_phy_type_string(i, true));
3403		sbuf_printf(buf, ">");
3404	}
3405
3406	sbuf_printf(buf, "\nSpeed    : %02x", abilities.link_speed);
3407	if (abilities.link_speed != 0) {
3408		u8 link_speed;
3409		sbuf_printf(buf, " <");
3410		for (int i = 0; i < 8; i++) {
3411			link_speed = (1 << i) & abilities.link_speed;
3412			if (link_speed)
3413				sbuf_printf(buf, "%s, ",
3414				    ixl_link_speed_string(link_speed));
3415		}
3416		sbuf_printf(buf, ">");
3417	}
3418
3419	sbuf_printf(buf, "\n"
3420	    "Abilities: %02x\n"
3421	    "EEE cap  : %04x\n"
3422	    "EEER reg : %08x\n"
3423	    "D3 Lpan  : %02x\n"
3424	    "ID       : %02x %02x %02x %02x\n"
3425	    "ModType  : %02x %02x %02x\n"
3426	    "ModType E: %01x\n"
3427	    "FEC Cfg  : %02x\n"
3428	    "Ext CC   : %02x",
3429	    abilities.abilities, abilities.eee_capability,
3430	    abilities.eeer_val, abilities.d3_lpan,
3431	    abilities.phy_id[0], abilities.phy_id[1],
3432	    abilities.phy_id[2], abilities.phy_id[3],
3433	    abilities.module_type[0], abilities.module_type[1],
3434	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3435	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3436	    abilities.ext_comp_code);
3437
3438	error = sbuf_finish(buf);
3439	if (error)
3440		device_printf(dev, "Error finishing sbuf: %d\n", error);
3441
3442	sbuf_delete(buf);
3443	return (error);
3444}
3445
3446static int
3447ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3448{
3449	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3450	struct ixl_vsi *vsi = &pf->vsi;
3451	struct ixl_mac_filter *f;
3452	device_t dev = pf->dev;
3453	int error = 0, ftl_len = 0, ftl_counter = 0;
3454
3455	struct sbuf *buf;
3456
3457	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3458	if (!buf) {
3459		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3460		return (ENOMEM);
3461	}
3462
3463	sbuf_printf(buf, "\n");
3464
3465	/* Print MAC filters */
3466	sbuf_printf(buf, "PF Filters:\n");
3467	LIST_FOREACH(f, &vsi->ftl, ftle)
3468		ftl_len++;
3469
3470	if (ftl_len < 1)
3471		sbuf_printf(buf, "(none)\n");
3472	else {
3473		LIST_FOREACH(f, &vsi->ftl, ftle) {
3474			sbuf_printf(buf,
3475			    MAC_FORMAT ", vlan %4d, flags %#06x",
3476			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3477			/* don't print '\n' for last entry */
3478			if (++ftl_counter != ftl_len)
3479				sbuf_printf(buf, "\n");
3480		}
3481	}
3482
3483#ifdef PCI_IOV
3484	/* TODO: Give each VF its own filter list sysctl */
3485	struct ixl_vf *vf;
3486	if (pf->num_vfs > 0) {
3487		sbuf_printf(buf, "\n\n");
3488		for (int i = 0; i < pf->num_vfs; i++) {
3489			vf = &pf->vfs[i];
3490			if (!(vf->vf_flags & VF_FLAG_ENABLED))
3491				continue;
3492
3493			vsi = &vf->vsi;
3494			ftl_len = 0, ftl_counter = 0;
3495			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3496			LIST_FOREACH(f, &vsi->ftl, ftle)
3497				ftl_len++;
3498
3499			if (ftl_len < 1)
3500				sbuf_printf(buf, "(none)\n");
3501			else {
3502				LIST_FOREACH(f, &vsi->ftl, ftle) {
3503					sbuf_printf(buf,
3504					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
3505					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3506				}
3507			}
3508		}
3509	}
3510#endif
3511
3512	error = sbuf_finish(buf);
3513	if (error)
3514		device_printf(dev, "Error finishing sbuf: %d\n", error);
3515	sbuf_delete(buf);
3516
3517	return (error);
3518}
3519
3520#define IXL_SW_RES_SIZE 0x14
3521int
3522ixl_res_alloc_cmp(const void *a, const void *b)
3523{
3524	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3525	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3526	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3527
3528	return ((int)one->resource_type - (int)two->resource_type);
3529}
3530
3531/*
3532 * Longest string length: 25
3533 */
3534const char *
3535ixl_switch_res_type_string(u8 type)
3536{
3537	static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3538		"VEB",
3539		"VSI",
3540		"Perfect Match MAC address",
3541		"S-tag",
3542		"(Reserved)",
3543		"Multicast hash entry",
3544		"Unicast hash entry",
3545		"VLAN",
3546		"VSI List entry",
3547		"(Reserved)",
3548		"VLAN Statistic Pool",
3549		"Mirror Rule",
3550		"Queue Set",
3551		"Inner VLAN Forward filter",
3552		"(Reserved)",
3553		"Inner MAC",
3554		"IP",
3555		"GRE/VN1 Key",
3556		"VN2 Key",
3557		"Tunneling Port"
3558	};
3559
3560	if (type < IXL_SW_RES_SIZE)
3561		return ixl_switch_res_type_strings[type];
3562	else
3563		return "(Reserved)";
3564}
3565
3566static int
3567ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3568{
3569	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3570	struct i40e_hw *hw = &pf->hw;
3571	device_t dev = pf->dev;
3572	struct sbuf *buf;
3573	enum i40e_status_code status;
3574	int error = 0;
3575
3576	u8 num_entries;
3577	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3578
3579	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3580	if (!buf) {
3581		device_printf(dev, "Could not allocate sbuf for output.\n");
3582		return (ENOMEM);
3583	}
3584
3585	bzero(resp, sizeof(resp));
3586	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3587				resp,
3588				IXL_SW_RES_SIZE,
3589				NULL);
3590	if (status) {
3591		device_printf(dev,
3592		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3593		    __func__, i40e_stat_str(hw, status),
3594		    i40e_aq_str(hw, hw->aq.asq_last_status));
3595		sbuf_delete(buf);
3596		return (error);
3597	}
3598
3599	/* Sort entries by type for display */
3600	qsort(resp, num_entries,
3601	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3602	    &ixl_res_alloc_cmp);
3603
3604	sbuf_cat(buf, "\n");
3605	sbuf_printf(buf, "# of entries: %d\n", num_entries);
3606	sbuf_printf(buf,
3607	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
3608	    "                          | (this)     | (all) | (this) | (all)       \n");
3609	for (int i = 0; i < num_entries; i++) {
3610		sbuf_printf(buf,
3611		    "%25s | %10d   %5d   %6d   %12d",
3612		    ixl_switch_res_type_string(resp[i].resource_type),
3613		    resp[i].guaranteed,
3614		    resp[i].total,
3615		    resp[i].used,
3616		    resp[i].total_unalloced);
3617		if (i < num_entries - 1)
3618			sbuf_cat(buf, "\n");
3619	}
3620
3621	error = sbuf_finish(buf);
3622	if (error)
3623		device_printf(dev, "Error finishing sbuf: %d\n", error);
3624
3625	sbuf_delete(buf);
3626	return (error);
3627}
3628
3629enum ixl_sw_seid_offset {
3630	IXL_SW_SEID_EMP = 1,
3631	IXL_SW_SEID_MAC_START = 2,
3632	IXL_SW_SEID_MAC_END = 5,
3633	IXL_SW_SEID_PF_START = 16,
3634	IXL_SW_SEID_PF_END = 31,
3635	IXL_SW_SEID_VF_START = 32,
3636	IXL_SW_SEID_VF_END = 159,
3637};
3638
3639/*
3640 * Caller must init and delete sbuf; this function will clear and
3641 * finish it for caller.
3642 *
3643 * Note: The SEID argument only applies for elements defined by FW at
3644 * power-on; these include the EMP, Ports, PFs and VFs.
3645 */
3646static char *
3647ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3648{
3649	sbuf_clear(s);
3650
3651	/* If SEID is in certain ranges, then we can infer the
3652	 * mapping of SEID to switch element.
3653	 */
3654	if (seid == IXL_SW_SEID_EMP) {
3655		sbuf_cat(s, "EMP");
3656		goto out;
3657	} else if (seid >= IXL_SW_SEID_MAC_START &&
3658	    seid <= IXL_SW_SEID_MAC_END) {
3659		sbuf_printf(s, "MAC  %2d",
3660		    seid - IXL_SW_SEID_MAC_START);
3661		goto out;
3662	} else if (seid >= IXL_SW_SEID_PF_START &&
3663	    seid <= IXL_SW_SEID_PF_END) {
3664		sbuf_printf(s, "PF  %3d",
3665		    seid - IXL_SW_SEID_PF_START);
3666		goto out;
3667	} else if (seid >= IXL_SW_SEID_VF_START &&
3668	    seid <= IXL_SW_SEID_VF_END) {
3669		sbuf_printf(s, "VF  %3d",
3670		    seid - IXL_SW_SEID_VF_START);
3671		goto out;
3672	}
3673
3674	switch (element_type) {
3675	case I40E_AQ_SW_ELEM_TYPE_BMC:
3676		sbuf_cat(s, "BMC");
3677		break;
3678	case I40E_AQ_SW_ELEM_TYPE_PV:
3679		sbuf_cat(s, "PV");
3680		break;
3681	case I40E_AQ_SW_ELEM_TYPE_VEB:
3682		sbuf_cat(s, "VEB");
3683		break;
3684	case I40E_AQ_SW_ELEM_TYPE_PA:
3685		sbuf_cat(s, "PA");
3686		break;
3687	case I40E_AQ_SW_ELEM_TYPE_VSI:
3688		sbuf_printf(s, "VSI");
3689		break;
3690	default:
3691		sbuf_cat(s, "?");
3692		break;
3693	}
3694
3695out:
3696	sbuf_finish(s);
3697	return sbuf_data(s);
3698}
3699
3700static int
3701ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3702{
3703	const struct i40e_aqc_switch_config_element_resp *one, *two;
3704	one = (const struct i40e_aqc_switch_config_element_resp *)a;
3705	two = (const struct i40e_aqc_switch_config_element_resp *)b;
3706
3707	return ((int)one->seid - (int)two->seid);
3708}
3709
3710static int
3711ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3712{
3713	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3714	struct i40e_hw *hw = &pf->hw;
3715	device_t dev = pf->dev;
3716	struct sbuf *buf;
3717	struct sbuf *nmbuf;
3718	enum i40e_status_code status;
3719	int error = 0;
3720	u16 next = 0;
3721	u8 aq_buf[I40E_AQ_LARGE_BUF];
3722
3723	struct i40e_aqc_switch_config_element_resp *elem;
3724	struct i40e_aqc_get_switch_config_resp *sw_config;
3725	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3726
3727	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3728	if (!buf) {
3729		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3730		return (ENOMEM);
3731	}
3732
3733	status = i40e_aq_get_switch_config(hw, sw_config,
3734	    sizeof(aq_buf), &next, NULL);
3735	if (status) {
3736		device_printf(dev,
3737		    "%s: aq_get_switch_config() error %s, aq error %s\n",
3738		    __func__, i40e_stat_str(hw, status),
3739		    i40e_aq_str(hw, hw->aq.asq_last_status));
3740		sbuf_delete(buf);
3741		return error;
3742	}
3743	if (next)
3744		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3745		    __func__, next);
3746
3747	nmbuf = sbuf_new_auto();
3748	if (!nmbuf) {
3749		device_printf(dev, "Could not allocate sbuf for name output.\n");
3750		sbuf_delete(buf);
3751		return (ENOMEM);
3752	}
3753
3754	/* Sort entries by SEID for display */
3755	qsort(sw_config->element, sw_config->header.num_reported,
3756	    sizeof(struct i40e_aqc_switch_config_element_resp),
3757	    &ixl_sw_cfg_elem_seid_cmp);
3758
3759	sbuf_cat(buf, "\n");
3760	/* Assuming <= 255 elements in switch */
3761	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3762	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3763	/* Exclude:
3764	 * Revision -- all elements are revision 1 for now
3765	 */
3766	sbuf_printf(buf,
3767	    "SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
3768	    "                |                 |                 | (uplink)\n");
3769	for (int i = 0; i < sw_config->header.num_reported; i++) {
3770		elem = &sw_config->element[i];
3771
3772		// "%4d (%8s) | %8s   %8s   %#8x",
3773		sbuf_printf(buf, "%4d", elem->seid);
3774		sbuf_cat(buf, " ");
3775		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3776		    elem->element_type, elem->seid));
3777		sbuf_cat(buf, " | ");
3778		sbuf_printf(buf, "%4d", elem->uplink_seid);
3779		sbuf_cat(buf, " ");
3780		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3781		    0, elem->uplink_seid));
3782		sbuf_cat(buf, " | ");
3783		sbuf_printf(buf, "%4d", elem->downlink_seid);
3784		sbuf_cat(buf, " ");
3785		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3786		    0, elem->downlink_seid));
3787		sbuf_cat(buf, " | ");
3788		sbuf_printf(buf, "%8d", elem->connection_type);
3789		if (i < sw_config->header.num_reported - 1)
3790			sbuf_cat(buf, "\n");
3791	}
3792	sbuf_delete(nmbuf);
3793
3794	error = sbuf_finish(buf);
3795	if (error)
3796		device_printf(dev, "Error finishing sbuf: %d\n", error);
3797
3798	sbuf_delete(buf);
3799
3800	return (error);
3801}
3802
3803static int
3804ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
3805{
3806	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3807	struct i40e_hw *hw = &pf->hw;
3808	device_t dev = pf->dev;
3809	int requested_vlan = -1;
3810	enum i40e_status_code status = 0;
3811	int error = 0;
3812
3813	error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
3814	if ((error) || (req->newptr == NULL))
3815	    return (error);
3816
3817	if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
3818		device_printf(dev, "Flags disallow setting of vlans\n");
3819		return (ENODEV);
3820	}
3821
3822	hw->switch_tag = requested_vlan;
3823	device_printf(dev,
3824	    "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
3825	    hw->switch_tag, hw->first_tag, hw->second_tag);
3826	status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3827	if (status) {
3828		device_printf(dev,
3829		    "%s: aq_set_switch_config() error %s, aq error %s\n",
3830		    __func__, i40e_stat_str(hw, status),
3831		    i40e_aq_str(hw, hw->aq.asq_last_status));
3832		return (status);
3833	}
3834	return (0);
3835}
3836
3837static int
3838ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
3839{
3840	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3841	struct i40e_hw *hw = &pf->hw;
3842	device_t dev = pf->dev;
3843	struct sbuf *buf;
3844	int error = 0;
3845	enum i40e_status_code status;
3846	u32 reg;
3847
3848	struct i40e_aqc_get_set_rss_key_data key_data;
3849
3850	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3851	if (!buf) {
3852		device_printf(dev, "Could not allocate sbuf for output.\n");
3853		return (ENOMEM);
3854	}
3855
3856	bzero(&key_data, sizeof(key_data));
3857
3858	sbuf_cat(buf, "\n");
3859	if (hw->mac.type == I40E_MAC_X722) {
3860		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
3861		if (status)
3862			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
3863			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3864	} else {
3865		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
3866			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
3867			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
3868		}
3869	}
3870
3871	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
3872
3873	error = sbuf_finish(buf);
3874	if (error)
3875		device_printf(dev, "Error finishing sbuf: %d\n", error);
3876	sbuf_delete(buf);
3877
3878	return (error);
3879}
3880
3881static void
3882ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
3883{
3884	int i, j, k, width;
3885	char c;
3886
3887	if (length < 1 || buf == NULL) return;
3888
3889	int byte_stride = 16;
3890	int lines = length / byte_stride;
3891	int rem = length % byte_stride;
3892	if (rem > 0)
3893		lines++;
3894
3895	for (i = 0; i < lines; i++) {
3896		width = (rem > 0 && i == lines - 1)
3897		    ? rem : byte_stride;
3898
3899		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
3900
3901		for (j = 0; j < width; j++)
3902			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
3903
3904		if (width < byte_stride) {
3905			for (k = 0; k < (byte_stride - width); k++)
3906				sbuf_printf(sb, "   ");
3907		}
3908
3909		if (!text) {
3910			sbuf_printf(sb, "\n");
3911			continue;
3912		}
3913
3914		for (j = 0; j < width; j++) {
3915			c = (char)buf[i * byte_stride + j];
3916			if (c < 32 || c > 126)
3917				sbuf_printf(sb, ".");
3918			else
3919				sbuf_printf(sb, "%c", c);
3920
3921			if (j == width - 1)
3922				sbuf_printf(sb, "\n");
3923		}
3924	}
3925}
3926
3927static int
3928ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
3929{
3930	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3931	struct i40e_hw *hw = &pf->hw;
3932	device_t dev = pf->dev;
3933	struct sbuf *buf;
3934	int error = 0;
3935	enum i40e_status_code status;
3936	u8 hlut[512];
3937	u32 reg;
3938
3939	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3940	if (!buf) {
3941		device_printf(dev, "Could not allocate sbuf for output.\n");
3942		return (ENOMEM);
3943	}
3944
3945	bzero(hlut, sizeof(hlut));
3946	sbuf_cat(buf, "\n");
3947	if (hw->mac.type == I40E_MAC_X722) {
3948		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
3949		if (status)
3950			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
3951			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3952	} else {
3953		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
3954			reg = rd32(hw, I40E_PFQF_HLUT(i));
3955			bcopy(&reg, &hlut[i << 2], 4);
3956		}
3957	}
3958	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
3959
3960	error = sbuf_finish(buf);
3961	if (error)
3962		device_printf(dev, "Error finishing sbuf: %d\n", error);
3963	sbuf_delete(buf);
3964
3965	return (error);
3966}
3967
3968static int
3969ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
3970{
3971	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3972	struct i40e_hw *hw = &pf->hw;
3973	u64 hena;
3974
3975	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3976	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3977
3978	return sysctl_handle_long(oidp, NULL, hena, req);
3979}
3980
3981/*
3982 * Sysctl to disable firmware's link management
3983 *
3984 * 1 - Disable link management on this port
3985 * 0 - Re-enable link management
3986 *
3987 * On normal NVMs, firmware manages link by default.
3988 */
3989static int
3990ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
3991{
3992	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3993	struct i40e_hw *hw = &pf->hw;
3994	device_t dev = pf->dev;
3995	int requested_mode = -1;
3996	enum i40e_status_code status = 0;
3997	int error = 0;
3998
3999	/* Read in new mode */
4000	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4001	if ((error) || (req->newptr == NULL))
4002		return (error);
4003	/* Check for sane value */
4004	if (requested_mode < 0 || requested_mode > 1) {
4005		device_printf(dev, "Valid modes are 0 or 1\n");
4006		return (EINVAL);
4007	}
4008
4009	/* Set new mode */
4010	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4011	if (status) {
4012		device_printf(dev,
4013		    "%s: Error setting new phy debug mode %s,"
4014		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4015		    i40e_aq_str(hw, hw->aq.asq_last_status));
4016		return (EIO);
4017	}
4018
4019	return (0);
4020}
4021
4022/*
4023 * Read some diagnostic data from a (Q)SFP+ module
4024 *
4025 *             SFP A2   QSFP Lower Page
4026 * Temperature 96-97	22-23
4027 * Vcc         98-99    26-27
4028 * TX power    102-103  34-35..40-41
4029 * RX power    104-105  50-51..56-57
4030 */
4031static int
4032ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4033{
4034	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4035	device_t dev = pf->dev;
4036	struct sbuf *sbuf;
4037	int error = 0;
4038	u8 output;
4039
4040	if (req->oldptr == NULL) {
4041		error = SYSCTL_OUT(req, 0, 128);
4042		return (0);
4043	}
4044
4045	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4046	if (error) {
4047		device_printf(dev, "Error reading from i2c\n");
4048		return (error);
4049	}
4050
4051	/* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4052	if (output == 0x3) {
4053		/*
4054		 * Check for:
4055		 * - Internally calibrated data
4056		 * - Diagnostic monitoring is implemented
4057		 */
4058		pf->read_i2c_byte(pf, 92, 0xA0, &output);
4059		if (!(output & 0x60)) {
4060			device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4061			return (0);
4062		}
4063
4064		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4065
4066		for (u8 offset = 96; offset < 100; offset++) {
4067			pf->read_i2c_byte(pf, offset, 0xA2, &output);
4068			sbuf_printf(sbuf, "%02X ", output);
4069		}
4070		for (u8 offset = 102; offset < 106; offset++) {
4071			pf->read_i2c_byte(pf, offset, 0xA2, &output);
4072			sbuf_printf(sbuf, "%02X ", output);
4073		}
4074	} else if (output == 0xD || output == 0x11) {
4075		/*
4076		 * QSFP+ modules are always internally calibrated, and must indicate
4077		 * what types of diagnostic monitoring are implemented
4078		 */
4079		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4080
4081		for (u8 offset = 22; offset < 24; offset++) {
4082			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4083			sbuf_printf(sbuf, "%02X ", output);
4084		}
4085		for (u8 offset = 26; offset < 28; offset++) {
4086			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4087			sbuf_printf(sbuf, "%02X ", output);
4088		}
4089		/* Read the data from the first lane */
4090		for (u8 offset = 34; offset < 36; offset++) {
4091			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4092			sbuf_printf(sbuf, "%02X ", output);
4093		}
4094		for (u8 offset = 50; offset < 52; offset++) {
4095			pf->read_i2c_byte(pf, offset, 0xA0, &output);
4096			sbuf_printf(sbuf, "%02X ", output);
4097		}
4098	} else {
4099		device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4100		return (0);
4101	}
4102
4103	sbuf_finish(sbuf);
4104	sbuf_delete(sbuf);
4105
4106	return (0);
4107}
4108
4109/*
4110 * Sysctl to read a byte from I2C bus.
4111 *
4112 * Input: 32-bit value:
4113 * 	bits 0-7:   device address (0xA0 or 0xA2)
4114 * 	bits 8-15:  offset (0-255)
4115 *	bits 16-31: unused
4116 * Output: 8-bit value read
4117 */
4118static int
4119ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4120{
4121	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4122	device_t dev = pf->dev;
4123	int input = -1, error = 0;
4124	u8 dev_addr, offset, output;
4125
4126	/* Read in I2C read parameters */
4127	error = sysctl_handle_int(oidp, &input, 0, req);
4128	if ((error) || (req->newptr == NULL))
4129		return (error);
4130	/* Validate device address */
4131	dev_addr = input & 0xFF;
4132	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4133		return (EINVAL);
4134	}
4135	offset = (input >> 8) & 0xFF;
4136
4137	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4138	if (error)
4139		return (error);
4140
4141	device_printf(dev, "%02X\n", output);
4142	return (0);
4143}
4144
4145/*
4146 * Sysctl to write a byte to the I2C bus.
4147 *
4148 * Input: 32-bit value:
4149 * 	bits 0-7:   device address (0xA0 or 0xA2)
4150 * 	bits 8-15:  offset (0-255)
4151 *	bits 16-23: value to write
4152 *	bits 24-31: unused
4153 * Output: 8-bit value written
4154 */
4155static int
4156ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4157{
4158	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4159	device_t dev = pf->dev;
4160	int input = -1, error = 0;
4161	u8 dev_addr, offset, value;
4162
4163	/* Read in I2C write parameters */
4164	error = sysctl_handle_int(oidp, &input, 0, req);
4165	if ((error) || (req->newptr == NULL))
4166		return (error);
4167	/* Validate device address */
4168	dev_addr = input & 0xFF;
4169	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4170		return (EINVAL);
4171	}
4172	offset = (input >> 8) & 0xFF;
4173	value = (input >> 16) & 0xFF;
4174
4175	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4176	if (error)
4177		return (error);
4178
4179	device_printf(dev, "%02X written\n", value);
4180	return (0);
4181}
4182
4183static int
4184ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4185    u8 bit_pos, int *is_set)
4186{
4187	device_t dev = pf->dev;
4188	struct i40e_hw *hw = &pf->hw;
4189	enum i40e_status_code status;
4190
4191	if (IXL_PF_IN_RECOVERY_MODE(pf))
4192		return (EIO);
4193
4194	status = i40e_aq_get_phy_capabilities(hw,
4195	    FALSE, FALSE, abilities, NULL);
4196	if (status) {
4197		device_printf(dev,
4198		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4199		    __func__, i40e_stat_str(hw, status),
4200		    i40e_aq_str(hw, hw->aq.asq_last_status));
4201		return (EIO);
4202	}
4203
4204	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4205	return (0);
4206}
4207
4208static int
4209ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4210    u8 bit_pos, int set)
4211{
4212	device_t dev = pf->dev;
4213	struct i40e_hw *hw = &pf->hw;
4214	struct i40e_aq_set_phy_config config;
4215	enum i40e_status_code status;
4216
4217	/* Set new PHY config */
4218	memset(&config, 0, sizeof(config));
4219	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4220	if (set)
4221		config.fec_config |= bit_pos;
4222	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4223		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4224		config.phy_type = abilities->phy_type;
4225		config.phy_type_ext = abilities->phy_type_ext;
4226		config.link_speed = abilities->link_speed;
4227		config.eee_capability = abilities->eee_capability;
4228		config.eeer = abilities->eeer_val;
4229		config.low_power_ctrl = abilities->d3_lpan;
4230		status = i40e_aq_set_phy_config(hw, &config, NULL);
4231
4232		if (status) {
4233			device_printf(dev,
4234			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4235			    __func__, i40e_stat_str(hw, status),
4236			    i40e_aq_str(hw, hw->aq.asq_last_status));
4237			return (EIO);
4238		}
4239	}
4240
4241	return (0);
4242}
4243
4244static int
4245ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4246{
4247	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4248	int mode, error = 0;
4249
4250	struct i40e_aq_get_phy_abilities_resp abilities;
4251	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4252	if (error)
4253		return (error);
4254	/* Read in new mode */
4255	error = sysctl_handle_int(oidp, &mode, 0, req);
4256	if ((error) || (req->newptr == NULL))
4257		return (error);
4258
4259	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4260}
4261
4262static int
4263ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4264{
4265	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4266	int mode, error = 0;
4267
4268	struct i40e_aq_get_phy_abilities_resp abilities;
4269	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4270	if (error)
4271		return (error);
4272	/* Read in new mode */
4273	error = sysctl_handle_int(oidp, &mode, 0, req);
4274	if ((error) || (req->newptr == NULL))
4275		return (error);
4276
4277	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4278}
4279
4280static int
4281ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4282{
4283	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4284	int mode, error = 0;
4285
4286	struct i40e_aq_get_phy_abilities_resp abilities;
4287	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4288	if (error)
4289		return (error);
4290	/* Read in new mode */
4291	error = sysctl_handle_int(oidp, &mode, 0, req);
4292	if ((error) || (req->newptr == NULL))
4293		return (error);
4294
4295	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4296}
4297
4298static int
4299ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4300{
4301	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4302	int mode, error = 0;
4303
4304	struct i40e_aq_get_phy_abilities_resp abilities;
4305	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4306	if (error)
4307		return (error);
4308	/* Read in new mode */
4309	error = sysctl_handle_int(oidp, &mode, 0, req);
4310	if ((error) || (req->newptr == NULL))
4311		return (error);
4312
4313	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4314}
4315
4316static int
4317ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4318{
4319	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4320	int mode, error = 0;
4321
4322	struct i40e_aq_get_phy_abilities_resp abilities;
4323	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4324	if (error)
4325		return (error);
4326	/* Read in new mode */
4327	error = sysctl_handle_int(oidp, &mode, 0, req);
4328	if ((error) || (req->newptr == NULL))
4329		return (error);
4330
4331	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4332}
4333
4334static int
4335ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4336{
4337	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4338	struct i40e_hw *hw = &pf->hw;
4339	device_t dev = pf->dev;
4340	struct sbuf *buf;
4341	int error = 0;
4342	enum i40e_status_code status;
4343
4344	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4345	if (!buf) {
4346		device_printf(dev, "Could not allocate sbuf for output.\n");
4347		return (ENOMEM);
4348	}
4349
4350	u8 *final_buff;
4351	/* This amount is only necessary if reading the entire cluster into memory */
4352#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4353	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4354	if (final_buff == NULL) {
4355		device_printf(dev, "Could not allocate memory for output.\n");
4356		goto out;
4357	}
4358	int final_buff_len = 0;
4359
4360	u8 cluster_id = 1;
4361	bool more = true;
4362
4363	u8 dump_buf[4096];
4364	u16 curr_buff_size = 4096;
4365	u8 curr_next_table = 0;
4366	u32 curr_next_index = 0;
4367
4368	u16 ret_buff_size;
4369	u8 ret_next_table;
4370	u32 ret_next_index;
4371
4372	sbuf_cat(buf, "\n");
4373
4374	while (more) {
4375		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4376		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4377		if (status) {
4378			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4379			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4380			goto free_out;
4381		}
4382
4383		/* copy info out of temp buffer */
4384		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4385		final_buff_len += ret_buff_size;
4386
4387		if (ret_next_table != curr_next_table) {
4388			/* We're done with the current table; we can dump out read data. */
4389			sbuf_printf(buf, "%d:", curr_next_table);
4390			int bytes_printed = 0;
4391			while (bytes_printed <= final_buff_len) {
4392				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4393				bytes_printed += 16;
4394			}
4395				sbuf_cat(buf, "\n");
4396
4397			/* The entire cluster has been read; we're finished */
4398			if (ret_next_table == 0xFF)
4399				break;
4400
4401			/* Otherwise clear the output buffer and continue reading */
4402			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4403			final_buff_len = 0;
4404		}
4405
4406		if (ret_next_index == 0xFFFFFFFF)
4407			ret_next_index = 0;
4408
4409		bzero(dump_buf, sizeof(dump_buf));
4410		curr_next_table = ret_next_table;
4411		curr_next_index = ret_next_index;
4412	}
4413
4414free_out:
4415	free(final_buff, M_IXL);
4416out:
4417	error = sbuf_finish(buf);
4418	if (error)
4419		device_printf(dev, "Error finishing sbuf: %d\n", error);
4420	sbuf_delete(buf);
4421
4422	return (error);
4423}
4424
4425static int
4426ixl_start_fw_lldp(struct ixl_pf *pf)
4427{
4428	struct i40e_hw *hw = &pf->hw;
4429	enum i40e_status_code status;
4430
4431	status = i40e_aq_start_lldp(hw, false, NULL);
4432	if (status != I40E_SUCCESS) {
4433		switch (hw->aq.asq_last_status) {
4434		case I40E_AQ_RC_EEXIST:
4435			device_printf(pf->dev,
4436			    "FW LLDP agent is already running\n");
4437			break;
4438		case I40E_AQ_RC_EPERM:
4439			device_printf(pf->dev,
4440			    "Device configuration forbids SW from starting "
4441			    "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4442			    "attribute to \"Enabled\" to use this sysctl\n");
4443			return (EINVAL);
4444		default:
4445			device_printf(pf->dev,
4446			    "Starting FW LLDP agent failed: error: %s, %s\n",
4447			    i40e_stat_str(hw, status),
4448			    i40e_aq_str(hw, hw->aq.asq_last_status));
4449			return (EINVAL);
4450		}
4451	}
4452
4453	atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4454	return (0);
4455}
4456
4457static int
4458ixl_stop_fw_lldp(struct ixl_pf *pf)
4459{
4460	struct i40e_hw *hw = &pf->hw;
4461	device_t dev = pf->dev;
4462	enum i40e_status_code status;
4463
4464	if (hw->func_caps.npar_enable != 0) {
4465		device_printf(dev,
4466		    "Disabling FW LLDP agent is not supported on this device\n");
4467		return (EINVAL);
4468	}
4469
4470	if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4471		device_printf(dev,
4472		    "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4473		return (EINVAL);
4474	}
4475
4476	status = i40e_aq_stop_lldp(hw, true, false, NULL);
4477	if (status != I40E_SUCCESS) {
4478		if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4479			device_printf(dev,
4480			    "Disabling FW LLDP agent failed: error: %s, %s\n",
4481			    i40e_stat_str(hw, status),
4482			    i40e_aq_str(hw, hw->aq.asq_last_status));
4483			return (EINVAL);
4484		}
4485
4486		device_printf(dev, "FW LLDP agent is already stopped\n");
4487	}
4488
4489	i40e_aq_set_dcb_parameters(hw, true, NULL);
4490	atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4491	return (0);
4492}
4493
4494static int
4495ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4496{
4497	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4498	int state, new_state, error = 0;
4499
4500	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4501
4502	/* Read in new mode */
4503	error = sysctl_handle_int(oidp, &new_state, 0, req);
4504	if ((error) || (req->newptr == NULL))
4505		return (error);
4506
4507	/* Already in requested state */
4508	if (new_state == state)
4509		return (error);
4510
4511	if (new_state == 0)
4512		return ixl_stop_fw_lldp(pf);
4513
4514	return ixl_start_fw_lldp(pf);
4515}
4516
4517static int
4518ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4519{
4520	struct ixl_pf         *pf = (struct ixl_pf *)arg1;
4521	int                   state, new_state;
4522	int                   sysctl_handle_status = 0;
4523	enum i40e_status_code cmd_status;
4524
4525	/* Init states' values */
4526	state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
4527
4528	/* Get requested mode */
4529	sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4530	if ((sysctl_handle_status) || (req->newptr == NULL))
4531		return (sysctl_handle_status);
4532
4533	/* Check if state has changed */
4534	if (new_state == state)
4535		return (0);
4536
4537	/* Set new state */
4538	cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4539
4540	/* Save new state or report error */
4541	if (!cmd_status) {
4542		if (new_state == 0)
4543			atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4544		else
4545			atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4546	} else if (cmd_status == I40E_ERR_CONFIG)
4547		return (EPERM);
4548	else
4549		return (EIO);
4550
4551	return (0);
4552}
4553
4554static int
4555ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4556{
4557	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4558	int error, state;
4559
4560	state = !!(atomic_load_acq_32(&pf->state) &
4561	    IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4562
4563	error = sysctl_handle_int(oidp, &state, 0, req);
4564	if ((error) || (req->newptr == NULL))
4565		return (error);
4566
4567	if (state == 0)
4568		atomic_clear_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4569	else
4570		atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4571
4572	return (0);
4573}
4574
4575
4576int
4577ixl_attach_get_link_status(struct ixl_pf *pf)
4578{
4579	struct i40e_hw *hw = &pf->hw;
4580	device_t dev = pf->dev;
4581	int error = 0;
4582
4583	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4584	    (hw->aq.fw_maj_ver < 4)) {
4585		i40e_msec_delay(75);
4586		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4587		if (error) {
4588			device_printf(dev, "link restart failed, aq_err=%d\n",
4589			    pf->hw.aq.asq_last_status);
4590			return error;
4591		}
4592	}
4593
4594	/* Determine link state */
4595	hw->phy.get_link_info = TRUE;
4596	i40e_get_link_status(hw, &pf->link_up);
4597	return (0);
4598}
4599
4600static int
4601ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4602{
4603	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4604	int requested = 0, error = 0;
4605
4606	/* Read in new mode */
4607	error = sysctl_handle_int(oidp, &requested, 0, req);
4608	if ((error) || (req->newptr == NULL))
4609		return (error);
4610
4611	/* Initiate the PF reset later in the admin task */
4612	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4613
4614	return (error);
4615}
4616
4617static int
4618ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4619{
4620	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4621	struct i40e_hw *hw = &pf->hw;
4622	int requested = 0, error = 0;
4623
4624	/* Read in new mode */
4625	error = sysctl_handle_int(oidp, &requested, 0, req);
4626	if ((error) || (req->newptr == NULL))
4627		return (error);
4628
4629	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4630
4631	return (error);
4632}
4633
4634static int
4635ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4636{
4637	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4638	struct i40e_hw *hw = &pf->hw;
4639	int requested = 0, error = 0;
4640
4641	/* Read in new mode */
4642	error = sysctl_handle_int(oidp, &requested, 0, req);
4643	if ((error) || (req->newptr == NULL))
4644		return (error);
4645
4646	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4647
4648	return (error);
4649}
4650
4651/*
4652 * Print out mapping of TX queue indexes and Rx queue indexes
4653 * to MSI-X vectors.
4654 */
4655static int
4656ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4657{
4658	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4659	struct ixl_vsi *vsi = &pf->vsi;
4660	device_t dev = pf->dev;
4661	struct sbuf *buf;
4662	int error = 0;
4663
4664	struct ixl_rx_queue *rx_que = vsi->rx_queues;
4665	struct ixl_tx_queue *tx_que = vsi->tx_queues;
4666
4667	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4668	if (!buf) {
4669		device_printf(dev, "Could not allocate sbuf for output.\n");
4670		return (ENOMEM);
4671	}
4672
4673	sbuf_cat(buf, "\n");
4674	for (int i = 0; i < vsi->num_rx_queues; i++) {
4675		rx_que = &vsi->rx_queues[i];
4676		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4677	}
4678	for (int i = 0; i < vsi->num_tx_queues; i++) {
4679		tx_que = &vsi->tx_queues[i];
4680		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4681	}
4682
4683	error = sbuf_finish(buf);
4684	if (error)
4685		device_printf(dev, "Error finishing sbuf: %d\n", error);
4686	sbuf_delete(buf);
4687
4688	return (error);
4689}
4690