1/******************************************************************************
2
3  Copyright (c) 2013-2018, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD$*/
34
35
36#include "ixl_pf.h"
37
38#ifdef PCI_IOV
39#include "ixl_pf_iov.h"
40#endif
41
42#ifdef IXL_IW
43#include "ixl_iw.h"
44#include "ixl_iw_int.h"
45#endif
46
47static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
48static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
50static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
51static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
52
53/* Sysctls */
54static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
55static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
56static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
57static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
58static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
59static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
60static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
61
62static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
63
64/* Debug Sysctls */
65static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
66static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
67static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
68static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
69static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
70static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
71static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
72static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
73static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
74static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
75static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
76static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
77static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
78static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
79static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
80static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
81static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
82static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
83static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
84static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
85
/* Debug Sysctls: device reset and queue-mapping diagnostics */
87static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
88static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
89static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
90static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
91#ifdef IXL_DEBUG
92static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
93static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
94#endif
95
96#ifdef IXL_IW
97extern int ixl_enable_iwarp;
98extern int ixl_limit_iwarp_msix;
99#endif
100
/*
 * Human-readable flow-control state names used by ixl_link_up_msg();
 * indexed 0=None, 1=Rx, 2=Tx, 3=Full, plus Priority/Default entries.
 */
static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

/*
 * Human-readable FEC mode names: [0]=RS (CL108), [1]=FC/BASE-R (CL74),
 * [2]=no FEC.  NOTE(review): could be "static const char * const" like
 * ixl_fc_string above, if the consumers are updated to match.
 */
static char *ixl_fec_string[3] = {
       "CL108 RS-FEC",
       "CL74 FC-FEC/BASE-R",
       "None"
};

/* Malloc type for this driver's dynamic allocations */
MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
117
118/*
119** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
120*/
121void
122ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
123{
124	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
125	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
126	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
127
128	sbuf_printf(buf,
129	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
130	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
131	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
132	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
133	    IXL_NVM_VERSION_HI_SHIFT,
134	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
135	    IXL_NVM_VERSION_LO_SHIFT,
136	    hw->nvm.eetrack,
137	    oem_ver, oem_build, oem_patch);
138}
139
140void
141ixl_print_nvm_version(struct ixl_pf *pf)
142{
143	struct i40e_hw *hw = &pf->hw;
144	device_t dev = pf->dev;
145	struct sbuf *sbuf;
146
147	sbuf = sbuf_new_auto();
148	ixl_nvm_version_str(hw, sbuf);
149	sbuf_finish(sbuf);
150	device_printf(dev, "%s\n", sbuf_data(sbuf));
151	sbuf_delete(sbuf);
152}
153
154/**
155 * ixl_get_fw_mode - Check the state of FW
156 * @hw: device hardware structure
157 *
158 * Identify state of FW. It might be in a recovery mode
159 * which limits functionality and requires special handling
160 * from the driver.
161 *
162 * @returns FW mode (normal, recovery, unexpected EMP reset)
163 */
164static enum ixl_fw_mode
165ixl_get_fw_mode(struct ixl_pf *pf)
166{
167	struct i40e_hw *hw = &pf->hw;
168	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
169	u32 fwsts;
170
171#ifdef IXL_DEBUG
172	if (pf->recovery_mode)
173		return IXL_FW_MODE_RECOVERY;
174#endif
175	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
176
177	/* Is set and has one of expected values */
178	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
179	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
180	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
181	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
182		fw_mode = IXL_FW_MODE_RECOVERY;
183	else {
184		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
185		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
186			fw_mode = IXL_FW_MODE_UEMPR;
187	}
188	return (fw_mode);
189}
190
191/**
192 * ixl_pf_reset - Reset the PF
193 * @pf: PF structure
194 *
195 * Ensure that FW is in the right state and do the reset
196 * if needed.
197 *
198 * @returns zero on success, or an error code on failure.
199 */
200int
201ixl_pf_reset(struct ixl_pf *pf)
202{
203	struct i40e_hw *hw = &pf->hw;
204	enum i40e_status_code status;
205	enum ixl_fw_mode fw_mode;
206
207	fw_mode = ixl_get_fw_mode(pf);
208	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
209	if (fw_mode == IXL_FW_MODE_RECOVERY) {
210		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
211		/* Don't try to reset device if it's in recovery mode */
212		return (0);
213	}
214
215	status = i40e_pf_reset(hw);
216	if (status == I40E_SUCCESS)
217		return (0);
218
219	/* Check FW mode again in case it has changed while
220	 * waiting for reset to complete */
221	fw_mode = ixl_get_fw_mode(pf);
222	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
223	if (fw_mode == IXL_FW_MODE_RECOVERY) {
224		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
225		return (0);
226	}
227
228	if (fw_mode == IXL_FW_MODE_UEMPR)
229		device_printf(pf->dev,
230		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
231	else
232		device_printf(pf->dev, "PF reset failure %s\n",
233		    i40e_stat_str(hw, status));
234	return (EIO);
235}
236
237/**
238 * ixl_setup_hmc - Setup LAN Host Memory Cache
239 * @pf: PF structure
240 *
241 * Init and configure LAN Host Memory Cache
242 *
243 * @returns 0 on success, EIO on error
244 */
245int
246ixl_setup_hmc(struct ixl_pf *pf)
247{
248	struct i40e_hw *hw = &pf->hw;
249	enum i40e_status_code status;
250
251	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
252	    hw->func_caps.num_rx_qp, 0, 0);
253	if (status) {
254		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
255		    i40e_stat_str(hw, status));
256		return (EIO);
257	}
258
259	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
260	if (status) {
261		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
262		    i40e_stat_str(hw, status));
263		return (EIO);
264	}
265
266	return (0);
267}
268
269/**
270 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
271 * @pf: PF structure
272 *
273 * Shutdown Host Memory Cache if configured.
274 *
275 */
276void
277ixl_shutdown_hmc(struct ixl_pf *pf)
278{
279	struct i40e_hw *hw = &pf->hw;
280	enum i40e_status_code status;
281
282	/* HMC not configured, no need to shutdown */
283	if (hw->hmc.hmc_obj == NULL)
284		return;
285
286	status = i40e_shutdown_lan_hmc(hw);
287	if (status)
288		device_printf(pf->dev,
289		    "Shutdown LAN HMC failed with code %s\n",
290		    i40e_stat_str(hw, status));
291}
292/*
293 * Write PF ITR values to queue ITR registers.
294 */
295void
296ixl_configure_itr(struct ixl_pf *pf)
297{
298	ixl_configure_tx_itr(pf);
299	ixl_configure_rx_itr(pf);
300}
301
302/*********************************************************************
303 *
304 *  Get the hardware capabilities
305 *
306 **********************************************************************/
307
308int
309ixl_get_hw_capabilities(struct ixl_pf *pf)
310{
311	struct i40e_aqc_list_capabilities_element_resp *buf;
312	struct i40e_hw	*hw = &pf->hw;
313	device_t 	dev = pf->dev;
314	enum i40e_status_code status;
315	int len, i2c_intfc_num;
316	bool again = TRUE;
317	u16 needed;
318
319	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
320		hw->func_caps.iwarp = 0;
321		return (0);
322	}
323
324	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
325retry:
326	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
327	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
328		device_printf(dev, "Unable to allocate cap memory\n");
329                return (ENOMEM);
330	}
331
332	/* This populates the hw struct */
333        status = i40e_aq_discover_capabilities(hw, buf, len,
334	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
335	free(buf, M_DEVBUF);
336	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
337	    (again == TRUE)) {
338		/* retry once with a larger buffer */
339		again = FALSE;
340		len = needed;
341		goto retry;
342	} else if (status != I40E_SUCCESS) {
343		device_printf(dev, "capability discovery failed; status %s, error %s\n",
344		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
345		return (ENODEV);
346	}
347
348	/*
349	 * Some devices have both MDIO and I2C; since this isn't reported
350	 * by the FW, check registers to see if an I2C interface exists.
351	 */
352	i2c_intfc_num = ixl_find_i2c_interface(pf);
353	if (i2c_intfc_num != -1)
354		pf->has_i2c = true;
355
356	/* Determine functions to use for driver I2C accesses */
357	switch (pf->i2c_access_method) {
358	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
359		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
360			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
361			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
362		} else {
363			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
364			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
365		}
366		break;
367	}
368	case IXL_I2C_ACCESS_METHOD_AQ:
369		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
370		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
371		break;
372	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
373		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
374		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
375		break;
376	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
377		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
378		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
379		break;
380	default:
381		/* Should not happen */
382		device_printf(dev, "Error setting I2C access functions\n");
383		break;
384	}
385
386	/* Print a subset of the capability information. */
387	device_printf(dev,
388	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
389	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
390	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
391	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
392	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
393	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
394	    "MDIO shared");
395
396	return (0);
397}
398
399/* For the set_advertise sysctl */
400void
401ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
402{
403	device_t dev = pf->dev;
404	int err;
405
406	/* Make sure to initialize the device to the complete list of
407	 * supported speeds on driver load, to ensure unloading and
408	 * reloading the driver will restore this value.
409	 */
410	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
411	if (err) {
412		/* Non-fatal error */
413		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
414			      __func__, err);
415		return;
416	}
417
418	pf->advertised_speed =
419	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
420}
421
/*
 * Tear down the hardware support structures set up at init time:
 * the LAN HMC (if configured) and the admin queue, then release the
 * PF's queue allocation back to the queue manager.
 *
 * Returns 0 or the i40e status code of the first failing step.
 */
int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			/* NOTE(review): on HMC shutdown failure the admin
			 * queue is left running and the qmgr tag is not
			 * released -- confirm this is intentional. */
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	/* Return this PF's queue allocation to the queue manager */
	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}
452
453/*********************************************************************
454 * 	Filter Routines
455 *
456 *	Routines for multicast and vlan filter management.
457 *
458 *********************************************************************/
459void
460ixl_add_multi(struct ixl_vsi *vsi)
461{
462	struct	ifmultiaddr	*ifma;
463	struct ifnet		*ifp = vsi->ifp;
464	struct i40e_hw		*hw = vsi->hw;
465	int			mcnt = 0, flags;
466
467	IOCTL_DEBUGOUT("ixl_add_multi: begin");
468
469	if_maddr_rlock(ifp);
470	/*
471	** First just get a count, to decide if we
472	** we simply use multicast promiscuous.
473	*/
474	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
475		if (ifma->ifma_addr->sa_family != AF_LINK)
476			continue;
477		mcnt++;
478	}
479	if_maddr_runlock(ifp);
480
481	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
482		/* delete existing MC filters */
483		ixl_del_hw_filters(vsi, mcnt);
484		i40e_aq_set_vsi_multicast_promiscuous(hw,
485		    vsi->seid, TRUE, NULL);
486		return;
487	}
488
489	mcnt = 0;
490	if_maddr_rlock(ifp);
491	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
492		if (ifma->ifma_addr->sa_family != AF_LINK)
493			continue;
494		ixl_add_mc_filter(vsi,
495		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
496		mcnt++;
497	}
498	if_maddr_runlock(ifp);
499	if (mcnt > 0) {
500		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
501		ixl_add_hw_filters(vsi, flags, mcnt);
502	}
503
504	IOCTL_DEBUGOUT("ixl_add_multi: end");
505}
506
/*
 * Mark for deletion every multicast filter whose address no longer
 * appears in the interface's multicast list, then remove the marked
 * filters from the hardware.
 *
 * Returns the number of filters marked for deletion.
 */
int
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool		match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			/* Is this filter's MAC still in if_multiaddrs? */
			match = FALSE;
			CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);

	return (mcnt);
}
545
546void
547ixl_link_up_msg(struct ixl_pf *pf)
548{
549	struct i40e_hw *hw = &pf->hw;
550	struct ifnet *ifp = pf->vsi.ifp;
551	char *req_fec_string, *neg_fec_string;
552	u8 fec_abilities;
553
554	fec_abilities = hw->phy.link_info.req_fec_info;
555	/* If both RS and KR are requested, only show RS */
556	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
557		req_fec_string = ixl_fec_string[0];
558	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
559		req_fec_string = ixl_fec_string[1];
560	else
561		req_fec_string = ixl_fec_string[2];
562
563	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
564		neg_fec_string = ixl_fec_string[0];
565	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
566		neg_fec_string = ixl_fec_string[1];
567	else
568		neg_fec_string = ixl_fec_string[2];
569
570	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
571	    ifp->if_xname,
572	    ixl_link_speed_string(hw->phy.link_info.link_speed),
573	    req_fec_string, neg_fec_string,
574	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
575	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
576	        hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
577		ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
578		ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
579		ixl_fc_string[1] : ixl_fc_string[0]);
580}
581
582/*
583 * Configure admin queue/misc interrupt cause registers in hardware.
584 */
585void
586ixl_configure_intr0_msix(struct ixl_pf *pf)
587{
588	struct i40e_hw *hw = &pf->hw;
589	u32 reg;
590
591	/* First set up the adminq - vector 0 */
592	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
593	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
594
595	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
596	    I40E_PFINT_ICR0_ENA_GRST_MASK |
597	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
598	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
599	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
600	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
601	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
602	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
603	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
604
605	/*
606	 * 0x7FF is the end of the queue list.
607	 * This means we won't use MSI-X vector 0 for a queue interrupt
608	 * in MSI-X mode.
609	 */
610	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
611	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
612	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
613
614	wr32(hw, I40E_PFINT_DYN_CTL0,
615	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
616	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
617
618	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
619}
620
621void
622ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
623{
624	/* Display supported media types */
625	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
626		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
627
628	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
629		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
630	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
631		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
632	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
633		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
634
635	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
636		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
637
638	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
639		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
640
641	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
642	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
643	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
644		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
645
646	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
647		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
648	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
649		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
650	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
651		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
652
653	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
654	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
655	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
656	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
657	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
658		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
659	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
660		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
661	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
662		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
663
664	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
665		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
666
667	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
668	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
669		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
670	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
671		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
672	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
673		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
674	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
675		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
676	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
677		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
678
679	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
680		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
681
682	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
683		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
684	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
685		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
686
687	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
688		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
689	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
690		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
691	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
692		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
693	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
694		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
695	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
696		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
697	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
698		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
699}
700
701/*********************************************************************
702 *
703 *  Get Firmware Switch configuration
704 *	- this will need to be more robust when more complex
705 *	  switch configurations are enabled.
706 *
707 **********************************************************************/
708int
709ixl_switch_config(struct ixl_pf *pf)
710{
711	struct i40e_hw	*hw = &pf->hw;
712	struct ixl_vsi	*vsi = &pf->vsi;
713	device_t 	dev = iflib_get_dev(vsi->ctx);
714	struct i40e_aqc_get_switch_config_resp *sw_config;
715	u8	aq_buf[I40E_AQ_LARGE_BUF];
716	int	ret;
717	u16	next = 0;
718
719	memset(&aq_buf, 0, sizeof(aq_buf));
720	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
721	ret = i40e_aq_get_switch_config(hw, sw_config,
722	    sizeof(aq_buf), &next, NULL);
723	if (ret) {
724		device_printf(dev, "aq_get_switch_config() failed, error %d,"
725		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
726		return (ret);
727	}
728	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
729		device_printf(dev,
730		    "Switch config: header reported: %d in structure, %d total\n",
731		    LE16_TO_CPU(sw_config->header.num_reported),
732		    LE16_TO_CPU(sw_config->header.num_total));
733		for (int i = 0;
734		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
735			device_printf(dev,
736			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
737			    sw_config->element[i].element_type,
738			    LE16_TO_CPU(sw_config->element[i].seid),
739			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
740			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
741		}
742	}
743	/* Simplified due to a single VSI */
744	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
745	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
746	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
747	return (ret);
748}
749
750void
751ixl_free_mac_filters(struct ixl_vsi *vsi)
752{
753	struct ixl_mac_filter *f;
754
755	while (!SLIST_EMPTY(&vsi->ftl)) {
756		f = SLIST_FIRST(&vsi->ftl);
757		SLIST_REMOVE_HEAD(&vsi->ftl, next);
758		free(f, M_DEVBUF);
759	}
760
761	vsi->num_hw_filters = 0;
762}
763
/*
 * Create a sysctl node named "sysctl_name" for the VSI under its
 * device's sysctl tree and attach the VSI's ethernet statistics to
 * it; per-queue statistics are added only when requested.
 */
void
ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
			CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	/* Queue-level stats are optional; the caller decides */
	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}
782
783/*
784 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
785 * Writes to the ITR registers immediately.
786 */
787static int
788ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
789{
790	struct ixl_pf *pf = (struct ixl_pf *)arg1;
791	device_t dev = pf->dev;
792	int error = 0;
793	int requested_tx_itr;
794
795	requested_tx_itr = pf->tx_itr;
796	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
797	if ((error) || (req->newptr == NULL))
798		return (error);
799	if (pf->dynamic_tx_itr) {
800		device_printf(dev,
801		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
802		    return (EINVAL);
803	}
804	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
805		device_printf(dev,
806		    "Invalid TX itr value; value must be between 0 and %d\n",
807		        IXL_MAX_ITR);
808		return (EINVAL);
809	}
810
811	pf->tx_itr = requested_tx_itr;
812	ixl_configure_tx_itr(pf);
813
814	return (error);
815}
816
817/*
818 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
819 * Writes to the ITR registers immediately.
820 */
821static int
822ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
823{
824	struct ixl_pf *pf = (struct ixl_pf *)arg1;
825	device_t dev = pf->dev;
826	int error = 0;
827	int requested_rx_itr;
828
829	requested_rx_itr = pf->rx_itr;
830	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
831	if ((error) || (req->newptr == NULL))
832		return (error);
833	if (pf->dynamic_rx_itr) {
834		device_printf(dev,
835		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
836		    return (EINVAL);
837	}
838	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
839		device_printf(dev,
840		    "Invalid RX itr value; value must be between 0 and %d\n",
841		        IXL_MAX_ITR);
842		return (EINVAL);
843	}
844
845	pf->rx_itr = requested_rx_itr;
846	ixl_configure_rx_itr(pf);
847
848	return (error);
849}
850
/*
 * Attach a "mac" sysctl node under "child" exposing the port-level
 * MAC statistics in "stats", including the embedded ethernet stats.
 */
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
				    CTLFLAG_RD, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	/* Table of {counter pointer, sysctl name, description} triples;
	 * terminated by an all-zero entry. */
	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	/* Register one read-only unsigned-quad sysctl per table entry */
	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
				CTLFLAG_RD, entry->stat,
				entry->description);
		entry++;
	}
}
909
/*
 * Program the RSS hash key for the PF's LAN VSI.
 *
 * With the RSS kernel option, the key configured by the network
 * stack is used; otherwise the driver's default key is programmed.
 */
void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t	dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

#ifdef RSS
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		/* X722 must set the key via an admin queue command */
		struct i40e_aqc_get_set_rss_key_data key_data;
		/* NOTE(review): the magic 52 is presumably
		 * sizeof(key_data) (40-byte standard + 12-byte extended
		 * key) -- confirm and prefer sizeof over the constant. */
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		/* Other MACs write the key directly into registers */
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}
940
941/*
942 * Configure enabled PCTYPES for RSS.
943 */
944void
945ixl_set_rss_pctypes(struct ixl_pf *pf)
946{
947	struct i40e_hw *hw = &pf->hw;
948	u64		set_hena = 0, hena;
949
950#ifdef RSS
951	u32		rss_hash_config;
952
953	rss_hash_config = rss_gethashconfig();
954	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
955                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
956	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
957                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
958	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
959                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
960	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
961                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
962	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
963		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
964	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
965                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
966        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
967                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
968#else
969	if (hw->mac.type == I40E_MAC_X722)
970		set_hena = IXL_DEFAULT_RSS_HENA_X722;
971	else
972		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
973#endif
974	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
975	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
976	hena |= set_hena;
977	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
978	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
979
980}
981
982/*
983** Setup the PF's RSS parameters.
984*/
985void
986ixl_config_rss(struct ixl_pf *pf)
987{
988	ixl_set_rss_key(pf);
989	ixl_set_rss_pctypes(pf);
990	ixl_set_rss_hlut(pf);
991}
992
993/*
994 * In some firmware versions there is default MAC/VLAN filter
995 * configured which interferes with filters managed by driver.
996 * Make sure it's removed.
997 */
998void
999ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1000{
1001	struct i40e_aqc_remove_macvlan_element_data e;
1002
1003	bzero(&e, sizeof(e));
1004	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1005	e.vlan_tag = 0;
1006	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1007	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1008
1009	bzero(&e, sizeof(e));
1010	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1011	e.vlan_tag = 0;
1012	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1013		I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1014	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1015}
1016
1017/*
1018** Initialize filter list and add filters that the hardware
1019** needs to know about.
1020**
1021** Requires VSI's seid to be set before calling.
1022*/
1023void
1024ixl_init_filters(struct ixl_vsi *vsi)
1025{
1026	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1027
1028	ixl_dbg_filter(pf, "%s: start\n", __func__);
1029
1030	/* Initialize mac filter list for VSI */
1031	SLIST_INIT(&vsi->ftl);
1032	vsi->num_hw_filters = 0;
1033
1034	/* Receive broadcast Ethernet frames */
1035	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
1036
1037	if (IXL_VSI_IS_VF(vsi))
1038		return;
1039
1040	ixl_del_default_hw_filters(vsi);
1041
1042	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1043
1044	/*
1045	 * Prevent Tx flow control frames from being sent out by
1046	 * non-firmware transmitters.
1047	 * This affects every VSI in the PF.
1048	 */
1049#ifndef IXL_DEBUG_FC
1050	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1051#else
1052	if (pf->enable_tx_fc_filter)
1053		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1054#endif
1055}
1056
1057/*
1058** This routine adds mulicast filters
1059*/
1060void
1061ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
1062{
1063	struct ixl_mac_filter *f;
1064
1065	/* Does one already exist */
1066	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1067	if (f != NULL)
1068		return;
1069
1070	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
1071	if (f != NULL)
1072		f->flags |= IXL_FILTER_MC;
1073	else
1074		printf("WARNING: no filter available!!\n");
1075}
1076
/*
 * Re-push every software filter marked in-use to the hardware
 * (presumably after a reset wiped the HW filter table — confirm
 * against callers).
 */
void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
}
1082
1083/*
1084 * This routine adds a MAC/VLAN filter to the software filter
1085 * list, then adds that new filter to the HW if it doesn't already
1086 * exist in the SW filter list.
1087 */
1088void
1089ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1090{
1091	struct ixl_mac_filter	*f, *tmp;
1092	struct ixl_pf		*pf;
1093	device_t		dev;
1094
1095	pf = vsi->back;
1096	dev = pf->dev;
1097
1098	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
1099	    MAC_FORMAT_ARGS(macaddr), vlan);
1100
1101	/* Does one already exist */
1102	f = ixl_find_filter(vsi, macaddr, vlan);
1103	if (f != NULL)
1104		return;
1105	/*
1106	** Is this the first vlan being registered, if so we
1107	** need to remove the ANY filter that indicates we are
1108	** not in a vlan, and replace that with a 0 filter.
1109	*/
1110	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1111		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
1112		if (tmp != NULL) {
1113			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
1114			ixl_add_filter(vsi, macaddr, 0);
1115		}
1116	}
1117
1118	f = ixl_new_filter(vsi, macaddr, vlan);
1119	if (f == NULL) {
1120		device_printf(dev, "WARNING: no filter available!!\n");
1121		return;
1122	}
1123	if (f->vlan != IXL_VLAN_ANY)
1124		f->flags |= IXL_FILTER_VLAN;
1125	else
1126		vsi->num_macs++;
1127
1128	f->flags |= IXL_FILTER_USED;
1129	ixl_add_hw_filters(vsi, f->flags, 1);
1130}
1131
1132void
1133ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1134{
1135	struct ixl_mac_filter *f;
1136
1137	ixl_dbg_filter((struct ixl_pf *)vsi->back,
1138	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1139	    MAC_FORMAT_ARGS(macaddr), vlan);
1140
1141	f = ixl_find_filter(vsi, macaddr, vlan);
1142	if (f == NULL)
1143		return;
1144
1145	f->flags |= IXL_FILTER_DEL;
1146	ixl_del_hw_filters(vsi, 1);
1147	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1148		vsi->num_macs--;
1149
1150	/* Check if this is the last vlan removal */
1151	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
1152		/* Switch back to a non-vlan filter */
1153		ixl_del_filter(vsi, macaddr, 0);
1154		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1155	}
1156	return;
1157}
1158
1159/*
1160** Find the filter with both matching mac addr and vlan id
1161*/
1162struct ixl_mac_filter *
1163ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1164{
1165	struct ixl_mac_filter	*f;
1166
1167	SLIST_FOREACH(f, &vsi->ftl, next) {
1168		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
1169		    && (f->vlan == vlan)) {
1170			return (f);
1171		}
1172	}
1173
1174	return (NULL);
1175}
1176
1177/*
1178** This routine takes additions to the vsi filter
1179** table and creates an Admin Queue call to create
1180** the filters in the hardware.
1181*/
1182void
1183ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
1184{
1185	struct i40e_aqc_add_macvlan_element_data *a, *b;
1186	struct ixl_mac_filter	*f;
1187	struct ixl_pf		*pf;
1188	struct i40e_hw		*hw;
1189	device_t		dev;
1190	enum i40e_status_code	status;
1191	int			j = 0;
1192
1193	pf = vsi->back;
1194	dev = vsi->dev;
1195	hw = &pf->hw;
1196
1197	ixl_dbg_filter(pf,
1198	    "ixl_add_hw_filters: flags: %d cnt: %d\n", flags, cnt);
1199
1200	if (cnt < 1) {
1201		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
1202		return;
1203	}
1204
1205	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1206	    M_DEVBUF, M_NOWAIT | M_ZERO);
1207	if (a == NULL) {
1208		device_printf(dev, "add_hw_filters failed to get memory\n");
1209		return;
1210	}
1211
1212	/*
1213	** Scan the filter list, each time we find one
1214	** we add it to the admin queue array and turn off
1215	** the add bit.
1216	*/
1217	SLIST_FOREACH(f, &vsi->ftl, next) {
1218		if ((f->flags & flags) == flags) {
1219			b = &a[j]; // a pox on fvl long names :)
1220			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
1221			if (f->vlan == IXL_VLAN_ANY) {
1222				b->vlan_tag = 0;
1223				b->flags = CPU_TO_LE16(
1224				    I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
1225			} else {
1226				b->vlan_tag = CPU_TO_LE16(f->vlan);
1227				b->flags = 0;
1228			}
1229			b->flags |= CPU_TO_LE16(
1230			    I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1231			f->flags &= ~IXL_FILTER_ADD;
1232			j++;
1233
1234			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1235			    MAC_FORMAT_ARGS(f->macaddr));
1236		}
1237		if (j == cnt)
1238			break;
1239	}
1240	if (j > 0) {
1241		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
1242		if (status)
1243			device_printf(dev, "i40e_aq_add_macvlan status %s, "
1244			    "error %s\n", i40e_stat_str(hw, status),
1245			    i40e_aq_str(hw, hw->aq.asq_last_status));
1246		else
1247			vsi->num_hw_filters += j;
1248	}
1249	free(a, M_DEVBUF);
1250	return;
1251}
1252
1253/*
1254** This routine takes removals in the vsi filter
1255** table and creates an Admin Queue call to delete
1256** the filters in the hardware.
1257*/
1258void
1259ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
1260{
1261	struct i40e_aqc_remove_macvlan_element_data *d, *e;
1262	struct ixl_pf		*pf;
1263	struct i40e_hw		*hw;
1264	device_t		dev;
1265	struct ixl_mac_filter	*f, *f_temp;
1266	enum i40e_status_code	status;
1267	int			j = 0;
1268
1269	pf = vsi->back;
1270	hw = &pf->hw;
1271	dev = vsi->dev;
1272
1273	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
1274
1275	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
1276	    M_DEVBUF, M_NOWAIT | M_ZERO);
1277	if (d == NULL) {
1278		device_printf(dev, "%s: failed to get memory\n", __func__);
1279		return;
1280	}
1281
1282	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
1283		if (f->flags & IXL_FILTER_DEL) {
1284			e = &d[j]; // a pox on fvl long names :)
1285			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
1286			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1287			if (f->vlan == IXL_VLAN_ANY) {
1288				e->vlan_tag = 0;
1289				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1290			} else {
1291				e->vlan_tag = f->vlan;
1292			}
1293
1294			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
1295			    MAC_FORMAT_ARGS(f->macaddr));
1296
1297			/* delete entry from vsi list */
1298			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
1299			free(f, M_DEVBUF);
1300			j++;
1301		}
1302		if (j == cnt)
1303			break;
1304	}
1305	if (j > 0) {
1306		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
1307		if (status) {
1308			int sc = 0;
1309			for (int i = 0; i < j; i++)
1310				sc += (!d[i].error_code);
1311			vsi->num_hw_filters -= sc;
1312			device_printf(dev,
1313			    "Failed to remove %d/%d filters, error %s\n",
1314			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
1315		} else
1316			vsi->num_hw_filters -= j;
1317	}
1318	free(d, M_DEVBUF);
1319
1320	ixl_dbg_filter(pf, "%s: end\n", __func__);
1321	return;
1322}
1323
1324int
1325ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1326{
1327	struct i40e_hw	*hw = &pf->hw;
1328	int		error = 0;
1329	u32		reg;
1330	u16		pf_qidx;
1331
1332	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1333
1334	ixl_dbg(pf, IXL_DBG_EN_DIS,
1335	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
1336	    pf_qidx, vsi_qidx);
1337
1338	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1339
1340	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1341	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1342	    I40E_QTX_ENA_QENA_STAT_MASK;
1343	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1344	/* Verify the enable took */
1345	for (int j = 0; j < 10; j++) {
1346		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1347		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1348			break;
1349		i40e_usec_delay(10);
1350	}
1351	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1352		device_printf(pf->dev, "TX queue %d still disabled!\n",
1353		    pf_qidx);
1354		error = ETIMEDOUT;
1355	}
1356
1357	return (error);
1358}
1359
1360int
1361ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1362{
1363	struct i40e_hw	*hw = &pf->hw;
1364	int		error = 0;
1365	u32		reg;
1366	u16		pf_qidx;
1367
1368	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1369
1370	ixl_dbg(pf, IXL_DBG_EN_DIS,
1371	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1372	    pf_qidx, vsi_qidx);
1373
1374	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1375	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1376	    I40E_QRX_ENA_QENA_STAT_MASK;
1377	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1378	/* Verify the enable took */
1379	for (int j = 0; j < 10; j++) {
1380		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1381		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1382			break;
1383		i40e_usec_delay(10);
1384	}
1385	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1386		device_printf(pf->dev, "RX queue %d still disabled!\n",
1387		    pf_qidx);
1388		error = ETIMEDOUT;
1389	}
1390
1391	return (error);
1392}
1393
1394int
1395ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1396{
1397	int error = 0;
1398
1399	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1400	/* Called function already prints error message */
1401	if (error)
1402		return (error);
1403	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1404	return (error);
1405}
1406
1407/*
1408 * Returns error on first ring that is detected hung.
1409 */
1410int
1411ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1412{
1413	struct i40e_hw	*hw = &pf->hw;
1414	int		error = 0;
1415	u32		reg;
1416	u16		pf_qidx;
1417
1418	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1419
1420	ixl_dbg(pf, IXL_DBG_EN_DIS,
1421	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1422	    pf_qidx, vsi_qidx);
1423
1424	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1425	i40e_usec_delay(500);
1426
1427	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1428	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1429	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1430	/* Verify the disable took */
1431	for (int j = 0; j < 10; j++) {
1432		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1433		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1434			break;
1435		i40e_msec_delay(10);
1436	}
1437	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1438		device_printf(pf->dev, "TX queue %d still enabled!\n",
1439		    pf_qidx);
1440		error = ETIMEDOUT;
1441	}
1442
1443	return (error);
1444}
1445
1446/*
1447 * Returns error on first ring that is detected hung.
1448 */
1449int
1450ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1451{
1452	struct i40e_hw	*hw = &pf->hw;
1453	int		error = 0;
1454	u32		reg;
1455	u16		pf_qidx;
1456
1457	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1458
1459	ixl_dbg(pf, IXL_DBG_EN_DIS,
1460	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1461	    pf_qidx, vsi_qidx);
1462
1463	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1464	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1465	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1466	/* Verify the disable took */
1467	for (int j = 0; j < 10; j++) {
1468		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1469		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1470			break;
1471		i40e_msec_delay(10);
1472	}
1473	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1474		device_printf(pf->dev, "RX queue %d still enabled!\n",
1475		    pf_qidx);
1476		error = ETIMEDOUT;
1477	}
1478
1479	return (error);
1480}
1481
1482int
1483ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1484{
1485	int error = 0;
1486
1487	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1488	/* Called function already prints error message */
1489	if (error)
1490		return (error);
1491	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1492	return (error);
1493}
1494
/*
 * Decode and report a TX Malicious Driver Detection event.  Reads the
 * global event register, then the PF- and VF-scoped valid bits, clears
 * each register as it is consumed, and prints a message identifying
 * the offender(s).  event/queue/pf_num are only set when the global
 * valid bit was seen; all uses below are guarded by the early return.
 */
static void
ixl_handle_tx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 vf_num, queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		    I40E_GL_MDET_TX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		    I40E_GL_MDET_TX_QUEUE_SHIFT;
		/* Writing all-ones clears the event latch. */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	/* Was this PF itself the offender? */
	reg = rd32(hw, I40E_PF_MDET_TX);
	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
		    event, queue, pf_num, vf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "TX Malicious Driver Detection event (unknown)\n");
}
1567
/*
 * Decode and report an RX Malicious Driver Detection event.  Mirrors
 * ixl_handle_tx_mdd_event(), except GL_MDET_RX carries no VF number,
 * so only the per-VP valid bits identify an offending VF.
 */
static void
ixl_handle_rx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/*
	 * GL_MDET_RX doesn't contain VF number information, unlike
	 * GL_MDET_TX.
	 */
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
		    I40E_GL_MDET_RX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
		    I40E_GL_MDET_RX_QUEUE_SHIFT;
		/* Writing all-ones clears the event latch. */
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	/* Was this PF itself the offender? */
	reg = rd32(hw, I40E_PF_MDET_RX);
	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d, (VF-%d)\n",
		    event, queue, pf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "RX Malicious Driver Detection event (unknown)\n");
}
1641
1642/**
1643 * ixl_handle_mdd_event
1644 *
1645 * Called from interrupt handler to identify possibly malicious vfs
1646 * (But also detects events from the PF, as well)
1647 **/
1648void
1649ixl_handle_mdd_event(struct ixl_pf *pf)
1650{
1651	struct i40e_hw *hw = &pf->hw;
1652	u32 reg;
1653
1654	/*
1655	 * Handle both TX/RX because it's possible they could
1656	 * both trigger in the same interrupt.
1657	 */
1658	ixl_handle_tx_mdd_event(pf);
1659	ixl_handle_rx_mdd_event(pf);
1660
1661	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
1662
1663	/* re-enable mdd interrupt cause */
1664	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1665	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1666	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1667	ixl_flush(hw);
1668}
1669
1670void
1671ixl_enable_intr0(struct i40e_hw *hw)
1672{
1673	u32		reg;
1674
1675	/* Use IXL_ITR_NONE so ITR isn't updated here */
1676	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1677	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1678	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1679	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1680}
1681
1682void
1683ixl_disable_intr0(struct i40e_hw *hw)
1684{
1685	u32		reg;
1686
1687	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1688	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1689	ixl_flush(hw);
1690}
1691
1692void
1693ixl_enable_queue(struct i40e_hw *hw, int id)
1694{
1695	u32		reg;
1696
1697	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1698	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1699	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1700	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1701}
1702
1703void
1704ixl_disable_queue(struct i40e_hw *hw, int id)
1705{
1706	u32		reg;
1707
1708	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1709	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1710}
1711
/*
 * Recover the PF after an EMP (firmware) reset: quiesce, reset the PF,
 * detect firmware recovery mode, and rebuild driver/HW state.  The
 * ordering of these steps matters; do not reorder.
 */
void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	/* Remember whether the interface was running so we can restore it. */
	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);

	ixl_prepare_for_reset(pf, is_up);
	/*
	 * i40e_pf_reset checks the type of reset and acts
	 * accordingly. If EMP or Core reset was performed
	 * doing PF reset is not necessary and it sometimes
	 * fails.
	 */
	ixl_pf_reset(pf);

	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
		/* Newly entered recovery mode: flag it and force link down. */
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		device_printf(pf->dev,
		    "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		pf->link_up = FALSE;
		ixl_update_link_status(pf);
	}

	ixl_rebuild_hw_structs_after_reset(pf, is_up);

	/* Reset is complete; allow normal operation again. */
	atomic_clear_32(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
}
1740
/*
 * Pull all port-level hardware statistics into pf->stats, then update
 * the main VSI's stats and each enabled VF's VSI stats.  The
 * ixl_stat_update32/48 helpers handle counter wrap relative to the
 * saved offsets; the first call after load (stat_offsets_loaded ==
 * false) just latches baselines.
 */
void
ixl_update_stats_counters(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ixl_vf	*vf;
	/* Saved to detect new XOFF (pause) frames received this interval. */
	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;

	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;

	/* Update hw stats */
	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);
	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);
	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);
	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* Flow control (LFC) stats */
	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/*
	 * For watchdog management we need to know if we have been paused
	 * during the last interval, so capture that here.
	 */
	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
		vsi->shared->isc_pause_frames = 1;

	/* Packet size stats rx */
	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* Packet size stats tx */
	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);
	/* EEE */
	i40e_get_phy_lpi_status(hw, nsd);

	i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
			  &osd->tx_lpi_count, &nsd->tx_lpi_count,
			  &osd->rx_lpi_count, &nsd->rx_lpi_count);

	pf->stat_offsets_loaded = true;
	/* End hw stats */

	/* Update vsi stats */
	ixl_update_vsi_stats(vsi);

	/* Refresh per-VF VSI stats for every enabled VF. */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &pf->vfs[i];
		if (vf->vf_flags & VF_FLAG_ENABLED)
			ixl_update_eth_stats(&pf->vfs[i].vsi);
	}
}
1931
1932/**
1933 * Update VSI-specific ethernet statistics counters.
1934 **/
1935void
1936ixl_update_eth_stats(struct ixl_vsi *vsi)
1937{
1938	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1939	struct i40e_hw *hw = &pf->hw;
1940	struct i40e_eth_stats *es;
1941	struct i40e_eth_stats *oes;
1942	u16 stat_idx = vsi->info.stat_counter_idx;
1943
1944	es = &vsi->eth_stats;
1945	oes = &vsi->eth_stats_offsets;
1946
1947	/* Gather up the stats that the hw collects */
1948	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
1949			   vsi->stat_offsets_loaded,
1950			   &oes->tx_errors, &es->tx_errors);
1951	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
1952			   vsi->stat_offsets_loaded,
1953			   &oes->rx_discards, &es->rx_discards);
1954
1955	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
1956			   I40E_GLV_GORCL(stat_idx),
1957			   vsi->stat_offsets_loaded,
1958			   &oes->rx_bytes, &es->rx_bytes);
1959	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
1960			   I40E_GLV_UPRCL(stat_idx),
1961			   vsi->stat_offsets_loaded,
1962			   &oes->rx_unicast, &es->rx_unicast);
1963	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
1964			   I40E_GLV_MPRCL(stat_idx),
1965			   vsi->stat_offsets_loaded,
1966			   &oes->rx_multicast, &es->rx_multicast);
1967	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
1968			   I40E_GLV_BPRCL(stat_idx),
1969			   vsi->stat_offsets_loaded,
1970			   &oes->rx_broadcast, &es->rx_broadcast);
1971
1972	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
1973			   I40E_GLV_GOTCL(stat_idx),
1974			   vsi->stat_offsets_loaded,
1975			   &oes->tx_bytes, &es->tx_bytes);
1976	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
1977			   I40E_GLV_UPTCL(stat_idx),
1978			   vsi->stat_offsets_loaded,
1979			   &oes->tx_unicast, &es->tx_unicast);
1980	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
1981			   I40E_GLV_MPTCL(stat_idx),
1982			   vsi->stat_offsets_loaded,
1983			   &oes->tx_multicast, &es->tx_multicast);
1984	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
1985			   I40E_GLV_BPTCL(stat_idx),
1986			   vsi->stat_offsets_loaded,
1987			   &oes->tx_broadcast, &es->tx_broadcast);
1988	vsi->stat_offsets_loaded = true;
1989}
1990
1991void
1992ixl_update_vsi_stats(struct ixl_vsi *vsi)
1993{
1994	struct ixl_pf		*pf;
1995	struct ifnet		*ifp;
1996	struct i40e_eth_stats	*es;
1997	u64			tx_discards;
1998
1999	struct i40e_hw_port_stats *nsd;
2000
2001	pf = vsi->back;
2002	ifp = vsi->ifp;
2003	es = &vsi->eth_stats;
2004	nsd = &pf->stats;
2005
2006	ixl_update_eth_stats(vsi);
2007
2008	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2009
2010	/* Update ifnet stats */
2011	IXL_SET_IPACKETS(vsi, es->rx_unicast +
2012	                   es->rx_multicast +
2013			   es->rx_broadcast);
2014	IXL_SET_OPACKETS(vsi, es->tx_unicast +
2015	                   es->tx_multicast +
2016			   es->tx_broadcast);
2017	IXL_SET_IBYTES(vsi, es->rx_bytes);
2018	IXL_SET_OBYTES(vsi, es->tx_bytes);
2019	IXL_SET_IMCASTS(vsi, es->rx_multicast);
2020	IXL_SET_OMCASTS(vsi, es->tx_multicast);
2021
2022	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2023	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
2024	    nsd->rx_jabber);
2025	IXL_SET_OERRORS(vsi, es->tx_errors);
2026	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2027	IXL_SET_OQDROPS(vsi, tx_discards);
2028	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2029	IXL_SET_COLLISIONS(vsi, 0);
2030}
2031
2032/**
2033 * Reset all of the stats for the given pf
2034 **/
2035void
2036ixl_pf_reset_stats(struct ixl_pf *pf)
2037{
2038	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2039	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2040	pf->stat_offsets_loaded = false;
2041}
2042
2043/**
2044 * Resets all stats of the given vsi
2045 **/
2046void
2047ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2048{
2049	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2050	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2051	vsi->stat_offsets_loaded = false;
2052}
2053
2054/**
2055 * Read and update a 48 bit stat from the hw
2056 *
2057 * Since the device stats are not reset at PFReset, they likely will not
2058 * be zeroed when the driver starts.  We'll save the first values read
2059 * and use them as offsets to be subtracted from the raw values in order
2060 * to report stats that count from zero.
2061 **/
2062void
2063ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2064	bool offset_loaded, u64 *offset, u64 *stat)
2065{
2066	u64 new_data;
2067
2068#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
2069	new_data = rd64(hw, loreg);
2070#else
2071	/*
2072	 * Use two rd32's instead of one rd64; FreeBSD versions before
2073	 * 10 don't support 64-bit bus reads/writes.
2074	 */
2075	new_data = rd32(hw, loreg);
2076	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
2077#endif
2078
2079	if (!offset_loaded)
2080		*offset = new_data;
2081	if (new_data >= *offset)
2082		*stat = new_data - *offset;
2083	else
2084		*stat = (new_data + ((u64)1 << 48)) - *offset;
2085	*stat &= 0xFFFFFFFFFFFFULL;
2086}
2087
2088/**
2089 * Read and update a 32 bit stat from the hw
2090 **/
2091void
2092ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2093	bool offset_loaded, u64 *offset, u64 *stat)
2094{
2095	u32 new_data;
2096
2097	new_data = rd32(hw, reg);
2098	if (!offset_loaded)
2099		*offset = new_data;
2100	if (new_data >= *offset)
2101		*stat = (u32)(new_data - *offset);
2102	else
2103		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2104}
2105
2106/**
2107 * Add subset of device sysctls safe to use in recovery mode
2108 */
2109void
2110ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2111{
2112	device_t dev = pf->dev;
2113
2114	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2115	struct sysctl_oid_list *ctx_list =
2116	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2117
2118	struct sysctl_oid *debug_node;
2119	struct sysctl_oid_list *debug_list;
2120
2121	SYSCTL_ADD_PROC(ctx, ctx_list,
2122	    OID_AUTO, "fw_version",
2123	    CTLTYPE_STRING | CTLFLAG_RD, pf, 0,
2124	    ixl_sysctl_show_fw, "A", "Firmware version");
2125
2126	/* Add sysctls meant to print debug information, but don't list them
2127	 * in "sysctl -a" output. */
2128	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2129	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL,
2130	    "Debug Sysctls");
2131	debug_list = SYSCTL_CHILDREN(debug_node);
2132
2133	SYSCTL_ADD_UINT(ctx, debug_list,
2134	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2135	    &pf->hw.debug_mask, 0, "Shared code debug message level");
2136
2137	SYSCTL_ADD_UINT(ctx, debug_list,
2138	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2139	    &pf->dbg_mask, 0, "Non-shared code debug message level");
2140
2141	SYSCTL_ADD_PROC(ctx, debug_list,
2142	    OID_AUTO, "dump_debug_data",
2143	    CTLTYPE_STRING | CTLFLAG_RD,
2144	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2145
2146	SYSCTL_ADD_PROC(ctx, debug_list,
2147	    OID_AUTO, "do_pf_reset",
2148	    CTLTYPE_INT | CTLFLAG_WR,
2149	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2150
2151	SYSCTL_ADD_PROC(ctx, debug_list,
2152	    OID_AUTO, "do_core_reset",
2153	    CTLTYPE_INT | CTLFLAG_WR,
2154	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2155
2156	SYSCTL_ADD_PROC(ctx, debug_list,
2157	    OID_AUTO, "do_global_reset",
2158	    CTLTYPE_INT | CTLFLAG_WR,
2159	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2160
2161	SYSCTL_ADD_PROC(ctx, debug_list,
2162	    OID_AUTO, "queue_interrupt_table",
2163	    CTLTYPE_STRING | CTLFLAG_RD,
2164	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2165}
2166
/*
 * Register the full set of per-device sysctls: link/flow control and
 * speed knobs, ITR tuning, FEC controls (25G parts only), EEE nodes,
 * and a hidden "debug" subtree with diagnostic handlers.
 *
 * NOTE(review): some PROC handlers here are added without an explicit
 * CTLFLAG_MPSAFE/CTLFLAG_NEEDGIANT while the EEE ones carry
 * CTLFLAG_MPSAFE — presumably intentional for this kernel branch;
 * confirm against the target FreeBSD version's sysctl(9) requirements.
 */
void
ixl_add_device_sysctls(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;

	/* Device-level sysctl context/tree created by newbus */
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *ctx_list =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	struct sysctl_oid *debug_node;
	struct sysctl_oid_list *debug_list;

	struct sysctl_oid *fec_node;
	struct sysctl_oid_list *fec_list;
	struct sysctl_oid *eee_node;
	struct sysctl_oid_list *eee_list;

	/* Set up sysctls */
	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_unallocated_queues, "I",
	    "Queues not allocated to a PF or VF");

	/* Interrupt throttling (ITR) controls */
	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
	    "Immediately set TX ITR value for all queues");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_sysctl_pf_rx_itr, "I",
	    "Immediately set RX ITR value for all queues");

	SYSCTL_ADD_INT(ctx, ctx_list,
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");

	SYSCTL_ADD_INT(ctx, ctx_list,
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");

	/* Add FEC sysctls for 25G adapters */
	if (i40e_is_25G_device(hw->device_id)) {
		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
		    OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls");
		fec_list = SYSCTL_CHILDREN(fec_node);

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW,
		    pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW,
		    pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW,
		    pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW,
		    pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW,
		    pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes");
	}

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);

	/* Energy Efficient Ethernet controls and LPI counters */
	eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "Energy Efficient Ethernet (EEE) Sysctls");
	eee_list = SYSCTL_CHILDREN(eee_node);

	SYSCTL_ADD_PROC(ctx, eee_list,
	    OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    pf, 0, ixl_sysctl_eee_enable, "I",
	    "Enable Energy Efficient Ethernet (EEE)");

	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
	    "TX LPI status");

	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
	    "RX LPI status");

	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
	    "TX LPI count");

	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
	    "RX LPI count");

	/* Add sysctls meant to print debug information, but don't list them
	 * in "sysctl -a" output. */
	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
	debug_list = SYSCTL_CHILDREN(debug_node);

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
	    &pf->hw.debug_mask, 0, "Shared code debug message level");

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
	    &pf->dbg_mask, 0, "Non-shared code debug message level");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "switch_vlans", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");

	/* RSS configuration viewers */
	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");

	/* Write-only triggers that ask the HW/FW to perform resets */
	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");

	/* I2C access handlers, only for parts with an I2C interface */
	if (pf->has_i2c) {
		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);

		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);

		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
	}
}
2374
2375/*
2376 * Primarily for finding out how many queues can be assigned to VFs,
2377 * at runtime.
2378 */
2379static int
2380ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2381{
2382	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2383	int queues;
2384
2385	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2386
2387	return sysctl_handle_int(oidp, NULL, queues, req);
2388}
2389
2390static const char *
2391ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2392{
2393	const char * link_speed_str[] = {
2394		"Unknown",
2395		"100 Mbps",
2396		"1 Gbps",
2397		"10 Gbps",
2398		"40 Gbps",
2399		"20 Gbps",
2400		"25 Gbps",
2401		"2.5 Gbps",
2402		"5 Gbps"
2403	};
2404	int index;
2405
2406	switch (link_speed) {
2407	case I40E_LINK_SPEED_100MB:
2408		index = 1;
2409		break;
2410	case I40E_LINK_SPEED_1GB:
2411		index = 2;
2412		break;
2413	case I40E_LINK_SPEED_10GB:
2414		index = 3;
2415		break;
2416	case I40E_LINK_SPEED_40GB:
2417		index = 4;
2418		break;
2419	case I40E_LINK_SPEED_20GB:
2420		index = 5;
2421		break;
2422	case I40E_LINK_SPEED_25GB:
2423		index = 6;
2424		break;
2425	case I40E_LINK_SPEED_2_5GB:
2426		index = 7;
2427		break;
2428	case I40E_LINK_SPEED_5GB:
2429		index = 8;
2430		break;
2431	case I40E_LINK_SPEED_UNKNOWN:
2432	default:
2433		index = 0;
2434		break;
2435	}
2436
2437	return (link_speed_str[index]);
2438}
2439
/*
 * Sysctl handler: report the current port speed as a string.
 * Refreshes the link status from firmware before reading it.
 */
int
ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int error = 0;

	/* Make sure hw->phy.link_info is up to date before reporting */
	ixl_update_link_status(pf);

	/*
	 * NOTE(review): the length argument 8 is shorter than some speed
	 * strings ("100 Mbps" is 8 chars + NUL).  sysctl_handle_string()
	 * appears to size read-side copies from strlen() of the string, so
	 * this is presumably harmless for this read-only OID — confirm
	 * against sysctl(9) for the target kernel.
	 */
	error = sysctl_handle_string(oidp,
	    __DECONST(void *,
		ixl_link_speed_string(hw->phy.link_info.link_speed)),
	    8, req);

	return (error);
}
2456
2457/*
2458 * Converts 8-bit speeds value to and from sysctl flags and
2459 * Admin Queue flags.
2460 */
2461static u8
2462ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2463{
2464#define SPEED_MAP_SIZE 8
2465	static u16 speedmap[SPEED_MAP_SIZE] = {
2466		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
2467		(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
2468		(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
2469		(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
2470		(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
2471		(I40E_LINK_SPEED_40GB  | (0x20 << 8)),
2472		(I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2473		(I40E_LINK_SPEED_5GB   | (0x80 << 8)),
2474	};
2475	u8 retval = 0;
2476
2477	for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2478		if (to_aq)
2479			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2480		else
2481			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2482	}
2483
2484	return (retval);
2485}
2486
2487int
2488ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2489{
2490	struct i40e_hw *hw = &pf->hw;
2491	device_t dev = pf->dev;
2492	struct i40e_aq_get_phy_abilities_resp abilities;
2493	struct i40e_aq_set_phy_config config;
2494	enum i40e_status_code aq_error = 0;
2495
2496	/* Get current capability information */
2497	aq_error = i40e_aq_get_phy_capabilities(hw,
2498	    FALSE, FALSE, &abilities, NULL);
2499	if (aq_error) {
2500		device_printf(dev,
2501		    "%s: Error getting phy capabilities %d,"
2502		    " aq error: %d\n", __func__, aq_error,
2503		    hw->aq.asq_last_status);
2504		return (EIO);
2505	}
2506
2507	/* Prepare new config */
2508	bzero(&config, sizeof(config));
2509	if (from_aq)
2510		config.link_speed = speeds;
2511	else
2512		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2513	config.phy_type = abilities.phy_type;
2514	config.phy_type_ext = abilities.phy_type_ext;
2515	config.abilities = abilities.abilities
2516	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2517	config.eee_capability = abilities.eee_capability;
2518	config.eeer = abilities.eeer_val;
2519	config.low_power_ctrl = abilities.d3_lpan;
2520	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2521	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
2522
2523	/* Do aq command & restart link */
2524	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2525	if (aq_error) {
2526		device_printf(dev,
2527		    "%s: Error setting new phy config %d,"
2528		    " aq error: %d\n", __func__, aq_error,
2529		    hw->aq.asq_last_status);
2530		return (EIO);
2531	}
2532
2533	return (0);
2534}
2535
2536/*
2537** Supported link speeds
2538**	Flags:
2539**	 0x1 - 100 Mb
2540**	 0x2 - 1G
2541**	 0x4 - 10G
2542**	 0x8 - 20G
2543**	0x10 - 25G
2544**	0x20 - 40G
2545**	0x40 - 2.5G
2546**	0x80 - 5G
2547*/
2548static int
2549ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2550{
2551	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2552	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2553
2554	return sysctl_handle_int(oidp, NULL, supported, req);
2555}
2556
2557/*
2558** Control link advertise speed:
2559**	Flags:
2560**	 0x1 - advertise 100 Mb
2561**	 0x2 - advertise 1G
2562**	 0x4 - advertise 10G
2563**	 0x8 - advertise 20G
2564**	0x10 - advertise 25G
2565**	0x20 - advertise 40G
2566**	0x40 - advertise 2.5G
2567**	0x80 - advertise 5G
2568**
2569**	Set to 0 to disable link
2570*/
2571int
2572ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2573{
2574	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2575	device_t dev = pf->dev;
2576	u8 converted_speeds;
2577	int requested_ls = 0;
2578	int error = 0;
2579
2580	/* Read in new mode */
2581	requested_ls = pf->advertised_speed;
2582	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2583	if ((error) || (req->newptr == NULL))
2584		return (error);
2585	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2586		device_printf(dev, "Interface is currently in FW recovery mode. "
2587				"Setting advertise speed not supported\n");
2588		return (EINVAL);
2589	}
2590
2591	/* Error out if bits outside of possible flag range are set */
2592	if ((requested_ls & ~((u8)0xFF)) != 0) {
2593		device_printf(dev, "Input advertised speed out of range; "
2594		    "valid flags are: 0x%02x\n",
2595		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2596		return (EINVAL);
2597	}
2598
2599	/* Check if adapter supports input value */
2600	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2601	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2602		device_printf(dev, "Invalid advertised speed; "
2603		    "valid flags are: 0x%02x\n",
2604		    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2605		return (EINVAL);
2606	}
2607
2608	error = ixl_set_advertised_speeds(pf, requested_ls, false);
2609	if (error)
2610		return (error);
2611
2612	pf->advertised_speed = requested_ls;
2613	ixl_update_link_status(pf);
2614	return (0);
2615}
2616
2617/*
2618 * Input: bitmap of enum i40e_aq_link_speed
2619 */
2620u64
2621ixl_max_aq_speed_to_value(u8 link_speeds)
2622{
2623	if (link_speeds & I40E_LINK_SPEED_40GB)
2624		return IF_Gbps(40);
2625	if (link_speeds & I40E_LINK_SPEED_25GB)
2626		return IF_Gbps(25);
2627	if (link_speeds & I40E_LINK_SPEED_20GB)
2628		return IF_Gbps(20);
2629	if (link_speeds & I40E_LINK_SPEED_10GB)
2630		return IF_Gbps(10);
2631	if (link_speeds & I40E_LINK_SPEED_5GB)
2632		return IF_Gbps(5);
2633	if (link_speeds & I40E_LINK_SPEED_2_5GB)
2634		return IF_Mbps(2500);
2635	if (link_speeds & I40E_LINK_SPEED_1GB)
2636		return IF_Gbps(1);
2637	if (link_speeds & I40E_LINK_SPEED_100MB)
2638		return IF_Mbps(100);
2639	else
2640		/* Minimum supported link speed */
2641		return IF_Mbps(100);
2642}
2643
2644/*
2645** Get the width and transaction speed of
2646** the bus this adapter is plugged into.
2647*/
2648void
2649ixl_get_bus_info(struct ixl_pf *pf)
2650{
2651	struct i40e_hw *hw = &pf->hw;
2652	device_t dev = pf->dev;
2653        u16 link;
2654        u32 offset, num_ports;
2655	u64 max_speed;
2656
2657	/* Some devices don't use PCIE */
2658	if (hw->mac.type == I40E_MAC_X722)
2659		return;
2660
2661        /* Read PCI Express Capabilities Link Status Register */
2662        pci_find_cap(dev, PCIY_EXPRESS, &offset);
2663        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2664
2665	/* Fill out hw struct with PCIE info */
2666	i40e_set_pci_config_data(hw, link);
2667
2668	/* Use info to print out bandwidth messages */
2669        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
2670            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
2671            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
2672            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
2673            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2674            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2675            (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2676            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2677            ("Unknown"));
2678
2679	/*
2680	 * If adapter is in slot with maximum supported speed,
2681	 * no warning message needs to be printed out.
2682	 */
2683	if (hw->bus.speed >= i40e_bus_speed_8000
2684	    && hw->bus.width >= i40e_bus_width_pcie_x8)
2685		return;
2686
2687	num_ports = bitcount32(hw->func_caps.valid_functions);
2688	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
2689
2690	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
2691                device_printf(dev, "PCI-Express bandwidth available"
2692                    " for this device may be insufficient for"
2693                    " optimal performance.\n");
2694                device_printf(dev, "Please move the device to a different"
2695		    " PCI-e link with more lanes and/or higher"
2696		    " transfer rate.\n");
2697        }
2698}
2699
2700static int
2701ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
2702{
2703	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
2704	struct i40e_hw	*hw = &pf->hw;
2705	struct sbuf	*sbuf;
2706
2707	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2708	ixl_nvm_version_str(hw, sbuf);
2709	sbuf_finish(sbuf);
2710	sbuf_delete(sbuf);
2711
2712	return (0);
2713}
2714
2715void
2716ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
2717{
2718	u8 nvma_ptr = nvma->config & 0xFF;
2719	u8 nvma_flags = (nvma->config & 0xF00) >> 8;
2720	const char * cmd_str;
2721
2722	switch (nvma->command) {
2723	case I40E_NVM_READ:
2724		if (nvma_ptr == 0xF && nvma_flags == 0xF &&
2725		    nvma->offset == 0 && nvma->data_size == 1) {
2726			device_printf(dev, "NVMUPD: Get Driver Status Command\n");
2727			return;
2728		}
2729		cmd_str = "READ ";
2730		break;
2731	case I40E_NVM_WRITE:
2732		cmd_str = "WRITE";
2733		break;
2734	default:
2735		device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
2736		return;
2737	}
2738	device_printf(dev,
2739	    "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
2740	    cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
2741}
2742
/*
 * Handle an NVM update ioctl (SIOCGDRVSPEC/SIOCSDRVSPEC carrying a
 * struct i40e_nvm_access) from the nvmupdate tool.
 *
 * Copies the request in from userspace, waits out an in-progress
 * adapter reset, forwards the request to the shared-code
 * i40e_nvmupd_command(), and copies the result back out.
 *
 * Returns 0 or a (possibly negative, tool-visible) errno; see the
 * -EPERM note at the bottom.
 */
int
ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_nvm_access *nvma;
	device_t dev = pf->dev;
	enum i40e_status_code status = 0;
	size_t nvma_size, ifd_len, exp_len;
	int err, perrno;

	DEBUGFUNC("ixl_handle_nvmupd_cmd");

	/* Sanity checks: the buffer must at least hold the fixed header */
	nvma_size = sizeof(struct i40e_nvm_access);
	ifd_len = ifd->ifd_len;

	if (ifd_len < nvma_size ||
	    ifd->ifd_data == NULL) {
		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
		    __func__);
		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
		    __func__, ifd_len, nvma_size);
		device_printf(dev, "%s: data pointer: %p\n", __func__,
		    ifd->ifd_data);
		return (EINVAL);
	}

	/* M_WAITOK: allocation sleeps rather than failing */
	nvma = malloc(ifd_len, M_IXL, M_WAITOK);
	err = copyin(ifd->ifd_data, nvma, ifd_len);
	if (err) {
		device_printf(dev, "%s: Cannot get request from user space\n",
		    __func__);
		free(nvma, M_IXL);
		return (err);
	}

	if (pf->dbg_mask & IXL_DBG_NVMUPD)
		ixl_print_nvm_cmd(dev, nvma);

	/* Poll up to ~10 seconds (100 x 100ms) for a reset to finish */
	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
		int count = 0;
		while (count++ < 100) {
			i40e_msec_delay(100);
			if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING))
				break;
		}
	}

	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
		device_printf(dev,
		    "%s: timeout waiting for EMP reset to finish\n",
		    __func__);
		free(nvma, M_IXL);
		return (-EBUSY);
	}

	/* Bound the payload to what one AQ transaction can carry */
	if (nvma->data_size < 1 || nvma->data_size > 4096) {
		device_printf(dev,
		    "%s: invalid request, data size not in supported range\n",
		    __func__);
		free(nvma, M_IXL);
		return (EINVAL);
	}

	/*
	 * Older versions of the NVM update tool don't set ifd_len to the size
	 * of the entire buffer passed to the ioctl. Check the data_size field
	 * in the contained i40e_nvm_access struct and ensure everything is
	 * copied in from userspace.
	 */
	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */

	/* Grow the buffer and re-copy if the header undersold the payload */
	if (ifd_len < exp_len) {
		ifd_len = exp_len;
		/* realloc with M_WAITOK cannot fail, so no leak-on-failure */
		nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
		err = copyin(ifd->ifd_data, nvma, ifd_len);
		if (err) {
			device_printf(dev, "%s: Cannot get request from user space\n",
					__func__);
			free(nvma, M_IXL);
			return (err);
		}
	}

	// TODO: Might need a different lock here
	// IXL_PF_LOCK(pf);
	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
	// IXL_PF_UNLOCK(pf);

	err = copyout(nvma, ifd->ifd_data, ifd_len);
	free(nvma, M_IXL);
	if (err) {
		device_printf(dev, "%s: Cannot return data to user space\n",
				__func__);
		return (err);
	}

	/* Let the nvmupdate report errors, show them only when debug is enabled */
	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
		    i40e_stat_str(hw, status), perrno);

	/*
	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
	 * to run this ioctl again. So use -EACCES for -EPERM instead.
	 */
	if (perrno == -EPERM)
		return (-EACCES);
	else
		return (perrno);
}
2854
2855int
2856ixl_find_i2c_interface(struct ixl_pf *pf)
2857{
2858	struct i40e_hw *hw = &pf->hw;
2859	bool i2c_en, port_matched;
2860	u32 reg;
2861
2862	for (int i = 0; i < 4; i++) {
2863		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
2864		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
2865		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
2866		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
2867		    & BIT(hw->port);
2868		if (i2c_en && port_matched)
2869			return (i);
2870	}
2871
2872	return (-1);
2873}
2874
2875static char *
2876ixl_phy_type_string(u32 bit_pos, bool ext)
2877{
2878	static char * phy_types_str[32] = {
2879		"SGMII",
2880		"1000BASE-KX",
2881		"10GBASE-KX4",
2882		"10GBASE-KR",
2883		"40GBASE-KR4",
2884		"XAUI",
2885		"XFI",
2886		"SFI",
2887		"XLAUI",
2888		"XLPPI",
2889		"40GBASE-CR4",
2890		"10GBASE-CR1",
2891		"SFP+ Active DA",
2892		"QSFP+ Active DA",
2893		"Reserved (14)",
2894		"Reserved (15)",
2895		"Reserved (16)",
2896		"100BASE-TX",
2897		"1000BASE-T",
2898		"10GBASE-T",
2899		"10GBASE-SR",
2900		"10GBASE-LR",
2901		"10GBASE-SFP+Cu",
2902		"10GBASE-CR1",
2903		"40GBASE-CR4",
2904		"40GBASE-SR4",
2905		"40GBASE-LR4",
2906		"1000BASE-SX",
2907		"1000BASE-LX",
2908		"1000BASE-T Optical",
2909		"20GBASE-KR2",
2910		"Reserved (31)"
2911	};
2912	static char * ext_phy_types_str[8] = {
2913		"25GBASE-KR",
2914		"25GBASE-CR",
2915		"25GBASE-SR",
2916		"25GBASE-LR",
2917		"25GBASE-AOC",
2918		"25GBASE-ACC",
2919		"2.5GBASE-T",
2920		"5GBASE-T"
2921	};
2922
2923	if (ext && bit_pos > 7) return "Invalid_Ext";
2924	if (bit_pos > 31) return "Invalid";
2925
2926	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
2927}
2928
2929/* TODO: ERJ: I don't this is necessary anymore. */
2930int
2931ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
2932{
2933	device_t dev = pf->dev;
2934	struct i40e_hw *hw = &pf->hw;
2935	struct i40e_aq_desc desc;
2936	enum i40e_status_code status;
2937
2938	struct i40e_aqc_get_link_status *aq_link_status =
2939		(struct i40e_aqc_get_link_status *)&desc.params.raw;
2940
2941	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
2942	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
2943	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
2944	if (status) {
2945		device_printf(dev,
2946		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
2947		    __func__, i40e_stat_str(hw, status),
2948		    i40e_aq_str(hw, hw->aq.asq_last_status));
2949		return (EIO);
2950	}
2951
2952	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
2953	return (0);
2954}
2955
2956static char *
2957ixl_phy_type_string_ls(u8 val)
2958{
2959	if (val >= 0x1F)
2960		return ixl_phy_type_string(val - 0x1F, true);
2961	else
2962		return ixl_phy_type_string(val, false);
2963}
2964
2965static int
2966ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
2967{
2968	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2969	device_t dev = pf->dev;
2970	struct sbuf *buf;
2971	int error = 0;
2972
2973	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2974	if (!buf) {
2975		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
2976		return (ENOMEM);
2977	}
2978
2979	struct i40e_aqc_get_link_status link_status;
2980	error = ixl_aq_get_link_status(pf, &link_status);
2981	if (error) {
2982		sbuf_delete(buf);
2983		return (error);
2984	}
2985
2986	sbuf_printf(buf, "\n"
2987	    "PHY Type : 0x%02x<%s>\n"
2988	    "Speed    : 0x%02x\n"
2989	    "Link info: 0x%02x\n"
2990	    "AN info  : 0x%02x\n"
2991	    "Ext info : 0x%02x\n"
2992	    "Loopback : 0x%02x\n"
2993	    "Max Frame: %d\n"
2994	    "Config   : 0x%02x\n"
2995	    "Power    : 0x%02x",
2996	    link_status.phy_type,
2997	    ixl_phy_type_string_ls(link_status.phy_type),
2998	    link_status.link_speed,
2999	    link_status.link_info,
3000	    link_status.an_info,
3001	    link_status.ext_info,
3002	    link_status.loopback,
3003	    link_status.max_frame_size,
3004	    link_status.config,
3005	    link_status.power_desc);
3006
3007	error = sbuf_finish(buf);
3008	if (error)
3009		device_printf(dev, "Error finishing sbuf: %d\n", error);
3010
3011	sbuf_delete(buf);
3012	return (error);
3013}
3014
/*
 * Sysctl handler: report the PHY abilities returned by firmware, decoding
 * the PHY type, extended PHY type, and link speed bitmasks into names.
 */
static int
ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	/* Query current PHY capabilities from firmware */
	status = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, &abilities, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		sbuf_delete(buf);
		return (EIO);
	}

	sbuf_printf(buf, "\n"
	    "PHY Type : %08x",
	    abilities.phy_type);

	/* Decode each set bit of the 32-bit PHY type bitmask into a name */
	if (abilities.phy_type != 0) {
		sbuf_printf(buf, "<");
		for (int i = 0; i < 32; i++)
			if ((1 << i) & abilities.phy_type)
				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
		sbuf_printf(buf, ">");
	}

	sbuf_printf(buf, "\nPHY Ext  : %02x",
	    abilities.phy_type_ext);

	/* Only the low 4 bits of the extended PHY type are decoded here */
	if (abilities.phy_type_ext != 0) {
		sbuf_printf(buf, "<");
		for (int i = 0; i < 4; i++)
			if ((1 << i) & abilities.phy_type_ext)
				sbuf_printf(buf, "%s,",
				    ixl_phy_type_string(i, true));
		sbuf_printf(buf, ">");
	}

	/* Decode each set bit of the link speed bitmask into a name */
	sbuf_printf(buf, "\nSpeed    : %02x", abilities.link_speed);
	if (abilities.link_speed != 0) {
		u8 link_speed;
		sbuf_printf(buf, " <");
		for (int i = 0; i < 8; i++) {
			link_speed = (1 << i) & abilities.link_speed;
			if (link_speed)
				sbuf_printf(buf, "%s, ",
				    ixl_link_speed_string(link_speed));
		}
		sbuf_printf(buf, ">");
	}

	/*
	 * Note: "ModType E" is bits 7:5 and "FEC Cfg" is bits 4:0 of the
	 * combined fec_cfg_curr_mod_ext_info field.
	 */
	sbuf_printf(buf, "\n"
	    "Abilities: %02x\n"
	    "EEE cap  : %04x\n"
	    "EEER reg : %08x\n"
	    "D3 Lpan  : %02x\n"
	    "ID       : %02x %02x %02x %02x\n"
	    "ModType  : %02x %02x %02x\n"
	    "ModType E: %01x\n"
	    "FEC Cfg  : %02x\n"
	    "Ext CC   : %02x",
	    abilities.abilities, abilities.eee_capability,
	    abilities.eeer_val, abilities.d3_lpan,
	    abilities.phy_id[0], abilities.phy_id[1],
	    abilities.phy_id[2], abilities.phy_id[3],
	    abilities.module_type[0], abilities.module_type[1],
	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
	    abilities.ext_comp_code);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return (error);
}
3106
/*
 * Sysctl handler: list the PF's software MAC filter table, and (when
 * compiled with SR-IOV support) the filter tables of all enabled VFs.
 */
static int
ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_mac_filter *f;
	device_t dev = pf->dev;
	int error = 0, ftl_len = 0, ftl_counter = 0;

	struct sbuf *buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	sbuf_printf(buf, "\n");

	/* Print MAC filters */
	sbuf_printf(buf, "PF Filters:\n");
	/* First pass counts entries so the last one can omit its newline */
	SLIST_FOREACH(f, &vsi->ftl, next)
		ftl_len++;

	if (ftl_len < 1)
		sbuf_printf(buf, "(none)\n");
	else {
		SLIST_FOREACH(f, &vsi->ftl, next) {
			sbuf_printf(buf,
			    MAC_FORMAT ", vlan %4d, flags %#06x",
			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
			/* don't print '\n' for last entry */
			if (++ftl_counter != ftl_len)
				sbuf_printf(buf, "\n");
		}
	}

#ifdef PCI_IOV
	/* TODO: Give each VF its own filter list sysctl */
	struct ixl_vf *vf;
	if (pf->num_vfs > 0) {
		sbuf_printf(buf, "\n\n");
		for (int i = 0; i < pf->num_vfs; i++) {
			vf = &pf->vfs[i];
			if (!(vf->vf_flags & VF_FLAG_ENABLED))
				continue;

			vsi = &vf->vsi;
			/* Reset per-VF counters (comma operator, one statement) */
			ftl_len = 0, ftl_counter = 0;
			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
			SLIST_FOREACH(f, &vsi->ftl, next)
				ftl_len++;

			if (ftl_len < 1)
				sbuf_printf(buf, "(none)\n");
			else {
				SLIST_FOREACH(f, &vsi->ftl, next) {
					sbuf_printf(buf,
					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
				}
			}
		}
	}
#endif

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
3180
3181#define IXL_SW_RES_SIZE 0x14
3182int
3183ixl_res_alloc_cmp(const void *a, const void *b)
3184{
3185	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3186	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3187	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3188
3189	return ((int)one->resource_type - (int)two->resource_type);
3190}
3191
3192/*
3193 * Longest string length: 25
3194 */
3195const char *
3196ixl_switch_res_type_string(u8 type)
3197{
3198	static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3199		"VEB",
3200		"VSI",
3201		"Perfect Match MAC address",
3202		"S-tag",
3203		"(Reserved)",
3204		"Multicast hash entry",
3205		"Unicast hash entry",
3206		"VLAN",
3207		"VSI List entry",
3208		"(Reserved)",
3209		"VLAN Statistic Pool",
3210		"Mirror Rule",
3211		"Queue Set",
3212		"Inner VLAN Forward filter",
3213		"(Reserved)",
3214		"Inner MAC",
3215		"IP",
3216		"GRE/VN1 Key",
3217		"VN2 Key",
3218		"Tunneling Port"
3219	};
3220
3221	if (type < IXL_SW_RES_SIZE)
3222		return ixl_switch_res_type_strings[type];
3223	else
3224		return "(Reserved)";
3225}
3226
3227static int
3228ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3229{
3230	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3231	struct i40e_hw *hw = &pf->hw;
3232	device_t dev = pf->dev;
3233	struct sbuf *buf;
3234	enum i40e_status_code status;
3235	int error = 0;
3236
3237	u8 num_entries;
3238	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3239
3240	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3241	if (!buf) {
3242		device_printf(dev, "Could not allocate sbuf for output.\n");
3243		return (ENOMEM);
3244	}
3245
3246	bzero(resp, sizeof(resp));
3247	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3248				resp,
3249				IXL_SW_RES_SIZE,
3250				NULL);
3251	if (status) {
3252		device_printf(dev,
3253		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3254		    __func__, i40e_stat_str(hw, status),
3255		    i40e_aq_str(hw, hw->aq.asq_last_status));
3256		sbuf_delete(buf);
3257		return (error);
3258	}
3259
3260	/* Sort entries by type for display */
3261	qsort(resp, num_entries,
3262	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3263	    &ixl_res_alloc_cmp);
3264
3265	sbuf_cat(buf, "\n");
3266	sbuf_printf(buf, "# of entries: %d\n", num_entries);
3267	sbuf_printf(buf,
3268	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
3269	    "                          | (this)     | (all) | (this) | (all)       \n");
3270	for (int i = 0; i < num_entries; i++) {
3271		sbuf_printf(buf,
3272		    "%25s | %10d   %5d   %6d   %12d",
3273		    ixl_switch_res_type_string(resp[i].resource_type),
3274		    resp[i].guaranteed,
3275		    resp[i].total,
3276		    resp[i].used,
3277		    resp[i].total_unalloced);
3278		if (i < num_entries - 1)
3279			sbuf_cat(buf, "\n");
3280	}
3281
3282	error = sbuf_finish(buf);
3283	if (error)
3284		device_printf(dev, "Error finishing sbuf: %d\n", error);
3285
3286	sbuf_delete(buf);
3287	return (error);
3288}
3289
/*
 * SEID ranges assigned by firmware at power-on for switch elements.
 * If an element's SEID falls in one of these ranges, its identity can be
 * inferred without consulting its element type.
 */
enum ixl_sw_seid_offset {
	IXL_SW_SEID_EMP = 1,		/* Embedded Management Processor */
	IXL_SW_SEID_MAC_START = 2,	/* physical MACs */
	IXL_SW_SEID_MAC_END = 5,
	IXL_SW_SEID_PF_START = 16,	/* physical functions */
	IXL_SW_SEID_PF_END = 31,
	IXL_SW_SEID_VF_START = 32,	/* virtual functions */
	IXL_SW_SEID_VF_END = 159,
};
3299
3300/*
3301 * Caller must init and delete sbuf; this function will clear and
3302 * finish it for caller.
3303 *
3304 * Note: The SEID argument only applies for elements defined by FW at
3305 * power-on; these include the EMP, Ports, PFs and VFs.
3306 */
3307static char *
3308ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3309{
3310	sbuf_clear(s);
3311
3312	/* If SEID is in certain ranges, then we can infer the
3313	 * mapping of SEID to switch element.
3314	 */
3315	if (seid == IXL_SW_SEID_EMP) {
3316		sbuf_cat(s, "EMP");
3317		goto out;
3318	} else if (seid >= IXL_SW_SEID_MAC_START &&
3319	    seid <= IXL_SW_SEID_MAC_END) {
3320		sbuf_printf(s, "MAC  %2d",
3321		    seid - IXL_SW_SEID_MAC_START);
3322		goto out;
3323	} else if (seid >= IXL_SW_SEID_PF_START &&
3324	    seid <= IXL_SW_SEID_PF_END) {
3325		sbuf_printf(s, "PF  %3d",
3326		    seid - IXL_SW_SEID_PF_START);
3327		goto out;
3328	} else if (seid >= IXL_SW_SEID_VF_START &&
3329	    seid <= IXL_SW_SEID_VF_END) {
3330		sbuf_printf(s, "VF  %3d",
3331		    seid - IXL_SW_SEID_VF_START);
3332		goto out;
3333	}
3334
3335	switch (element_type) {
3336	case I40E_AQ_SW_ELEM_TYPE_BMC:
3337		sbuf_cat(s, "BMC");
3338		break;
3339	case I40E_AQ_SW_ELEM_TYPE_PV:
3340		sbuf_cat(s, "PV");
3341		break;
3342	case I40E_AQ_SW_ELEM_TYPE_VEB:
3343		sbuf_cat(s, "VEB");
3344		break;
3345	case I40E_AQ_SW_ELEM_TYPE_PA:
3346		sbuf_cat(s, "PA");
3347		break;
3348	case I40E_AQ_SW_ELEM_TYPE_VSI:
3349		sbuf_printf(s, "VSI");
3350		break;
3351	default:
3352		sbuf_cat(s, "?");
3353		break;
3354	}
3355
3356out:
3357	sbuf_finish(s);
3358	return sbuf_data(s);
3359}
3360
3361static int
3362ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3363{
3364	const struct i40e_aqc_switch_config_element_resp *one, *two;
3365	one = (const struct i40e_aqc_switch_config_element_resp *)a;
3366	two = (const struct i40e_aqc_switch_config_element_resp *)b;
3367
3368	return ((int)one->seid - (int)two->seid);
3369}
3370
3371static int
3372ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3373{
3374	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3375	struct i40e_hw *hw = &pf->hw;
3376	device_t dev = pf->dev;
3377	struct sbuf *buf;
3378	struct sbuf *nmbuf;
3379	enum i40e_status_code status;
3380	int error = 0;
3381	u16 next = 0;
3382	u8 aq_buf[I40E_AQ_LARGE_BUF];
3383
3384	struct i40e_aqc_switch_config_element_resp *elem;
3385	struct i40e_aqc_get_switch_config_resp *sw_config;
3386	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3387
3388	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3389	if (!buf) {
3390		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3391		return (ENOMEM);
3392	}
3393
3394	status = i40e_aq_get_switch_config(hw, sw_config,
3395	    sizeof(aq_buf), &next, NULL);
3396	if (status) {
3397		device_printf(dev,
3398		    "%s: aq_get_switch_config() error %s, aq error %s\n",
3399		    __func__, i40e_stat_str(hw, status),
3400		    i40e_aq_str(hw, hw->aq.asq_last_status));
3401		sbuf_delete(buf);
3402		return error;
3403	}
3404	if (next)
3405		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3406		    __func__, next);
3407
3408	nmbuf = sbuf_new_auto();
3409	if (!nmbuf) {
3410		device_printf(dev, "Could not allocate sbuf for name output.\n");
3411		sbuf_delete(buf);
3412		return (ENOMEM);
3413	}
3414
3415	/* Sort entries by SEID for display */
3416	qsort(sw_config->element, sw_config->header.num_reported,
3417	    sizeof(struct i40e_aqc_switch_config_element_resp),
3418	    &ixl_sw_cfg_elem_seid_cmp);
3419
3420	sbuf_cat(buf, "\n");
3421	/* Assuming <= 255 elements in switch */
3422	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3423	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3424	/* Exclude:
3425	 * Revision -- all elements are revision 1 for now
3426	 */
3427	sbuf_printf(buf,
3428	    "SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
3429	    "                |                 |                 | (uplink)\n");
3430	for (int i = 0; i < sw_config->header.num_reported; i++) {
3431		elem = &sw_config->element[i];
3432
3433		// "%4d (%8s) | %8s   %8s   %#8x",
3434		sbuf_printf(buf, "%4d", elem->seid);
3435		sbuf_cat(buf, " ");
3436		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3437		    elem->element_type, elem->seid));
3438		sbuf_cat(buf, " | ");
3439		sbuf_printf(buf, "%4d", elem->uplink_seid);
3440		sbuf_cat(buf, " ");
3441		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3442		    0, elem->uplink_seid));
3443		sbuf_cat(buf, " | ");
3444		sbuf_printf(buf, "%4d", elem->downlink_seid);
3445		sbuf_cat(buf, " ");
3446		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3447		    0, elem->downlink_seid));
3448		sbuf_cat(buf, " | ");
3449		sbuf_printf(buf, "%8d", elem->connection_type);
3450		if (i < sw_config->header.num_reported - 1)
3451			sbuf_cat(buf, "\n");
3452	}
3453	sbuf_delete(nmbuf);
3454
3455	error = sbuf_finish(buf);
3456	if (error)
3457		device_printf(dev, "Error finishing sbuf: %d\n", error);
3458
3459	sbuf_delete(buf);
3460
3461	return (error);
3462}
3463
/*
 * Sysctl handler: set the outer VLAN ethertype (switch tag) used by the
 * hardware switch.  Writing a value updates hw->switch_tag and pushes the
 * tag configuration to firmware.  Only valid on 802.1ad-capable hardware.
 *
 * NOTE(review): the int value is truncated to 16 bits when stored in
 * hw->switch_tag, and no range validation is performed here — confirm
 * whether additional checking is desired.
 */
static int
ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_vlan = -1;
	enum i40e_status_code status = 0;
	int error = 0;

	/* Read-only access or no new value supplied: nothing to do */
	error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
	if ((error) || (req->newptr == NULL))
	    return (error);

	if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
		device_printf(dev, "Flags disallow setting of vlans\n");
		return (ENODEV);
	}

	hw->switch_tag = requested_vlan;
	device_printf(dev,
	    "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
	    hw->switch_tag, hw->first_tag, hw->second_tag);
	status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
	if (status) {
		device_printf(dev,
		    "%s: aq_set_switch_config() error %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		/* NOTE(review): returns an i40e_status_code, not an errno */
		return (status);
	}
	return (0);
}
3497
/*
 * Sysctl handler: dump the RSS hash key as a hex/ASCII listing.
 *
 * On X722 MACs the key is retrieved with an admin queue command; on other
 * MACs it is read directly from the PFQF_HKEY registers, 4 bytes per
 * register.
 */
static int
ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;
	u32 reg;

	struct i40e_aqc_get_set_rss_key_data key_data;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(&key_data, sizeof(key_data));

	sbuf_cat(buf, "\n");
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
		if (status)
			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		/* Copy each 32-bit register into the key buffer, 4 bytes at a time */
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
		}
	}

	/* 'true' adds the printable-ASCII column to the dump */
	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
3541
3542static void
3543ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
3544{
3545	int i, j, k, width;
3546	char c;
3547
3548	if (length < 1 || buf == NULL) return;
3549
3550	int byte_stride = 16;
3551	int lines = length / byte_stride;
3552	int rem = length % byte_stride;
3553	if (rem > 0)
3554		lines++;
3555
3556	for (i = 0; i < lines; i++) {
3557		width = (rem > 0 && i == lines - 1)
3558		    ? rem : byte_stride;
3559
3560		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
3561
3562		for (j = 0; j < width; j++)
3563			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
3564
3565		if (width < byte_stride) {
3566			for (k = 0; k < (byte_stride - width); k++)
3567				sbuf_printf(sb, "   ");
3568		}
3569
3570		if (!text) {
3571			sbuf_printf(sb, "\n");
3572			continue;
3573		}
3574
3575		for (j = 0; j < width; j++) {
3576			c = (char)buf[i * byte_stride + j];
3577			if (c < 32 || c > 126)
3578				sbuf_printf(sb, ".");
3579			else
3580				sbuf_printf(sb, "%c", c);
3581
3582			if (j == width - 1)
3583				sbuf_printf(sb, "\n");
3584		}
3585	}
3586}
3587
3588static int
3589ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
3590{
3591	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3592	struct i40e_hw *hw = &pf->hw;
3593	device_t dev = pf->dev;
3594	struct sbuf *buf;
3595	int error = 0;
3596	enum i40e_status_code status;
3597	u8 hlut[512];
3598	u32 reg;
3599
3600	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3601	if (!buf) {
3602		device_printf(dev, "Could not allocate sbuf for output.\n");
3603		return (ENOMEM);
3604	}
3605
3606	bzero(hlut, sizeof(hlut));
3607	sbuf_cat(buf, "\n");
3608	if (hw->mac.type == I40E_MAC_X722) {
3609		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
3610		if (status)
3611			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
3612			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
3613	} else {
3614		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
3615			reg = rd32(hw, I40E_PFQF_HLUT(i));
3616			bcopy(&reg, &hlut[i << 2], 4);
3617		}
3618	}
3619	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
3620
3621	error = sbuf_finish(buf);
3622	if (error)
3623		device_printf(dev, "Error finishing sbuf: %d\n", error);
3624	sbuf_delete(buf);
3625
3626	return (error);
3627}
3628
3629static int
3630ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
3631{
3632	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3633	struct i40e_hw *hw = &pf->hw;
3634	u64 hena;
3635
3636	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3637	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3638
3639	return sysctl_handle_long(oidp, NULL, hena, req);
3640}
3641
3642/*
3643 * Sysctl to disable firmware's link management
3644 *
3645 * 1 - Disable link management on this port
3646 * 0 - Re-enable link management
3647 *
3648 * On normal NVMs, firmware manages link by default.
3649 */
3650static int
3651ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
3652{
3653	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3654	struct i40e_hw *hw = &pf->hw;
3655	device_t dev = pf->dev;
3656	int requested_mode = -1;
3657	enum i40e_status_code status = 0;
3658	int error = 0;
3659
3660	/* Read in new mode */
3661	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
3662	if ((error) || (req->newptr == NULL))
3663		return (error);
3664	/* Check for sane value */
3665	if (requested_mode < 0 || requested_mode > 1) {
3666		device_printf(dev, "Valid modes are 0 or 1\n");
3667		return (EINVAL);
3668	}
3669
3670	/* Set new mode */
3671	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
3672	if (status) {
3673		device_printf(dev,
3674		    "%s: Error setting new phy debug mode %s,"
3675		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
3676		    i40e_aq_str(hw, hw->aq.asq_last_status));
3677		return (EIO);
3678	}
3679
3680	return (0);
3681}
3682
3683/*
3684 * Read some diagnostic data from a (Q)SFP+ module
3685 *
3686 *             SFP A2   QSFP Lower Page
3687 * Temperature 96-97	22-23
3688 * Vcc         98-99    26-27
3689 * TX power    102-103  34-35..40-41
3690 * RX power    104-105  50-51..56-57
3691 */
3692static int
3693ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
3694{
3695	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3696	device_t dev = pf->dev;
3697	struct sbuf *sbuf;
3698	int error = 0;
3699	u8 output;
3700
3701	if (req->oldptr == NULL) {
3702		error = SYSCTL_OUT(req, 0, 128);
3703		return (0);
3704	}
3705
3706	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
3707	if (error) {
3708		device_printf(dev, "Error reading from i2c\n");
3709		return (error);
3710	}
3711
3712	/* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
3713	if (output == 0x3) {
3714		/*
3715		 * Check for:
3716		 * - Internally calibrated data
3717		 * - Diagnostic monitoring is implemented
3718		 */
3719		pf->read_i2c_byte(pf, 92, 0xA0, &output);
3720		if (!(output & 0x60)) {
3721			device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
3722			return (0);
3723		}
3724
3725		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3726
3727		for (u8 offset = 96; offset < 100; offset++) {
3728			pf->read_i2c_byte(pf, offset, 0xA2, &output);
3729			sbuf_printf(sbuf, "%02X ", output);
3730		}
3731		for (u8 offset = 102; offset < 106; offset++) {
3732			pf->read_i2c_byte(pf, offset, 0xA2, &output);
3733			sbuf_printf(sbuf, "%02X ", output);
3734		}
3735	} else if (output == 0xD || output == 0x11) {
3736		/*
3737		 * QSFP+ modules are always internally calibrated, and must indicate
3738		 * what types of diagnostic monitoring are implemented
3739		 */
3740		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3741
3742		for (u8 offset = 22; offset < 24; offset++) {
3743			pf->read_i2c_byte(pf, offset, 0xA0, &output);
3744			sbuf_printf(sbuf, "%02X ", output);
3745		}
3746		for (u8 offset = 26; offset < 28; offset++) {
3747			pf->read_i2c_byte(pf, offset, 0xA0, &output);
3748			sbuf_printf(sbuf, "%02X ", output);
3749		}
3750		/* Read the data from the first lane */
3751		for (u8 offset = 34; offset < 36; offset++) {
3752			pf->read_i2c_byte(pf, offset, 0xA0, &output);
3753			sbuf_printf(sbuf, "%02X ", output);
3754		}
3755		for (u8 offset = 50; offset < 52; offset++) {
3756			pf->read_i2c_byte(pf, offset, 0xA0, &output);
3757			sbuf_printf(sbuf, "%02X ", output);
3758		}
3759	} else {
3760		device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
3761		return (0);
3762	}
3763
3764	sbuf_finish(sbuf);
3765	sbuf_delete(sbuf);
3766
3767	return (0);
3768}
3769
3770/*
3771 * Sysctl to read a byte from I2C bus.
3772 *
3773 * Input: 32-bit value:
3774 * 	bits 0-7:   device address (0xA0 or 0xA2)
3775 * 	bits 8-15:  offset (0-255)
3776 *	bits 16-31: unused
3777 * Output: 8-bit value read
3778 */
3779static int
3780ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
3781{
3782	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3783	device_t dev = pf->dev;
3784	int input = -1, error = 0;
3785	u8 dev_addr, offset, output;
3786
3787	/* Read in I2C read parameters */
3788	error = sysctl_handle_int(oidp, &input, 0, req);
3789	if ((error) || (req->newptr == NULL))
3790		return (error);
3791	/* Validate device address */
3792	dev_addr = input & 0xFF;
3793	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
3794		return (EINVAL);
3795	}
3796	offset = (input >> 8) & 0xFF;
3797
3798	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
3799	if (error)
3800		return (error);
3801
3802	device_printf(dev, "%02X\n", output);
3803	return (0);
3804}
3805
3806/*
3807 * Sysctl to write a byte to the I2C bus.
3808 *
3809 * Input: 32-bit value:
3810 * 	bits 0-7:   device address (0xA0 or 0xA2)
3811 * 	bits 8-15:  offset (0-255)
3812 *	bits 16-23: value to write
3813 *	bits 24-31: unused
3814 * Output: 8-bit value written
3815 */
3816static int
3817ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
3818{
3819	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3820	device_t dev = pf->dev;
3821	int input = -1, error = 0;
3822	u8 dev_addr, offset, value;
3823
3824	/* Read in I2C write parameters */
3825	error = sysctl_handle_int(oidp, &input, 0, req);
3826	if ((error) || (req->newptr == NULL))
3827		return (error);
3828	/* Validate device address */
3829	dev_addr = input & 0xFF;
3830	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
3831		return (EINVAL);
3832	}
3833	offset = (input >> 8) & 0xFF;
3834	value = (input >> 16) & 0xFF;
3835
3836	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
3837	if (error)
3838		return (error);
3839
3840	device_printf(dev, "%02X written\n", value);
3841	return (0);
3842}
3843
3844static int
3845ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
3846    u8 bit_pos, int *is_set)
3847{
3848	device_t dev = pf->dev;
3849	struct i40e_hw *hw = &pf->hw;
3850	enum i40e_status_code status;
3851
3852	if (IXL_PF_IN_RECOVERY_MODE(pf))
3853		return (EIO);
3854
3855	status = i40e_aq_get_phy_capabilities(hw,
3856	    FALSE, FALSE, abilities, NULL);
3857	if (status) {
3858		device_printf(dev,
3859		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3860		    __func__, i40e_stat_str(hw, status),
3861		    i40e_aq_str(hw, hw->aq.asq_last_status));
3862		return (EIO);
3863	}
3864
3865	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
3866	return (0);
3867}
3868
/*
 * Update a single FEC configuration bit in the PHY configuration.
 *
 * Builds an i40e_aq_set_phy_config request from the current 'abilities'
 * with 'bit_pos' cleared or set in fec_config according to 'set'.  The
 * command is only sent to firmware when this actually changes the current
 * FEC configuration.  Returns 0 on success or when no change was needed,
 * EIO on an admin queue error.
 */
static int
ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int set)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code status;

	/* Set new PHY config */
	memset(&config, 0, sizeof(config));
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
	if (set)
		config.fec_config |= bit_pos;
	/* Only bother firmware if the FEC setting actually changes */
	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
		/* Carry over the remaining PHY settings unchanged */
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
		config.phy_type = abilities->phy_type;
		config.phy_type_ext = abilities->phy_type_ext;
		config.link_speed = abilities->link_speed;
		config.eee_capability = abilities->eee_capability;
		config.eeer = abilities->eeer_val;
		config.low_power_ctrl = abilities->d3_lpan;
		status = i40e_aq_set_phy_config(hw, &config, NULL);

		if (status) {
			device_printf(dev,
			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
			    __func__, i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			return (EIO);
		}
	}

	return (0);
}
3904
3905static int
3906ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
3907{
3908	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3909	int mode, error = 0;
3910
3911	struct i40e_aq_get_phy_abilities_resp abilities;
3912	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
3913	if (error)
3914		return (error);
3915	/* Read in new mode */
3916	error = sysctl_handle_int(oidp, &mode, 0, req);
3917	if ((error) || (req->newptr == NULL))
3918		return (error);
3919
3920	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
3921}
3922
3923static int
3924ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
3925{
3926	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3927	int mode, error = 0;
3928
3929	struct i40e_aq_get_phy_abilities_resp abilities;
3930	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
3931	if (error)
3932		return (error);
3933	/* Read in new mode */
3934	error = sysctl_handle_int(oidp, &mode, 0, req);
3935	if ((error) || (req->newptr == NULL))
3936		return (error);
3937
3938	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
3939}
3940
3941static int
3942ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
3943{
3944	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3945	int mode, error = 0;
3946
3947	struct i40e_aq_get_phy_abilities_resp abilities;
3948	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
3949	if (error)
3950		return (error);
3951	/* Read in new mode */
3952	error = sysctl_handle_int(oidp, &mode, 0, req);
3953	if ((error) || (req->newptr == NULL))
3954		return (error);
3955
3956	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
3957}
3958
3959static int
3960ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
3961{
3962	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3963	int mode, error = 0;
3964
3965	struct i40e_aq_get_phy_abilities_resp abilities;
3966	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
3967	if (error)
3968		return (error);
3969	/* Read in new mode */
3970	error = sysctl_handle_int(oidp, &mode, 0, req);
3971	if ((error) || (req->newptr == NULL))
3972		return (error);
3973
3974	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
3975}
3976
3977static int
3978ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
3979{
3980	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3981	int mode, error = 0;
3982
3983	struct i40e_aq_get_phy_abilities_resp abilities;
3984	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
3985	if (error)
3986		return (error);
3987	/* Read in new mode */
3988	error = sysctl_handle_int(oidp, &mode, 0, req);
3989	if ((error) || (req->newptr == NULL))
3990		return (error);
3991
3992	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
3993}
3994
3995static int
3996ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
3997{
3998	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3999	struct i40e_hw *hw = &pf->hw;
4000	device_t dev = pf->dev;
4001	struct sbuf *buf;
4002	int error = 0;
4003	enum i40e_status_code status;
4004
4005	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4006	if (!buf) {
4007		device_printf(dev, "Could not allocate sbuf for output.\n");
4008		return (ENOMEM);
4009	}
4010
4011	u8 *final_buff;
4012	/* This amount is only necessary if reading the entire cluster into memory */
4013#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
4014	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_NOWAIT);
4015	if (final_buff == NULL) {
4016		device_printf(dev, "Could not allocate memory for output.\n");
4017		goto out;
4018	}
4019	int final_buff_len = 0;
4020
4021	u8 cluster_id = 1;
4022	bool more = true;
4023
4024	u8 dump_buf[4096];
4025	u16 curr_buff_size = 4096;
4026	u8 curr_next_table = 0;
4027	u32 curr_next_index = 0;
4028
4029	u16 ret_buff_size;
4030	u8 ret_next_table;
4031	u32 ret_next_index;
4032
4033	sbuf_cat(buf, "\n");
4034
4035	while (more) {
4036		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4037		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4038		if (status) {
4039			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4040			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4041			goto free_out;
4042		}
4043
4044		/* copy info out of temp buffer */
4045		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4046		final_buff_len += ret_buff_size;
4047
4048		if (ret_next_table != curr_next_table) {
4049			/* We're done with the current table; we can dump out read data. */
4050			sbuf_printf(buf, "%d:", curr_next_table);
4051			int bytes_printed = 0;
4052			while (bytes_printed <= final_buff_len) {
4053				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4054				bytes_printed += 16;
4055			}
4056				sbuf_cat(buf, "\n");
4057
4058			/* The entire cluster has been read; we're finished */
4059			if (ret_next_table == 0xFF)
4060				break;
4061
4062			/* Otherwise clear the output buffer and continue reading */
4063			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4064			final_buff_len = 0;
4065		}
4066
4067		if (ret_next_index == 0xFFFFFFFF)
4068			ret_next_index = 0;
4069
4070		bzero(dump_buf, sizeof(dump_buf));
4071		curr_next_table = ret_next_table;
4072		curr_next_index = ret_next_index;
4073	}
4074
4075free_out:
4076	free(final_buff, M_DEVBUF);
4077out:
4078	error = sbuf_finish(buf);
4079	if (error)
4080		device_printf(dev, "Error finishing sbuf: %d\n", error);
4081	sbuf_delete(buf);
4082
4083	return (error);
4084}
4085
4086static int
4087ixl_start_fw_lldp(struct ixl_pf *pf)
4088{
4089	struct i40e_hw *hw = &pf->hw;
4090	enum i40e_status_code status;
4091
4092	status = i40e_aq_start_lldp(hw, false, NULL);
4093	if (status != I40E_SUCCESS) {
4094		switch (hw->aq.asq_last_status) {
4095		case I40E_AQ_RC_EEXIST:
4096			device_printf(pf->dev,
4097			    "FW LLDP agent is already running\n");
4098			break;
4099		case I40E_AQ_RC_EPERM:
4100			device_printf(pf->dev,
4101			    "Device configuration forbids SW from starting "
4102			    "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4103			    "attribute to \"Enabled\" to use this sysctl\n");
4104			return (EINVAL);
4105		default:
4106			device_printf(pf->dev,
4107			    "Starting FW LLDP agent failed: error: %s, %s\n",
4108			    i40e_stat_str(hw, status),
4109			    i40e_aq_str(hw, hw->aq.asq_last_status));
4110			return (EINVAL);
4111		}
4112	}
4113
4114	atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4115	return (0);
4116}
4117
4118static int
4119ixl_stop_fw_lldp(struct ixl_pf *pf)
4120{
4121	struct i40e_hw *hw = &pf->hw;
4122	device_t dev = pf->dev;
4123	enum i40e_status_code status;
4124
4125	if (hw->func_caps.npar_enable != 0) {
4126		device_printf(dev,
4127		    "Disabling FW LLDP agent is not supported on this device\n");
4128		return (EINVAL);
4129	}
4130
4131	if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4132		device_printf(dev,
4133		    "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4134		return (EINVAL);
4135	}
4136
4137	status = i40e_aq_stop_lldp(hw, true, false, NULL);
4138	if (status != I40E_SUCCESS) {
4139		if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4140			device_printf(dev,
4141			    "Disabling FW LLDP agent failed: error: %s, %s\n",
4142			    i40e_stat_str(hw, status),
4143			    i40e_aq_str(hw, hw->aq.asq_last_status));
4144			return (EINVAL);
4145		}
4146
4147		device_printf(dev, "FW LLDP agent is already stopped\n");
4148	}
4149
4150#ifndef EXTERNAL_RELEASE
4151	/* Let the FW set default DCB configuration on link UP as described in DCR 307.1 */
4152#endif
4153	i40e_aq_set_dcb_parameters(hw, true, NULL);
4154	atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
4155	return (0);
4156}
4157
4158static int
4159ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4160{
4161	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4162	int state, new_state, error = 0;
4163
4164	state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4165
4166	/* Read in new mode */
4167	error = sysctl_handle_int(oidp, &new_state, 0, req);
4168	if ((error) || (req->newptr == NULL))
4169		return (error);
4170
4171	/* Already in requested state */
4172	if (new_state == state)
4173		return (error);
4174
4175	if (new_state == 0)
4176		return ixl_stop_fw_lldp(pf);
4177
4178	return ixl_start_fw_lldp(pf);
4179}
4180
4181static int
4182ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4183{
4184	struct ixl_pf         *pf = (struct ixl_pf *)arg1;
4185	int                   state, new_state;
4186	int                   sysctl_handle_status = 0;
4187	enum i40e_status_code cmd_status;
4188
4189	/* Init states' values */
4190	state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
4191
4192	/* Get requested mode */
4193	sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4194	if ((sysctl_handle_status) || (req->newptr == NULL))
4195		return (sysctl_handle_status);
4196
4197	/* Check if state has changed */
4198	if (new_state == state)
4199		return (0);
4200
4201	/* Set new state */
4202	cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4203
4204	/* Save new state or report error */
4205	if (!cmd_status) {
4206		if (new_state == 0)
4207			atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4208		else
4209			atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4210	} else if (cmd_status == I40E_ERR_CONFIG)
4211		return (EPERM);
4212	else
4213		return (EIO);
4214
4215	return (0);
4216}
4217
4218int
4219ixl_attach_get_link_status(struct ixl_pf *pf)
4220{
4221	struct i40e_hw *hw = &pf->hw;
4222	device_t dev = pf->dev;
4223	int error = 0;
4224
4225	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4226	    (hw->aq.fw_maj_ver < 4)) {
4227		i40e_msec_delay(75);
4228		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4229		if (error) {
4230			device_printf(dev, "link restart failed, aq_err=%d\n",
4231			    pf->hw.aq.asq_last_status);
4232			return error;
4233		}
4234	}
4235
4236	/* Determine link state */
4237	hw->phy.get_link_info = TRUE;
4238	i40e_get_link_status(hw, &pf->link_up);
4239	return (0);
4240}
4241
4242static int
4243ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4244{
4245	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4246	int requested = 0, error = 0;
4247
4248	/* Read in new mode */
4249	error = sysctl_handle_int(oidp, &requested, 0, req);
4250	if ((error) || (req->newptr == NULL))
4251		return (error);
4252
4253	/* Initiate the PF reset later in the admin task */
4254	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
4255
4256	return (error);
4257}
4258
4259static int
4260ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4261{
4262	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4263	struct i40e_hw *hw = &pf->hw;
4264	int requested = 0, error = 0;
4265
4266	/* Read in new mode */
4267	error = sysctl_handle_int(oidp, &requested, 0, req);
4268	if ((error) || (req->newptr == NULL))
4269		return (error);
4270
4271	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4272
4273	return (error);
4274}
4275
4276static int
4277ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4278{
4279	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4280	struct i40e_hw *hw = &pf->hw;
4281	int requested = 0, error = 0;
4282
4283	/* Read in new mode */
4284	error = sysctl_handle_int(oidp, &requested, 0, req);
4285	if ((error) || (req->newptr == NULL))
4286		return (error);
4287
4288	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4289
4290	return (error);
4291}
4292
4293/*
4294 * Print out mapping of TX queue indexes and Rx queue indexes
4295 * to MSI-X vectors.
4296 */
4297static int
4298ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4299{
4300	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4301	struct ixl_vsi *vsi = &pf->vsi;
4302	device_t dev = pf->dev;
4303	struct sbuf *buf;
4304	int error = 0;
4305
4306	struct ixl_rx_queue *rx_que = vsi->rx_queues;
4307	struct ixl_tx_queue *tx_que = vsi->tx_queues;
4308
4309	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4310	if (!buf) {
4311		device_printf(dev, "Could not allocate sbuf for output.\n");
4312		return (ENOMEM);
4313	}
4314
4315	sbuf_cat(buf, "\n");
4316	for (int i = 0; i < vsi->num_rx_queues; i++) {
4317		rx_que = &vsi->rx_queues[i];
4318		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4319	}
4320	for (int i = 0; i < vsi->num_tx_queues; i++) {
4321		tx_que = &vsi->tx_queues[i];
4322		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4323	}
4324
4325	error = sbuf_finish(buf);
4326	if (error)
4327		device_printf(dev, "Error finishing sbuf: %d\n", error);
4328	sbuf_delete(buf);
4329
4330	return (error);
4331}
4332