1/******************************************************************************
2
3  Copyright (c) 2013-2018, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD$*/
34
35#include "iavf.h"
36
37/*********************************************************************
38 *  Driver version
39 *********************************************************************/
40#define IAVF_DRIVER_VERSION_MAJOR	2
41#define IAVF_DRIVER_VERSION_MINOR	0
42#define IAVF_DRIVER_VERSION_BUILD	0
43
44#define IAVF_DRIVER_VERSION_STRING			\
45    __XSTRING(IAVF_DRIVER_VERSION_MAJOR) "."		\
46    __XSTRING(IAVF_DRIVER_VERSION_MINOR) "."		\
47    __XSTRING(IAVF_DRIVER_VERSION_BUILD) "-k"
48
49/*********************************************************************
50 *  PCI Device ID Table
51 *
52 *  Used by probe to select devices to load on
53 *
54 *  ( Vendor ID, Device ID, Branding String )
55 *********************************************************************/
56
57static pci_vendor_info_t iavf_vendor_info_array[] =
58{
59	PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, "Intel(R) Ethernet Virtual Function 700 Series"),
60	PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, "Intel(R) Ethernet Virtual Function 700 Series (X722)"),
61	PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, "Intel(R) Ethernet Adaptive Virtual Function"),
62	/* required last entry */
63	PVID_END
64};
65
66/*********************************************************************
67 *  Function prototypes
68 *********************************************************************/
69static void	 *iavf_register(device_t dev);
70static int	 iavf_if_attach_pre(if_ctx_t ctx);
71static int	 iavf_if_attach_post(if_ctx_t ctx);
72static int	 iavf_if_detach(if_ctx_t ctx);
73static int	 iavf_if_shutdown(if_ctx_t ctx);
74static int	 iavf_if_suspend(if_ctx_t ctx);
75static int	 iavf_if_resume(if_ctx_t ctx);
76static int	 iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
77static void	 iavf_if_enable_intr(if_ctx_t ctx);
78static void	 iavf_if_disable_intr(if_ctx_t ctx);
79static int	 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
80static int	 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
81static int	 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
82static int	 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
83static void	 iavf_if_queues_free(if_ctx_t ctx);
84static void	 iavf_if_update_admin_status(if_ctx_t ctx);
85static void	 iavf_if_multi_set(if_ctx_t ctx);
86static int	 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
87static void	 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
88static int	 iavf_if_media_change(if_ctx_t ctx);
89static int	 iavf_if_promisc_set(if_ctx_t ctx, int flags);
90static void	 iavf_if_timer(if_ctx_t ctx, uint16_t qid);
91static void	 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
92static void	 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
93static uint64_t	 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
94static void	 iavf_if_stop(if_ctx_t ctx);
95static bool	 iavf_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
96
97static int	iavf_allocate_pci_resources(struct iavf_sc *);
98static int	iavf_reset_complete(struct i40e_hw *);
99static int	iavf_setup_vc(struct iavf_sc *);
100static int	iavf_reset(struct iavf_sc *);
101static int	iavf_vf_config(struct iavf_sc *);
102static void	iavf_init_filters(struct iavf_sc *);
103static void	iavf_free_pci_resources(struct iavf_sc *);
104static void	iavf_free_filters(struct iavf_sc *);
105static void	iavf_setup_interface(device_t, struct iavf_sc *);
106static void	iavf_add_device_sysctls(struct iavf_sc *);
107static void	iavf_enable_adminq_irq(struct i40e_hw *);
108static void	iavf_disable_adminq_irq(struct i40e_hw *);
109static void	iavf_enable_queue_irq(struct i40e_hw *, int);
110static void	iavf_disable_queue_irq(struct i40e_hw *, int);
111static void	iavf_config_rss(struct iavf_sc *);
112static void	iavf_stop(struct iavf_sc *);
113
114static int	iavf_add_mac_filter(struct iavf_sc *, u8 *, u16);
115static int	iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
116static int	iavf_msix_que(void *);
117static int	iavf_msix_adminq(void *);
118//static void	iavf_del_multi(struct iavf_sc *sc);
119static void	iavf_init_multi(struct iavf_sc *sc);
120static void	iavf_configure_itr(struct iavf_sc *sc);
121
122static int	iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
123static int	iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
124static int	iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
125static int	iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
126static int	iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
127static int	iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
128static int	iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);
129
130static void	iavf_save_tunables(struct iavf_sc *);
131static enum i40e_status_code
132    iavf_process_adminq(struct iavf_sc *, u16 *);
133static int	iavf_send_vc_msg(struct iavf_sc *sc, u32 op);
134static int	iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op);
135
136/*********************************************************************
137 *  FreeBSD Device Interface Entry Points
138 *********************************************************************/
139
140static device_method_t iavf_methods[] = {
141	/* Device interface */
142	DEVMETHOD(device_register, iavf_register),
143	DEVMETHOD(device_probe, iflib_device_probe),
144	DEVMETHOD(device_attach, iflib_device_attach),
145	DEVMETHOD(device_detach, iflib_device_detach),
146	DEVMETHOD(device_shutdown, iflib_device_shutdown),
147	DEVMETHOD_END
148};
149
150static driver_t iavf_driver = {
151	"iavf", iavf_methods, sizeof(struct iavf_sc),
152};
153
154devclass_t iavf_devclass;
155DRIVER_MODULE(iavf, pci, iavf_driver, iavf_devclass, 0, 0);
156MODULE_PNP_INFO("U32:vendor;U32:device;U32:subvendor;U32:subdevice;U32:revision",
157    pci, iavf, iavf_vendor_info_array,
158        nitems(iavf_vendor_info_array) - 1);
159MODULE_VERSION(iavf, 1);
160
161MODULE_DEPEND(iavf, pci, 1, 1, 1);
162MODULE_DEPEND(iavf, ether, 1, 1, 1);
163MODULE_DEPEND(iavf, iflib, 1, 1, 1);
164
165MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");
166
167static device_method_t iavf_if_methods[] = {
168	DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
169	DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
170	DEVMETHOD(ifdi_detach, iavf_if_detach),
171	DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
172	DEVMETHOD(ifdi_suspend, iavf_if_suspend),
173	DEVMETHOD(ifdi_resume, iavf_if_resume),
174	DEVMETHOD(ifdi_init, iavf_if_init),
175	DEVMETHOD(ifdi_stop, iavf_if_stop),
176	DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
177	DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
178	DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
179	DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
180	DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
181	DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
182	DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
183	DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
184	DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
185	DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
186	DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
187	DEVMETHOD(ifdi_media_status, iavf_if_media_status),
188	DEVMETHOD(ifdi_media_change, iavf_if_media_change),
189	DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
190	DEVMETHOD(ifdi_timer, iavf_if_timer),
191	DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
192	DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
193	DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
194	DEVMETHOD(ifdi_needs_restart, iavf_if_needs_restart),
195	DEVMETHOD_END
196};
197
198static driver_t iavf_if_driver = {
199	"iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
200};
201
202/*
203** TUNEABLE PARAMETERS:
204*/
205
206static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
207    "iavf driver parameters");
208
209/*
210 * Different method for processing TX descriptor
211 * completion.
212 */
213static int iavf_enable_head_writeback = 0;
214TUNABLE_INT("hw.iavf.enable_head_writeback",
215    &iavf_enable_head_writeback);
216SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
217    &iavf_enable_head_writeback, 0,
218    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
219
220static int iavf_core_debug_mask = 0;
221TUNABLE_INT("hw.iavf.core_debug_mask",
222    &iavf_core_debug_mask);
223SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
224    &iavf_core_debug_mask, 0,
225    "Display debug statements that are printed in non-shared code");
226
227static int iavf_shared_debug_mask = 0;
228TUNABLE_INT("hw.iavf.shared_debug_mask",
229    &iavf_shared_debug_mask);
230SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
231    &iavf_shared_debug_mask, 0,
232    "Display debug statements that are printed in shared code");
233
234int iavf_rx_itr = IXL_ITR_8K;
235TUNABLE_INT("hw.iavf.rx_itr", &iavf_rx_itr);
236SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
237    &iavf_rx_itr, 0, "RX Interrupt Rate");
238
239int iavf_tx_itr = IXL_ITR_4K;
240TUNABLE_INT("hw.iavf.tx_itr", &iavf_tx_itr);
241SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
242    &iavf_tx_itr, 0, "TX Interrupt Rate");
243
244extern struct if_txrx ixl_txrx_hwb;
245extern struct if_txrx ixl_txrx_dwb;
246
247static struct if_shared_ctx iavf_sctx_init = {
248	.isc_magic = IFLIB_MAGIC,
249	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
250	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
251	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
252	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
253	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
254	.isc_rx_maxsize = 16384,
255	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
256	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
257	.isc_nfl = 1,
258	.isc_ntxqs = 1,
259	.isc_nrxqs = 1,
260
261	.isc_admin_intrcnt = 1,
262	.isc_vendor_info = iavf_vendor_info_array,
263	.isc_driver_version = IAVF_DRIVER_VERSION_STRING,
264	.isc_driver = &iavf_if_driver,
265	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,
266
267	.isc_nrxd_min = {IXL_MIN_RING},
268	.isc_ntxd_min = {IXL_MIN_RING},
269	.isc_nrxd_max = {IXL_MAX_RING},
270	.isc_ntxd_max = {IXL_MAX_RING},
271	.isc_nrxd_default = {IXL_DEFAULT_RING},
272	.isc_ntxd_default = {IXL_DEFAULT_RING},
273};
274
275/*** Functions ***/
276static void *
277iavf_register(device_t dev)
278{
279	return (&iavf_sctx_init);
280}
281
282static int
283iavf_allocate_pci_resources(struct iavf_sc *sc)
284{
285	struct i40e_hw *hw = &sc->hw;
286	device_t dev = iflib_get_dev(sc->vsi.ctx);
287	int             rid;
288
289	/* Map BAR0 */
290	rid = PCIR_BAR(0);
291	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
292	    &rid, RF_ACTIVE);
293
294	if (!(sc->pci_mem)) {
295		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
296		return (ENXIO);
297 	}
298
299	/* Save off the PCI information */
300	hw->vendor_id = pci_get_vendor(dev);
301	hw->device_id = pci_get_device(dev);
302	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
303	hw->subsystem_vendor_id =
304	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
305	hw->subsystem_device_id =
306	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
307
308	hw->bus.device = pci_get_slot(dev);
309	hw->bus.func = pci_get_function(dev);
310
311	/* Save off register access information */
312	sc->osdep.mem_bus_space_tag =
313		rman_get_bustag(sc->pci_mem);
314	sc->osdep.mem_bus_space_handle =
315		rman_get_bushandle(sc->pci_mem);
316	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
317	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
318	sc->osdep.dev = dev;
319
320	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
321	sc->hw.back = &sc->osdep;
322
323 	return (0);
324}
325
326static int
327iavf_if_attach_pre(if_ctx_t ctx)
328{
329	device_t dev;
330	struct iavf_sc *sc;
331	struct i40e_hw *hw;
332	struct ixl_vsi *vsi;
333	if_softc_ctx_t scctx;
334	int error = 0;
335
336	dev = iflib_get_dev(ctx);
337	sc = iflib_get_softc(ctx);
338
339	vsi = &sc->vsi;
340	vsi->back = sc;
341	sc->dev = dev;
342	hw = &sc->hw;
343
344	vsi->dev = dev;
345	vsi->hw = &sc->hw;
346	vsi->num_vlans = 0;
347	vsi->ctx = ctx;
348	vsi->media = iflib_get_media(ctx);
349	vsi->shared = scctx = iflib_get_softc_ctx(ctx);
350
351	iavf_save_tunables(sc);
352
353	/* Do PCI setup - map BAR0, etc */
354	if (iavf_allocate_pci_resources(sc)) {
355		device_printf(dev, "%s: Allocation of PCI resources failed\n",
356		    __func__);
357		error = ENXIO;
358		goto err_early;
359	}
360
361	iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n");
362
363	/*
364	 * XXX: This is called by init_shared_code in the PF driver,
365	 * but the rest of that function does not support VFs.
366	 */
367	error = i40e_set_mac_type(hw);
368	if (error) {
369		device_printf(dev, "%s: set_mac_type failed: %d\n",
370		    __func__, error);
371		goto err_pci_res;
372	}
373
374	error = iavf_reset_complete(hw);
375	if (error) {
376		device_printf(dev, "%s: Device is still being reset\n",
377		    __func__);
378		goto err_pci_res;
379	}
380
381	iavf_dbg_init(sc, "VF Device is ready for configuration\n");
382
383	/* Sets up Admin Queue */
384	error = iavf_setup_vc(sc);
385	if (error) {
386		device_printf(dev, "%s: Error setting up PF comms, %d\n",
387		    __func__, error);
388		goto err_pci_res;
389	}
390
391	iavf_dbg_init(sc, "PF API version verified\n");
392
393	/* Need API version before sending reset message */
394	error = iavf_reset(sc);
395	if (error) {
396		device_printf(dev, "VF reset failed; reload the driver\n");
397		goto err_aq;
398	}
399
400	iavf_dbg_init(sc, "VF reset complete\n");
401
402	/* Ask for VF config from PF */
403	error = iavf_vf_config(sc);
404	if (error) {
405		device_printf(dev, "Error getting configuration from PF: %d\n",
406		    error);
407		goto err_aq;
408	}
409
410	device_printf(dev,
411	    "VSIs %d, QPs %d, MSI-X %d, RSS sizes: key %d lut %d\n",
412	    sc->vf_res->num_vsis,
413	    sc->vf_res->num_queue_pairs,
414	    sc->vf_res->max_vectors,
415	    sc->vf_res->rss_key_size,
416	    sc->vf_res->rss_lut_size);
417	iavf_dbg_info(sc, "Capabilities=%b\n",
418	    sc->vf_res->vf_cap_flags, IAVF_PRINTF_VF_OFFLOAD_FLAGS);
419
420	/* got VF config message back from PF, now we can parse it */
421	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
422		/* XXX: We only use the first VSI we find */
423		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
424			sc->vsi_res = &sc->vf_res->vsi_res[i];
425	}
426	if (!sc->vsi_res) {
427		device_printf(dev, "%s: no LAN VSI found\n", __func__);
428		error = EIO;
429		goto err_res_buf;
430	}
431	vsi->id = sc->vsi_res->vsi_id;
432
433	iavf_dbg_init(sc, "Resource Acquisition complete\n");
434
435	/* If no mac address was assigned just make a random one */
436	if (!iavf_check_ether_addr(hw->mac.addr)) {
437		u8 addr[ETHER_ADDR_LEN];
438		arc4rand(&addr, sizeof(addr), 0);
439		addr[0] &= 0xFE;
440		addr[0] |= 0x02;
441		bcopy(addr, hw->mac.addr, sizeof(addr));
442	}
443	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
444	iflib_set_mac(ctx, hw->mac.addr);
445
446	/* Allocate filter lists */
447	iavf_init_filters(sc);
448
449	/* Fill out more iflib parameters */
450	scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
451	    sc->vsi_res->num_queue_pairs;
452	if (vsi->enable_head_writeback) {
453		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
454		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
455		scctx->isc_txrx = &ixl_txrx_hwb;
456	} else {
457		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
458		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
459		scctx->isc_txrx = &ixl_txrx_dwb;
460	}
461	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
462	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
463	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
464	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
465	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
466	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
467	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
468	scctx->isc_rss_table_size = IXL_RSS_VSI_LUT_SIZE;
469	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
470	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
471
472	return (0);
473
474err_res_buf:
475	free(sc->vf_res, M_IAVF);
476err_aq:
477	i40e_shutdown_adminq(hw);
478err_pci_res:
479	iavf_free_pci_resources(sc);
480err_early:
481	return (error);
482}
483
484static int
485iavf_if_attach_post(if_ctx_t ctx)
486{
487	device_t dev;
488	struct iavf_sc	*sc;
489	struct i40e_hw	*hw;
490	struct ixl_vsi *vsi;
491	int error = 0;
492
493	INIT_DBG_DEV(dev, "begin");
494
495	dev = iflib_get_dev(ctx);
496	sc = iflib_get_softc(ctx);
497	vsi = &sc->vsi;
498	vsi->ifp = iflib_get_ifp(ctx);
499	hw = &sc->hw;
500
501	/* Save off determined number of queues for interface */
502	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
503	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
504
505	/* Setup the stack interface */
506	iavf_setup_interface(dev, sc);
507
508	INIT_DBG_DEV(dev, "Interface setup complete");
509
510	/* Initialize statistics & add sysctls */
511	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
512	iavf_add_device_sysctls(sc);
513
514	sc->init_state = IAVF_INIT_READY;
515	atomic_store_rel_32(&sc->queues_enabled, 0);
516
517	/* We want AQ enabled early for init */
518	iavf_enable_adminq_irq(hw);
519
520	INIT_DBG_DEV(dev, "end");
521
522	return (error);
523}
524
525/**
526 * XXX: iflib always ignores the return value of detach()
527 * -> This means that this isn't allowed to fail
528 */
529static int
530iavf_if_detach(if_ctx_t ctx)
531{
532	struct iavf_sc *sc = iflib_get_softc(ctx);
533	struct ixl_vsi *vsi = &sc->vsi;
534	struct i40e_hw *hw = &sc->hw;
535	device_t dev = sc->dev;
536	enum i40e_status_code status;
537
538	INIT_DBG_DEV(dev, "begin");
539
540	/* Remove all the media and link information */
541	ifmedia_removeall(vsi->media);
542
543	iavf_disable_adminq_irq(hw);
544	status = i40e_shutdown_adminq(&sc->hw);
545	if (status != I40E_SUCCESS) {
546		device_printf(dev,
547		    "i40e_shutdown_adminq() failed with status %s\n",
548		    i40e_stat_str(hw, status));
549	}
550
551	free(sc->vf_res, M_IAVF);
552	iavf_free_pci_resources(sc);
553	iavf_free_filters(sc);
554
555	INIT_DBG_DEV(dev, "end");
556	return (0);
557}
558
559static int
560iavf_if_shutdown(if_ctx_t ctx)
561{
562	return (0);
563}
564
565static int
566iavf_if_suspend(if_ctx_t ctx)
567{
568	return (0);
569}
570
571static int
572iavf_if_resume(if_ctx_t ctx)
573{
574	return (0);
575}
576
577static int
578iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
579{
580	int error = 0;
581	if_ctx_t ctx = sc->vsi.ctx;
582
583	error = ixl_vc_send_cmd(sc, op);
584	if (error != 0) {
585		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
586		return (error);
587	}
588
589	/* Don't wait for a response if the device is being detached. */
590	if (!iflib_in_detach(ctx)) {
591		iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);
592		error = sx_sleep(ixl_vc_get_op_chan(sc, op),
593		    iflib_ctx_lock_get(ctx), PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);
594
595		if (error == EWOULDBLOCK)
596			device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
597	}
598
599	return (error);
600}
601
602static int
603iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
604{
605	int error = 0;
606
607	error = ixl_vc_send_cmd(sc, op);
608	if (error != 0)
609		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
610
611	return (error);
612}
613
614static void
615iavf_init_queues(struct ixl_vsi *vsi)
616{
617	struct ixl_tx_queue *tx_que = vsi->tx_queues;
618	struct ixl_rx_queue *rx_que = vsi->rx_queues;
619	struct rx_ring *rxr;
620
621	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
622		ixl_init_tx_ring(vsi, tx_que);
623
624	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
625		rxr = &rx_que->rxr;
626
627		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
628
629		wr32(vsi->hw, rxr->tail, 0);
630	}
631}
632
633void
634iavf_if_init(if_ctx_t ctx)
635{
636	struct iavf_sc *sc = iflib_get_softc(ctx);
637	struct ixl_vsi *vsi = &sc->vsi;
638	struct i40e_hw *hw = &sc->hw;
639	struct ifnet *ifp = iflib_get_ifp(ctx);
640	u8 tmpaddr[ETHER_ADDR_LEN];
641	int error = 0;
642
643	INIT_DBG_IF(ifp, "begin");
644
645	MPASS(sx_xlocked(iflib_ctx_lock_get(ctx)));
646
647	error = iavf_reset_complete(hw);
648	if (error) {
649		device_printf(sc->dev, "%s: VF reset failed\n",
650		    __func__);
651	}
652
653	if (!i40e_check_asq_alive(hw)) {
654		iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
655		pci_enable_busmaster(sc->dev);
656		i40e_shutdown_adminq(hw);
657		i40e_init_adminq(hw);
658	}
659
660	/* Make sure queues are disabled */
661	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
662
663	bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN);
664	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
665	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
666		error = iavf_del_mac_filter(sc, hw->mac.addr);
667		if (error == 0)
668			iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
669
670		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
671	}
672
673	error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
674	if (!error || error == EEXIST)
675		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
676	iflib_set_mac(ctx, hw->mac.addr);
677
678	/* Prepare the queues for operation */
679	iavf_init_queues(vsi);
680
681	/* Set initial ITR values */
682	iavf_configure_itr(sc);
683
684	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);
685
686	/* Set up RSS */
687	iavf_config_rss(sc);
688
689	/* Map vectors */
690	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);
691
692	/* Init SW TX ring indices */
693	if (vsi->enable_head_writeback)
694		ixl_init_tx_cidx(vsi);
695	else
696		ixl_init_tx_rsqs(vsi);
697
698	/* Configure promiscuous mode */
699	iavf_if_promisc_set(ctx, if_getflags(ifp));
700
701	/* Enable queues */
702	iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);
703
704	sc->init_state = IAVF_RUNNING;
705}
706
707/*
708 * iavf_attach() helper function; initializes the admin queue
709 * and attempts to establish contact with the PF by
710 * retrying the initial "API version" message several times
711 * or until the PF responds.
712 */
713static int
714iavf_setup_vc(struct iavf_sc *sc)
715{
716	struct i40e_hw *hw = &sc->hw;
717	device_t dev = sc->dev;
718	int error = 0, ret_error = 0, asq_retries = 0;
719	bool send_api_ver_retried = 0;
720
721	/* Need to set these AQ paramters before initializing AQ */
722	hw->aq.num_arq_entries = IXL_AQ_LEN;
723	hw->aq.num_asq_entries = IXL_AQ_LEN;
724	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
725	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
726
727	for (int i = 0; i < IAVF_AQ_MAX_ERR; i++) {
728		/* Initialize admin queue */
729		error = i40e_init_adminq(hw);
730		if (error) {
731			device_printf(dev, "%s: init_adminq failed: %d\n",
732			    __func__, error);
733			ret_error = 1;
734			continue;
735		}
736
737		iavf_dbg_init(sc, "Initialized Admin Queue; starting"
738		    " send_api_ver attempt %d", i+1);
739
740retry_send:
741		/* Send VF's API version */
742		error = iavf_send_api_ver(sc);
743		if (error) {
744			i40e_shutdown_adminq(hw);
745			ret_error = 2;
746			device_printf(dev, "%s: unable to send api"
747			    " version to PF on attempt %d, error %d\n",
748			    __func__, i+1, error);
749		}
750
751		asq_retries = 0;
752		while (!i40e_asq_done(hw)) {
753			if (++asq_retries > IAVF_AQ_MAX_ERR) {
754				i40e_shutdown_adminq(hw);
755				device_printf(dev, "Admin Queue timeout "
756				    "(waiting for send_api_ver), %d more tries...\n",
757				    IAVF_AQ_MAX_ERR - (i + 1));
758				ret_error = 3;
759				break;
760			}
761			i40e_msec_pause(10);
762		}
763		if (asq_retries > IAVF_AQ_MAX_ERR)
764			continue;
765
766		iavf_dbg_init(sc, "Sent API version message to PF");
767
768		/* Verify that the VF accepts the PF's API version */
769		error = iavf_verify_api_ver(sc);
770		if (error == ETIMEDOUT) {
771			if (!send_api_ver_retried) {
772				/* Resend message, one more time */
773				send_api_ver_retried = true;
774				device_printf(dev,
775				    "%s: Timeout while verifying API version on first"
776				    " try!\n", __func__);
777				goto retry_send;
778			} else {
779				device_printf(dev,
780				    "%s: Timeout while verifying API version on second"
781				    " try!\n", __func__);
782				ret_error = 4;
783				break;
784			}
785		}
786		if (error) {
787			device_printf(dev,
788			    "%s: Unable to verify API version,"
789			    " error %s\n", __func__, i40e_stat_str(hw, error));
790			ret_error = 5;
791		}
792		break;
793	}
794
795	if (ret_error >= 4)
796		i40e_shutdown_adminq(hw);
797	return (ret_error);
798}
799
800/*
801 * iavf_attach() helper function; asks the PF for this VF's
802 * configuration, and saves the information if it receives it.
803 */
804static int
805iavf_vf_config(struct iavf_sc *sc)
806{
807	struct i40e_hw *hw = &sc->hw;
808	device_t dev = sc->dev;
809	int bufsz, error = 0, ret_error = 0;
810	int asq_retries, retried = 0;
811
812retry_config:
813	error = iavf_send_vf_config_msg(sc);
814	if (error) {
815		device_printf(dev,
816		    "%s: Unable to send VF config request, attempt %d,"
817		    " error %d\n", __func__, retried + 1, error);
818		ret_error = 2;
819	}
820
821	asq_retries = 0;
822	while (!i40e_asq_done(hw)) {
823		if (++asq_retries > IAVF_AQ_MAX_ERR) {
824			device_printf(dev, "%s: Admin Queue timeout "
825			    "(waiting for send_vf_config_msg), attempt %d\n",
826			    __func__, retried + 1);
827			ret_error = 3;
828			goto fail;
829		}
830		i40e_msec_pause(10);
831	}
832
833	iavf_dbg_init(sc, "Sent VF config message to PF, attempt %d\n",
834	    retried + 1);
835
836	if (!sc->vf_res) {
837		bufsz = sizeof(struct virtchnl_vf_resource) +
838		    (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
839		sc->vf_res = malloc(bufsz, M_IAVF, M_NOWAIT);
840		if (!sc->vf_res) {
841			device_printf(dev,
842			    "%s: Unable to allocate memory for VF configuration"
843			    " message from PF on attempt %d\n", __func__, retried + 1);
844			ret_error = 1;
845			goto fail;
846		}
847	}
848
849	/* Check for VF config response */
850	error = iavf_get_vf_config(sc);
851	if (error == ETIMEDOUT) {
852		/* The 1st time we timeout, send the configuration message again */
853		if (!retried) {
854			retried++;
855			goto retry_config;
856		}
857		device_printf(dev,
858		    "%s: iavf_get_vf_config() timed out waiting for a response\n",
859		    __func__);
860	}
861	if (error) {
862		device_printf(dev,
863		    "%s: Unable to get VF configuration from PF after %d tries!\n",
864		    __func__, retried + 1);
865		ret_error = 4;
866	}
867	goto done;
868
869fail:
870	free(sc->vf_res, M_IAVF);
871done:
872	return (ret_error);
873}
874
875static int
876iavf_if_msix_intr_assign(if_ctx_t ctx, int msix)
877{
878	struct iavf_sc *sc = iflib_get_softc(ctx);
879	struct ixl_vsi *vsi = &sc->vsi;
880	struct ixl_rx_queue *rx_que = vsi->rx_queues;
881	struct ixl_tx_queue *tx_que = vsi->tx_queues;
882	int err, i, rid, vector = 0;
883	char buf[16];
884
885	MPASS(vsi->shared->isc_nrxqsets > 0);
886	MPASS(vsi->shared->isc_ntxqsets > 0);
887
888	/* Admin Que is vector 0*/
889	rid = vector + 1;
890	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
891	    iavf_msix_adminq, sc, 0, "aq");
892	if (err) {
893		iflib_irq_free(ctx, &vsi->irq);
894		device_printf(iflib_get_dev(ctx),
895		    "Failed to register Admin Que handler");
896		return (err);
897	}
898
899	/* Now set up the stations */
900	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
901		rid = vector + 1;
902
903		snprintf(buf, sizeof(buf), "rxq%d", i);
904		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
905		    IFLIB_INTR_RXTX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
906		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
907		 * what's expected in the iflib context? */
908		if (err) {
909			device_printf(iflib_get_dev(ctx),
910			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
911			vsi->num_rx_queues = i + 1;
912			goto fail;
913		}
914		rx_que->msix = vector;
915	}
916
917	bzero(buf, sizeof(buf));
918
919	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
920		snprintf(buf, sizeof(buf), "txq%d", i);
921		iflib_softirq_alloc_generic(ctx,
922		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
923		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
924
925		/* TODO: Maybe call a strategy function for this to figure out which
926		* interrupts to map Tx queues to. I don't know if there's an immediately
927		* better way than this other than a user-supplied map, though. */
928		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
929	}
930
931	return (0);
932fail:
933	iflib_irq_free(ctx, &vsi->irq);
934	rx_que = vsi->rx_queues;
935	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
936		iflib_irq_free(ctx, &rx_que->que_irq);
937	return (err);
938}
939
940/* Enable all interrupts */
941static void
942iavf_if_enable_intr(if_ctx_t ctx)
943{
944	struct iavf_sc *sc = iflib_get_softc(ctx);
945	struct ixl_vsi *vsi = &sc->vsi;
946
947	iavf_enable_intr(vsi);
948}
949
950/* Disable all interrupts */
951static void
952iavf_if_disable_intr(if_ctx_t ctx)
953{
954	struct iavf_sc *sc = iflib_get_softc(ctx);
955	struct ixl_vsi *vsi = &sc->vsi;
956
957	iavf_disable_intr(vsi);
958}
959
960static int
961iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
962{
963	struct iavf_sc *sc = iflib_get_softc(ctx);
964	struct ixl_vsi *vsi = &sc->vsi;
965	struct i40e_hw *hw = vsi->hw;
966	struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];
967
968	iavf_enable_queue_irq(hw, rx_que->msix - 1);
969	return (0);
970}
971
972static int
973iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
974{
975	struct iavf_sc *sc = iflib_get_softc(ctx);
976	struct ixl_vsi *vsi = &sc->vsi;
977	struct i40e_hw *hw = vsi->hw;
978	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
979
980	iavf_enable_queue_irq(hw, tx_que->msix - 1);
981	return (0);
982}
983
984static int
985iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
986{
987	struct iavf_sc *sc = iflib_get_softc(ctx);
988	struct ixl_vsi *vsi = &sc->vsi;
989	if_softc_ctx_t scctx = vsi->shared;
990	struct ixl_tx_queue *que;
991	int i, j, error = 0;
992
993	MPASS(scctx->isc_ntxqsets > 0);
994	MPASS(ntxqs == 1);
995	MPASS(scctx->isc_ntxqsets == ntxqsets);
996
997	/* Allocate queue structure memory */
998	if (!(vsi->tx_queues =
999	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
1000		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1001		return (ENOMEM);
1002	}
1003
1004	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1005		struct tx_ring *txr = &que->txr;
1006
1007		txr->me = i;
1008		que->vsi = vsi;
1009
1010		if (!vsi->enable_head_writeback) {
1011			/* Allocate report status array */
1012			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
1013				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1014				error = ENOMEM;
1015				goto fail;
1016			}
1017			/* Init report status array */
1018			for (j = 0; j < scctx->isc_ntxd[0]; j++)
1019				txr->tx_rsq[j] = QIDX_INVALID;
1020		}
1021		/* get the virtual and physical address of the hardware queues */
1022		txr->tail = I40E_QTX_TAIL1(txr->me);
1023		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1024		txr->tx_paddr = paddrs[i * ntxqs];
1025		txr->que = que;
1026	}
1027
1028	return (0);
1029fail:
1030	iavf_if_queues_free(ctx);
1031	return (error);
1032}
1033
1034static int
1035iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1036{
1037	struct iavf_sc *sc = iflib_get_softc(ctx);
1038	struct ixl_vsi *vsi = &sc->vsi;
1039	struct ixl_rx_queue *que;
1040	int i, error = 0;
1041
1042#ifdef INVARIANTS
1043	if_softc_ctx_t scctx = vsi->shared;
1044	MPASS(scctx->isc_nrxqsets > 0);
1045	MPASS(nrxqs == 1);
1046	MPASS(scctx->isc_nrxqsets == nrxqsets);
1047#endif
1048
1049	/* Allocate queue structure memory */
1050	if (!(vsi->rx_queues =
1051	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1052	    nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
1053		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1054		error = ENOMEM;
1055		goto fail;
1056	}
1057
1058	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1059		struct rx_ring *rxr = &que->rxr;
1060
1061		rxr->me = i;
1062		que->vsi = vsi;
1063
1064		/* get the virtual and physical address of the hardware queues */
1065		rxr->tail = I40E_QRX_TAIL1(rxr->me);
1066		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1067		rxr->rx_paddr = paddrs[i * nrxqs];
1068		rxr->que = que;
1069	}
1070
1071	return (0);
1072fail:
1073	iavf_if_queues_free(ctx);
1074	return (error);
1075}
1076
1077static void
1078iavf_if_queues_free(if_ctx_t ctx)
1079{
1080	struct iavf_sc *sc = iflib_get_softc(ctx);
1081	struct ixl_vsi *vsi = &sc->vsi;
1082
1083	if (!vsi->enable_head_writeback) {
1084		struct ixl_tx_queue *que;
1085		int i = 0;
1086
1087		for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
1088			struct tx_ring *txr = &que->txr;
1089			if (txr->tx_rsq != NULL) {
1090				free(txr->tx_rsq, M_IAVF);
1091				txr->tx_rsq = NULL;
1092			}
1093		}
1094	}
1095
1096	if (vsi->tx_queues != NULL) {
1097		free(vsi->tx_queues, M_IAVF);
1098		vsi->tx_queues = NULL;
1099	}
1100	if (vsi->rx_queues != NULL) {
1101		free(vsi->rx_queues, M_IAVF);
1102		vsi->rx_queues = NULL;
1103	}
1104}
1105
1106static int
1107iavf_check_aq_errors(struct iavf_sc *sc)
1108{
1109	struct i40e_hw *hw = &sc->hw;
1110	device_t dev = sc->dev;
1111	u32 reg, oldreg;
1112	u8 aq_error = false;
1113
1114	/* check for Admin queue errors */
1115	oldreg = reg = rd32(hw, hw->aq.arq.len);
1116	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
1117		device_printf(dev, "ARQ VF Error detected\n");
1118		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
1119		aq_error = true;
1120	}
1121	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
1122		device_printf(dev, "ARQ Overflow Error detected\n");
1123		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
1124		aq_error = true;
1125	}
1126	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
1127		device_printf(dev, "ARQ Critical Error detected\n");
1128		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
1129		aq_error = true;
1130	}
1131	if (oldreg != reg)
1132		wr32(hw, hw->aq.arq.len, reg);
1133
1134	oldreg = reg = rd32(hw, hw->aq.asq.len);
1135	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
1136		device_printf(dev, "ASQ VF Error detected\n");
1137		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
1138		aq_error = true;
1139	}
1140	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
1141		device_printf(dev, "ASQ Overflow Error detected\n");
1142		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
1143		aq_error = true;
1144	}
1145	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
1146		device_printf(dev, "ASQ Critical Error detected\n");
1147		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
1148		aq_error = true;
1149	}
1150	if (oldreg != reg)
1151		wr32(hw, hw->aq.asq.len, reg);
1152
1153	if (aq_error) {
1154		device_printf(dev, "WARNING: Stopping VF!\n");
1155		/*
1156		 * A VF reset might not be enough to fix a problem here;
1157		 * a PF reset could be required.
1158		 */
1159		sc->init_state = IAVF_RESET_REQUIRED;
1160		iavf_stop(sc);
1161		iavf_request_reset(sc);
1162	}
1163
1164	return (aq_error ? EIO : 0);
1165}
1166
1167static enum i40e_status_code
1168iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
1169{
1170	enum i40e_status_code status = I40E_SUCCESS;
1171	struct i40e_arq_event_info event;
1172	struct i40e_hw *hw = &sc->hw;
1173	struct virtchnl_msg *v_msg;
1174	int error = 0, loop = 0;
1175	u32 reg;
1176
1177	error = iavf_check_aq_errors(sc);
1178	if (error)
1179		return (I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR);
1180
1181	event.buf_len = IXL_AQ_BUF_SZ;
1182        event.msg_buf = sc->aq_buffer;
1183	bzero(event.msg_buf, IXL_AQ_BUF_SZ);
1184	v_msg = (struct virtchnl_msg *)&event.desc;
1185
1186	/* clean and process any events */
1187	do {
1188		status = i40e_clean_arq_element(hw, &event, pending);
1189		/*
1190		 * Also covers normal case when i40e_clean_arq_element()
1191		 * returns "I40E_ERR_ADMIN_QUEUE_NO_WORK"
1192		 */
1193		if (status)
1194			break;
1195		iavf_vc_completion(sc, v_msg->v_opcode,
1196		    v_msg->v_retval, event.msg_buf, event.msg_len);
1197		bzero(event.msg_buf, IXL_AQ_BUF_SZ);
1198	} while (*pending && (loop++ < IXL_ADM_LIMIT));
1199
1200	/* Re-enable admin queue interrupt cause */
1201	reg = rd32(hw, I40E_VFINT_ICR0_ENA1);
1202	reg |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK;
1203	wr32(hw, I40E_VFINT_ICR0_ENA1, reg);
1204
1205	return (status);
1206}
1207
1208static void
1209iavf_if_update_admin_status(if_ctx_t ctx)
1210{
1211	struct iavf_sc *sc = iflib_get_softc(ctx);
1212	struct i40e_hw *hw = &sc->hw;
1213	u16 pending;
1214
1215	iavf_process_adminq(sc, &pending);
1216	iavf_update_link_status(sc);
1217
1218	/*
1219	 * If there are still messages to process, reschedule.
1220	 * Otherwise, re-enable the Admin Queue interrupt.
1221	 */
1222	if (pending > 0)
1223		iflib_admin_intr_deferred(ctx);
1224	else
1225		iavf_enable_adminq_irq(hw);
1226}
1227
1228static u_int
1229iavf_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused)
1230{
1231	struct iavf_sc *sc = arg;
1232	int error;
1233
1234	error = iavf_add_mac_filter(sc, (u8*)LLADDR(sdl), IAVF_FILTER_MC);
1235	return (!error);
1236}
1237
1238static void
1239iavf_if_multi_set(if_ctx_t ctx)
1240{
1241	struct iavf_sc *sc = iflib_get_softc(ctx);
1242
1243	IOCTL_DEBUGOUT("iavf_if_multi_set: begin");
1244
1245	if (__predict_false(if_llmaddr_count(iflib_get_ifp(ctx)) >=
1246	    MAX_MULTICAST_ADDR)) {
1247		/* Delete MC filters and enable mulitcast promisc instead */
1248		iavf_init_multi(sc);
1249		sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
1250		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
1251		return;
1252	}
1253
1254	/* If there aren't too many filters, delete existing MC filters */
1255	iavf_init_multi(sc);
1256
1257	/* And (re-)install filters for all mcast addresses */
1258	if (if_foreach_llmaddr(iflib_get_ifp(ctx), iavf_mc_filter_apply, sc) >
1259	    0)
1260		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
1261}
1262
1263static int
1264iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1265{
1266	struct iavf_sc *sc = iflib_get_softc(ctx);
1267	struct ixl_vsi *vsi = &sc->vsi;
1268
1269	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1270	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1271		ETHER_VLAN_ENCAP_LEN)
1272		return (EINVAL);
1273
1274	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1275		ETHER_VLAN_ENCAP_LEN;
1276
1277	return (0);
1278}
1279
1280static void
1281iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1282{
1283#ifdef IXL_DEBUG
1284	struct ifnet *ifp = iflib_get_ifp(ctx);
1285#endif
1286	struct iavf_sc *sc = iflib_get_softc(ctx);
1287
1288	INIT_DBG_IF(ifp, "begin");
1289
1290	iavf_update_link_status(sc);
1291
1292	ifmr->ifm_status = IFM_AVALID;
1293	ifmr->ifm_active = IFM_ETHER;
1294
1295	if (!sc->link_up)
1296		return;
1297
1298	ifmr->ifm_status |= IFM_ACTIVE;
1299	/* Hardware is always full-duplex */
1300	ifmr->ifm_active |= IFM_FDX;
1301
1302	/* Based on the link speed reported by the PF over the AdminQ, choose a
1303	 * PHY type to report. This isn't 100% correct since we don't really
1304	 * know the underlying PHY type of the PF, but at least we can report
1305	 * a valid link speed...
1306	 */
1307	switch (sc->link_speed) {
1308	case VIRTCHNL_LINK_SPEED_100MB:
1309		ifmr->ifm_active |= IFM_100_TX;
1310		break;
1311	case VIRTCHNL_LINK_SPEED_1GB:
1312		ifmr->ifm_active |= IFM_1000_T;
1313		break;
1314	case VIRTCHNL_LINK_SPEED_10GB:
1315		ifmr->ifm_active |= IFM_10G_SR;
1316		break;
1317	case VIRTCHNL_LINK_SPEED_20GB:
1318	case VIRTCHNL_LINK_SPEED_25GB:
1319		ifmr->ifm_active |= IFM_25G_SR;
1320		break;
1321	case VIRTCHNL_LINK_SPEED_40GB:
1322		ifmr->ifm_active |= IFM_40G_SR4;
1323		break;
1324	default:
1325		ifmr->ifm_active |= IFM_UNKNOWN;
1326		break;
1327	}
1328
1329	INIT_DBG_IF(ifp, "end");
1330}
1331
1332static int
1333iavf_if_media_change(if_ctx_t ctx)
1334{
1335	struct ifmedia *ifm = iflib_get_media(ctx);
1336
1337	INIT_DEBUGOUT("ixl_media_change: begin");
1338
1339	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1340		return (EINVAL);
1341
1342	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
1343	return (ENODEV);
1344}
1345
1346static int
1347iavf_if_promisc_set(if_ctx_t ctx, int flags)
1348{
1349	struct iavf_sc *sc = iflib_get_softc(ctx);
1350	struct ifnet	*ifp = iflib_get_ifp(ctx);
1351
1352	sc->promisc_flags = 0;
1353
1354	if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1355	    MAX_MULTICAST_ADDR)
1356		sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
1357	if (flags & IFF_PROMISC)
1358		sc->promisc_flags |= FLAG_VF_UNICAST_PROMISC;
1359
1360	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
1361
1362	return (0);
1363}
1364
1365static void
1366iavf_if_timer(if_ctx_t ctx, uint16_t qid)
1367{
1368	struct iavf_sc *sc = iflib_get_softc(ctx);
1369	struct i40e_hw *hw = &sc->hw;
1370	u32 val;
1371
1372	if (qid != 0)
1373		return;
1374
1375	/* Check for when PF triggers a VF reset */
1376	val = rd32(hw, I40E_VFGEN_RSTAT) &
1377	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1378	if (val != VIRTCHNL_VFR_VFACTIVE
1379	    && val != VIRTCHNL_VFR_COMPLETED) {
1380		iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
1381		return;
1382	}
1383
1384	/* Fire off the adminq task */
1385	iflib_admin_intr_deferred(ctx);
1386
1387	/* Update stats */
1388	iavf_request_stats(sc);
1389}
1390
1391static void
1392iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
1393{
1394	struct iavf_sc *sc = iflib_get_softc(ctx);
1395	struct ixl_vsi *vsi = &sc->vsi;
1396	struct iavf_vlan_filter	*v;
1397
1398	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1399		return;
1400
1401	++vsi->num_vlans;
1402	v = malloc(sizeof(struct iavf_vlan_filter), M_IAVF, M_WAITOK | M_ZERO);
1403	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1404	v->vlan = vtag;
1405	v->flags = IAVF_FILTER_ADD;
1406
1407	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
1408}
1409
1410static void
1411iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1412{
1413	struct iavf_sc *sc = iflib_get_softc(ctx);
1414	struct ixl_vsi *vsi = &sc->vsi;
1415	struct iavf_vlan_filter	*v;
1416	int			i = 0;
1417
1418	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1419		return;
1420
1421	SLIST_FOREACH(v, sc->vlan_filters, next) {
1422		if (v->vlan == vtag) {
1423			v->flags = IAVF_FILTER_DEL;
1424			++i;
1425			--vsi->num_vlans;
1426		}
1427	}
1428	if (i)
1429		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
1430}
1431
1432static uint64_t
1433iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1434{
1435	struct iavf_sc *sc = iflib_get_softc(ctx);
1436	struct ixl_vsi *vsi = &sc->vsi;
1437	if_t ifp = iflib_get_ifp(ctx);
1438
1439	switch (cnt) {
1440	case IFCOUNTER_IPACKETS:
1441		return (vsi->ipackets);
1442	case IFCOUNTER_IERRORS:
1443		return (vsi->ierrors);
1444	case IFCOUNTER_OPACKETS:
1445		return (vsi->opackets);
1446	case IFCOUNTER_OERRORS:
1447		return (vsi->oerrors);
1448	case IFCOUNTER_COLLISIONS:
1449		/* Collisions are by standard impossible in 40G/10G Ethernet */
1450		return (0);
1451	case IFCOUNTER_IBYTES:
1452		return (vsi->ibytes);
1453	case IFCOUNTER_OBYTES:
1454		return (vsi->obytes);
1455	case IFCOUNTER_IMCASTS:
1456		return (vsi->imcasts);
1457	case IFCOUNTER_OMCASTS:
1458		return (vsi->omcasts);
1459	case IFCOUNTER_IQDROPS:
1460		return (vsi->iqdrops);
1461	case IFCOUNTER_OQDROPS:
1462		return (vsi->oqdrops);
1463	case IFCOUNTER_NOPROTO:
1464		return (vsi->noproto);
1465	default:
1466		return (if_get_counter_default(ifp, cnt));
1467	}
1468}
1469
1470/* iavf_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1471 * @ctx: iflib context
1472 * @event: event code to check
1473 *
1474 * Defaults to returning true for every event.
1475 *
1476 * @returns true if iflib needs to reinit the interface
1477 */
1478static bool
1479iavf_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1480{
1481	switch (event) {
1482	case IFLIB_RESTART_VLAN_CONFIG:
1483		/* This case must return true if VLAN anti-spoof checks are
1484		 * enabled by the PF driver for the VF.
1485		 */
1486	default:
1487		return (true);
1488	}
1489}
1490
1491static void
1492iavf_free_pci_resources(struct iavf_sc *sc)
1493{
1494	struct ixl_vsi		*vsi = &sc->vsi;
1495	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1496	device_t                dev = sc->dev;
1497
1498	/* We may get here before stations are set up */
1499	if (rx_que == NULL)
1500		goto early;
1501
1502	/* Release all interrupts */
1503	iflib_irq_free(vsi->ctx, &vsi->irq);
1504
1505	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1506		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
1507
1508early:
1509	if (sc->pci_mem != NULL)
1510		bus_release_resource(dev, SYS_RES_MEMORY,
1511		    rman_get_rid(sc->pci_mem), sc->pci_mem);
1512}
1513
1514
1515/*
1516** Requests a VF reset from the PF.
1517**
1518** Requires the VF's Admin Queue to be initialized.
1519*/
1520static int
1521iavf_reset(struct iavf_sc *sc)
1522{
1523	struct i40e_hw	*hw = &sc->hw;
1524	device_t	dev = sc->dev;
1525	int		error = 0;
1526
1527	/* Ask the PF to reset us if we are initiating */
1528	if (sc->init_state != IAVF_RESET_PENDING)
1529		iavf_request_reset(sc);
1530
1531	i40e_msec_pause(100);
1532	error = iavf_reset_complete(hw);
1533	if (error) {
1534		device_printf(dev, "%s: VF reset failed\n",
1535		    __func__);
1536		return (error);
1537	}
1538	pci_enable_busmaster(dev);
1539
1540	error = i40e_shutdown_adminq(hw);
1541	if (error) {
1542		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1543		    __func__, error);
1544		return (error);
1545	}
1546
1547	error = i40e_init_adminq(hw);
1548	if (error) {
1549		device_printf(dev, "%s: init_adminq failed: %d\n",
1550		    __func__, error);
1551		return (error);
1552	}
1553
1554	iavf_enable_adminq_irq(hw);
1555	return (0);
1556}
1557
1558static int
1559iavf_reset_complete(struct i40e_hw *hw)
1560{
1561	u32 reg;
1562
1563	/* Wait up to ~10 seconds */
1564	for (int i = 0; i < 100; i++) {
1565		reg = rd32(hw, I40E_VFGEN_RSTAT) &
1566		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1567
1568                if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
1569		    (reg == VIRTCHNL_VFR_COMPLETED))
1570			return (0);
1571		i40e_msec_pause(100);
1572	}
1573
1574	return (EBUSY);
1575}
1576
1577static void
1578iavf_setup_interface(device_t dev, struct iavf_sc *sc)
1579{
1580	struct ixl_vsi *vsi = &sc->vsi;
1581	if_ctx_t ctx = vsi->ctx;
1582	struct ifnet *ifp = iflib_get_ifp(ctx);
1583
1584	INIT_DBG_DEV(dev, "begin");
1585
1586	vsi->shared->isc_max_frame_size =
1587	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1588	    + ETHER_VLAN_ENCAP_LEN;
1589#if __FreeBSD_version >= 1100000
1590	if_setbaudrate(ifp, IF_Gbps(40));
1591#else
1592	if_initbaudrate(ifp, IF_Gbps(40));
1593#endif
1594
1595	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1596	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1597}
1598
1599/*
1600** Get a new filter and add it to the mac filter list.
1601*/
1602static struct iavf_mac_filter *
1603iavf_get_mac_filter(struct iavf_sc *sc)
1604{
1605	struct iavf_mac_filter	*f;
1606
1607	f = malloc(sizeof(struct iavf_mac_filter),
1608	    M_IAVF, M_NOWAIT | M_ZERO);
1609	if (f)
1610		SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1611
1612	return (f);
1613}
1614
1615/*
1616** Find the filter with matching MAC address
1617*/
1618static struct iavf_mac_filter *
1619iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr)
1620{
1621	struct iavf_mac_filter	*f;
1622	bool match = FALSE;
1623
1624	SLIST_FOREACH(f, sc->mac_filters, next) {
1625		if (ixl_ether_is_equal(f->macaddr, macaddr)) {
1626			match = TRUE;
1627			break;
1628		}
1629	}
1630
1631	if (!match)
1632		f = NULL;
1633	return (f);
1634}
1635
1636/*
1637** Admin Queue interrupt handler
1638*/
1639static int
1640iavf_msix_adminq(void *arg)
1641{
1642	struct iavf_sc	*sc = arg;
1643	struct i40e_hw	*hw = &sc->hw;
1644	u32		reg, mask;
1645	bool		do_task = FALSE;
1646
1647	++sc->admin_irq;
1648
1649        reg = rd32(hw, I40E_VFINT_ICR01);
1650	/*
1651	 * For masking off interrupt causes that need to be handled before
1652	 * they can be re-enabled
1653	 */
1654        mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1655
1656	/* Check on the cause */
1657	if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) {
1658		mask &= ~I40E_VFINT_ICR0_ENA_ADMINQ_MASK;
1659		do_task = TRUE;
1660	}
1661
1662	wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
1663	iavf_enable_adminq_irq(hw);
1664
1665	if (do_task)
1666		return (FILTER_SCHEDULE_THREAD);
1667	else
1668		return (FILTER_HANDLED);
1669}
1670
1671void
1672iavf_enable_intr(struct ixl_vsi *vsi)
1673{
1674	struct i40e_hw *hw = vsi->hw;
1675	struct ixl_rx_queue *que = vsi->rx_queues;
1676
1677	iavf_enable_adminq_irq(hw);
1678	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1679		iavf_enable_queue_irq(hw, que->rxr.me);
1680}
1681
1682void
1683iavf_disable_intr(struct ixl_vsi *vsi)
1684{
1685        struct i40e_hw *hw = vsi->hw;
1686        struct ixl_rx_queue *que = vsi->rx_queues;
1687
1688	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1689		iavf_disable_queue_irq(hw, que->rxr.me);
1690}
1691
1692static void
1693iavf_disable_adminq_irq(struct i40e_hw *hw)
1694{
1695	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1696	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1697	/* flush */
1698	rd32(hw, I40E_VFGEN_RSTAT);
1699}
1700
1701static void
1702iavf_enable_adminq_irq(struct i40e_hw *hw)
1703{
1704	wr32(hw, I40E_VFINT_DYN_CTL01,
1705	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
1706	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1707	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
1708	/* flush */
1709	rd32(hw, I40E_VFGEN_RSTAT);
1710}
1711
1712static void
1713iavf_enable_queue_irq(struct i40e_hw *hw, int id)
1714{
1715	u32		reg;
1716
1717	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1718	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1719	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
1720	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1721}
1722
1723static void
1724iavf_disable_queue_irq(struct i40e_hw *hw, int id)
1725{
1726	wr32(hw, I40E_VFINT_DYN_CTLN1(id),
1727	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1728	rd32(hw, I40E_VFGEN_RSTAT);
1729}
1730
1731static void
1732iavf_configure_tx_itr(struct iavf_sc *sc)
1733{
1734	struct i40e_hw		*hw = &sc->hw;
1735	struct ixl_vsi		*vsi = &sc->vsi;
1736	struct ixl_tx_queue	*que = vsi->tx_queues;
1737
1738	vsi->tx_itr_setting = sc->tx_itr;
1739
1740	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
1741		struct tx_ring	*txr = &que->txr;
1742
1743		wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
1744		    vsi->tx_itr_setting);
1745		txr->itr = vsi->tx_itr_setting;
1746		txr->latency = IXL_AVE_LATENCY;
1747	}
1748}
1749
1750static void
1751iavf_configure_rx_itr(struct iavf_sc *sc)
1752{
1753	struct i40e_hw		*hw = &sc->hw;
1754	struct ixl_vsi		*vsi = &sc->vsi;
1755	struct ixl_rx_queue	*que = vsi->rx_queues;
1756
1757	vsi->rx_itr_setting = sc->rx_itr;
1758
1759	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
1760		struct rx_ring 	*rxr = &que->rxr;
1761
1762		wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
1763		    vsi->rx_itr_setting);
1764		rxr->itr = vsi->rx_itr_setting;
1765		rxr->latency = IXL_AVE_LATENCY;
1766	}
1767}
1768
1769/*
1770 * Get initial ITR values from tunable values.
1771 */
1772static void
1773iavf_configure_itr(struct iavf_sc *sc)
1774{
1775	iavf_configure_tx_itr(sc);
1776	iavf_configure_rx_itr(sc);
1777}
1778
1779/*
1780** Provide a update to the queue RX
1781** interrupt moderation value.
1782*/
1783static void
1784iavf_set_queue_rx_itr(struct ixl_rx_queue *que)
1785{
1786	struct ixl_vsi	*vsi = que->vsi;
1787	struct i40e_hw	*hw = vsi->hw;
1788	struct rx_ring	*rxr = &que->rxr;
1789
1790	/* Idle, do nothing */
1791	if (rxr->bytes == 0)
1792		return;
1793
1794	/* Update the hardware if needed */
1795	if (rxr->itr != vsi->rx_itr_setting) {
1796		rxr->itr = vsi->rx_itr_setting;
1797		wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1798		    que->rxr.me), rxr->itr);
1799	}
1800}
1801
1802static int
1803iavf_msix_que(void *arg)
1804{
1805	struct ixl_rx_queue *rx_que = arg;
1806
1807	++rx_que->irqs;
1808
1809	iavf_set_queue_rx_itr(rx_que);
1810	// iavf_set_queue_tx_itr(que);
1811
1812	return (FILTER_SCHEDULE_THREAD);
1813}
1814
1815/*********************************************************************
1816 *  Multicast Initialization
1817 *
1818 *  This routine is called by init to reset a fresh state.
1819 *
1820 **********************************************************************/
1821static void
1822iavf_init_multi(struct iavf_sc *sc)
1823{
1824	struct iavf_mac_filter *f;
1825	int mcnt = 0;
1826
1827	/* First clear any multicast filters */
1828	SLIST_FOREACH(f, sc->mac_filters, next) {
1829		if ((f->flags & IAVF_FILTER_USED)
1830		    && (f->flags & IAVF_FILTER_MC)) {
1831			f->flags |= IAVF_FILTER_DEL;
1832			mcnt++;
1833		}
1834	}
1835	if (mcnt > 0)
1836		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
1837}
1838
1839/*
1840** Note: this routine updates the OS on the link state
1841**	the real check of the hardware only happens with
1842**	a link interrupt.
1843*/
1844void
1845iavf_update_link_status(struct iavf_sc *sc)
1846{
1847	struct ixl_vsi *vsi = &sc->vsi;
1848	u64 baudrate;
1849
1850	if (sc->link_up){
1851		if (vsi->link_active == FALSE) {
1852			vsi->link_active = TRUE;
1853			baudrate = ixl_max_vc_speed_to_value(sc->link_speed);
1854			iavf_dbg_info(sc, "baudrate: %lu\n", baudrate);
1855			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1856		}
1857	} else { /* Link down */
1858		if (vsi->link_active == TRUE) {
1859			vsi->link_active = FALSE;
1860			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1861		}
1862	}
1863}
1864
1865/*********************************************************************
1866 *
1867 *  This routine disables all traffic on the adapter by issuing a
1868 *  global reset on the MAC and deallocates TX/RX buffers.
1869 *
1870 **********************************************************************/
1871
1872static void
1873iavf_stop(struct iavf_sc *sc)
1874{
1875	struct ifnet *ifp;
1876
1877	ifp = sc->vsi.ifp;
1878
1879	iavf_disable_intr(&sc->vsi);
1880
1881	if (atomic_load_acq_32(&sc->queues_enabled))
1882		iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
1883}
1884
1885static void
1886iavf_if_stop(if_ctx_t ctx)
1887{
1888	struct iavf_sc *sc = iflib_get_softc(ctx);
1889
1890	iavf_stop(sc);
1891}
1892
1893static void
1894iavf_config_rss_reg(struct iavf_sc *sc)
1895{
1896	struct i40e_hw	*hw = &sc->hw;
1897	struct ixl_vsi	*vsi = &sc->vsi;
1898	u32		lut = 0;
1899	u64		set_hena = 0, hena;
1900	int		i, j, que_id;
1901	u32		rss_seed[IXL_RSS_KEY_SIZE_REG];
1902#ifdef RSS
1903	u32		rss_hash_config;
1904#endif
1905
1906	/* Don't set up RSS if using a single queue */
1907	if (vsi->num_rx_queues == 1) {
1908		wr32(hw, I40E_VFQF_HENA(0), 0);
1909		wr32(hw, I40E_VFQF_HENA(1), 0);
1910		ixl_flush(hw);
1911		return;
1912	}
1913
1914#ifdef RSS
1915	/* Fetch the configured RSS key */
1916	rss_getkey((uint8_t *) &rss_seed);
1917#else
1918	ixl_get_default_rss_key(rss_seed);
1919#endif
1920
1921	/* Fill out hash function seed */
1922	for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1923                wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
1924
1925	/* Enable PCTYPES for RSS: */
1926#ifdef RSS
1927	rss_hash_config = rss_gethashconfig();
1928	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1929                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1930	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1931                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1932	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1933                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1934	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1935                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1936	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1937		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1938	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1939                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1940        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1941                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1942#else
1943	set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1944#endif
1945	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
1946	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
1947	hena |= set_hena;
1948	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
1949	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
1950
1951	/* Populate the LUT with max no. of queues in round robin fashion */
1952	for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
1953                if (j == vsi->num_rx_queues)
1954                        j = 0;
1955#ifdef RSS
1956		/*
1957		 * Fetch the RSS bucket id for the given indirection entry.
1958		 * Cap it at the number of configured buckets (which is
1959		 * num_rx_queues.)
1960		 */
1961		que_id = rss_get_indirection_to_bucket(i);
1962		que_id = que_id % vsi->num_rx_queues;
1963#else
1964		que_id = j;
1965#endif
1966                /* lut = 4-byte sliding window of 4 lut entries */
1967                lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
1968                /* On i = 3, we have 4 entries in lut; write to the register */
1969                if ((i & 3) == 3) {
1970                        wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
1971			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
1972		}
1973        }
1974	ixl_flush(hw);
1975}
1976
1977static void
1978iavf_config_rss_pf(struct iavf_sc *sc)
1979{
1980	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_KEY);
1981
1982	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_SET_RSS_HENA);
1983
1984	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_LUT);
1985}
1986
1987/*
1988** iavf_config_rss - setup RSS
1989**
1990** RSS keys and table are cleared on VF reset.
1991*/
1992static void
1993iavf_config_rss(struct iavf_sc *sc)
1994{
1995	if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
1996		iavf_dbg_info(sc, "Setting up RSS using VF registers...");
1997		iavf_config_rss_reg(sc);
1998	} else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1999		iavf_dbg_info(sc, "Setting up RSS using messages to PF...");
2000		iavf_config_rss_pf(sc);
2001	} else
2002		device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
2003}
2004
2005/*
2006** This routine adds new MAC filters to the sc's list;
2007** these are later added in hardware by sending a virtual
2008** channel message.
2009*/
2010static int
2011iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags)
2012{
2013	struct iavf_mac_filter	*f;
2014
2015	/* Does one already exist? */
2016	f = iavf_find_mac_filter(sc, macaddr);
2017	if (f != NULL) {
2018		iavf_dbg_filter(sc, "exists: " MAC_FORMAT "\n",
2019		    MAC_FORMAT_ARGS(macaddr));
2020		return (EEXIST);
2021	}
2022
2023	/* If not, get a new empty filter */
2024	f = iavf_get_mac_filter(sc);
2025	if (f == NULL) {
2026		device_printf(sc->dev, "%s: no filters available!!\n",
2027		    __func__);
2028		return (ENOMEM);
2029	}
2030
2031	iavf_dbg_filter(sc, "marked: " MAC_FORMAT "\n",
2032	    MAC_FORMAT_ARGS(macaddr));
2033
2034	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2035	f->flags |= (IAVF_FILTER_ADD | IAVF_FILTER_USED);
2036	f->flags |= flags;
2037	return (0);
2038}
2039
2040/*
2041** Marks a MAC filter for deletion.
2042*/
2043static int
2044iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
2045{
2046	struct iavf_mac_filter	*f;
2047
2048	f = iavf_find_mac_filter(sc, macaddr);
2049	if (f == NULL)
2050		return (ENOENT);
2051
2052	f->flags |= IAVF_FILTER_DEL;
2053	return (0);
2054}
2055
2056/*
2057 * Re-uses the name from the PF driver.
2058 */
2059static void
2060iavf_add_device_sysctls(struct iavf_sc *sc)
2061{
2062	struct ixl_vsi *vsi = &sc->vsi;
2063	device_t dev = sc->dev;
2064
2065	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2066	struct sysctl_oid_list *ctx_list =
2067	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2068	struct sysctl_oid *debug_node;
2069	struct sysctl_oid_list *debug_list;
2070
2071	SYSCTL_ADD_PROC(ctx, ctx_list,
2072	    OID_AUTO, "current_speed",
2073	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2074	    sc, 0, iavf_sysctl_current_speed, "A", "Current Port Speed");
2075
2076	SYSCTL_ADD_PROC(ctx, ctx_list,
2077	    OID_AUTO, "tx_itr",
2078	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2079	    sc, 0, iavf_sysctl_tx_itr, "I",
2080	    "Immediately set TX ITR value for all queues");
2081
2082	SYSCTL_ADD_PROC(ctx, ctx_list,
2083	    OID_AUTO, "rx_itr",
2084	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2085	    sc, 0, iavf_sysctl_rx_itr, "I",
2086	    "Immediately set RX ITR value for all queues");
2087
2088	/* Add sysctls meant to print debug information, but don't list them
2089	 * in "sysctl -a" output. */
2090	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2091	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
2092	    NULL, "Debug Sysctls");
2093	debug_list = SYSCTL_CHILDREN(debug_node);
2094
2095	SYSCTL_ADD_UINT(ctx, debug_list,
2096	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2097	    &sc->hw.debug_mask, 0, "Shared code debug message level");
2098
2099	SYSCTL_ADD_UINT(ctx, debug_list,
2100	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2101	    &sc->dbg_mask, 0, "Non-shared code debug message level");
2102
2103	SYSCTL_ADD_PROC(ctx, debug_list,
2104	    OID_AUTO, "filter_list",
2105	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2106	    sc, 0, iavf_sysctl_sw_filter_list, "A", "SW Filter List");
2107
2108	SYSCTL_ADD_PROC(ctx, debug_list,
2109	    OID_AUTO, "queue_interrupt_table",
2110	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2111	    sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2112
2113	SYSCTL_ADD_PROC(ctx, debug_list,
2114	    OID_AUTO, "do_vf_reset",
2115	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2116	    sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF");
2117
2118	SYSCTL_ADD_PROC(ctx, debug_list,
2119	    OID_AUTO, "do_vflr_reset",
2120	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2121	    sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW");
2122
2123	/* Add stats sysctls */
2124	ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi");
2125	ixl_vsi_add_queues_stats(vsi, ctx);
2126
2127}
2128
2129static void
2130iavf_init_filters(struct iavf_sc *sc)
2131{
2132	sc->mac_filters = malloc(sizeof(struct mac_list),
2133	    M_IAVF, M_WAITOK | M_ZERO);
2134	SLIST_INIT(sc->mac_filters);
2135	sc->vlan_filters = malloc(sizeof(struct vlan_list),
2136	    M_IAVF, M_WAITOK | M_ZERO);
2137	SLIST_INIT(sc->vlan_filters);
2138}
2139
2140static void
2141iavf_free_filters(struct iavf_sc *sc)
2142{
2143	struct iavf_mac_filter *f;
2144	struct iavf_vlan_filter *v;
2145
2146	while (!SLIST_EMPTY(sc->mac_filters)) {
2147		f = SLIST_FIRST(sc->mac_filters);
2148		SLIST_REMOVE_HEAD(sc->mac_filters, next);
2149		free(f, M_IAVF);
2150	}
2151	free(sc->mac_filters, M_IAVF);
2152	while (!SLIST_EMPTY(sc->vlan_filters)) {
2153		v = SLIST_FIRST(sc->vlan_filters);
2154		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2155		free(v, M_IAVF);
2156	}
2157	free(sc->vlan_filters, M_IAVF);
2158}
2159
2160char *
2161iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed)
2162{
2163	int index;
2164
2165	char *speeds[] = {
2166		"Unknown",
2167		"100 Mbps",
2168		"1 Gbps",
2169		"10 Gbps",
2170		"40 Gbps",
2171		"20 Gbps",
2172		"25 Gbps",
2173	};
2174
2175	switch (link_speed) {
2176	case VIRTCHNL_LINK_SPEED_100MB:
2177		index = 1;
2178		break;
2179	case VIRTCHNL_LINK_SPEED_1GB:
2180		index = 2;
2181		break;
2182	case VIRTCHNL_LINK_SPEED_10GB:
2183		index = 3;
2184		break;
2185	case VIRTCHNL_LINK_SPEED_40GB:
2186		index = 4;
2187		break;
2188	case VIRTCHNL_LINK_SPEED_20GB:
2189		index = 5;
2190		break;
2191	case VIRTCHNL_LINK_SPEED_25GB:
2192		index = 6;
2193		break;
2194	case VIRTCHNL_LINK_SPEED_UNKNOWN:
2195	default:
2196		index = 0;
2197		break;
2198	}
2199
2200	return speeds[index];
2201}
2202
2203static int
2204iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2205{
2206	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2207	int error = 0;
2208
2209	error = sysctl_handle_string(oidp,
2210	  iavf_vc_speed_to_string(sc->link_speed),
2211	  8, req);
2212	return (error);
2213}
2214
2215/*
2216 * Sanity check and save off tunable values.
2217 */
2218static void
2219iavf_save_tunables(struct iavf_sc *sc)
2220{
2221	device_t dev = sc->dev;
2222
2223	/* Save tunable information */
2224	sc->dbg_mask = iavf_core_debug_mask;
2225	sc->hw.debug_mask = iavf_shared_debug_mask;
2226	sc->vsi.enable_head_writeback = !!(iavf_enable_head_writeback);
2227
2228	if (iavf_tx_itr < 0 || iavf_tx_itr > IXL_MAX_ITR) {
2229		device_printf(dev, "Invalid tx_itr value of %d set!\n",
2230		    iavf_tx_itr);
2231		device_printf(dev, "tx_itr must be between %d and %d, "
2232		    "inclusive\n",
2233		    0, IXL_MAX_ITR);
2234		device_printf(dev, "Using default value of %d instead\n",
2235		    IXL_ITR_4K);
2236		sc->tx_itr = IXL_ITR_4K;
2237	} else
2238		sc->tx_itr = iavf_tx_itr;
2239
2240	if (iavf_rx_itr < 0 || iavf_rx_itr > IXL_MAX_ITR) {
2241		device_printf(dev, "Invalid rx_itr value of %d set!\n",
2242		    iavf_rx_itr);
2243		device_printf(dev, "rx_itr must be between %d and %d, "
2244		    "inclusive\n",
2245		    0, IXL_MAX_ITR);
2246		device_printf(dev, "Using default value of %d instead\n",
2247		    IXL_ITR_8K);
2248		sc->rx_itr = IXL_ITR_8K;
2249	} else
2250		sc->rx_itr = iavf_rx_itr;
2251}
2252
2253/*
2254 * Used to set the Tx ITR value for all of the VF's queues.
2255 * Writes to the ITR registers immediately.
2256 */
2257static int
2258iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS)
2259{
2260	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2261	device_t dev = sc->dev;
2262	int requested_tx_itr;
2263	int error = 0;
2264
2265	requested_tx_itr = sc->tx_itr;
2266	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
2267	if ((error) || (req->newptr == NULL))
2268		return (error);
2269	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2270		device_printf(dev,
2271		    "Invalid TX itr value; value must be between 0 and %d\n",
2272		        IXL_MAX_ITR);
2273		return (EINVAL);
2274	}
2275
2276	sc->tx_itr = requested_tx_itr;
2277	iavf_configure_tx_itr(sc);
2278
2279	return (error);
2280}
2281
2282/*
2283 * Used to set the Rx ITR value for all of the VF's queues.
2284 * Writes to the ITR registers immediately.
2285 */
2286static int
2287iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS)
2288{
2289	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2290	device_t dev = sc->dev;
2291	int requested_rx_itr;
2292	int error = 0;
2293
2294	requested_rx_itr = sc->rx_itr;
2295	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2296	if ((error) || (req->newptr == NULL))
2297		return (error);
2298	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2299		device_printf(dev,
2300		    "Invalid RX itr value; value must be between 0 and %d\n",
2301		        IXL_MAX_ITR);
2302		return (EINVAL);
2303	}
2304
2305	sc->rx_itr = requested_rx_itr;
2306	iavf_configure_rx_itr(sc);
2307
2308	return (error);
2309}
2310
2311static int
2312iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
2313{
2314	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2315	struct iavf_mac_filter *f;
2316	struct iavf_vlan_filter *v;
2317	device_t dev = sc->dev;
2318	int ftl_len, ftl_counter = 0, error = 0;
2319	struct sbuf *buf;
2320
2321	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2322	if (!buf) {
2323		device_printf(dev, "Could not allocate sbuf for output.\n");
2324		return (ENOMEM);
2325	}
2326
2327	sbuf_printf(buf, "\n");
2328
2329	/* Print MAC filters */
2330	sbuf_printf(buf, "MAC Filters:\n");
2331	ftl_len = 0;
2332	SLIST_FOREACH(f, sc->mac_filters, next)
2333		ftl_len++;
2334	if (ftl_len < 1)
2335		sbuf_printf(buf, "(none)\n");
2336	else {
2337		SLIST_FOREACH(f, sc->mac_filters, next) {
2338			sbuf_printf(buf,
2339			    MAC_FORMAT ", flags %#06x\n",
2340			    MAC_FORMAT_ARGS(f->macaddr), f->flags);
2341		}
2342	}
2343
2344	/* Print VLAN filters */
2345	sbuf_printf(buf, "VLAN Filters:\n");
2346	ftl_len = 0;
2347	SLIST_FOREACH(v, sc->vlan_filters, next)
2348		ftl_len++;
2349	if (ftl_len < 1)
2350		sbuf_printf(buf, "(none)");
2351	else {
2352		SLIST_FOREACH(v, sc->vlan_filters, next) {
2353			sbuf_printf(buf,
2354			    "%d, flags %#06x",
2355			    v->vlan, v->flags);
2356			/* don't print '\n' for last entry */
2357			if (++ftl_counter != ftl_len)
2358				sbuf_printf(buf, "\n");
2359		}
2360	}
2361
2362	error = sbuf_finish(buf);
2363	if (error)
2364		device_printf(dev, "Error finishing sbuf: %d\n", error);
2365
2366	sbuf_delete(buf);
2367	return (error);
2368}
2369
2370/*
2371 * Print out mapping of TX queue indexes and Rx queue indexes
2372 * to MSI-X vectors.
2373 */
2374static int
2375iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
2376{
2377	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2378	struct ixl_vsi *vsi = &sc->vsi;
2379	device_t dev = sc->dev;
2380	struct sbuf *buf;
2381	int error = 0;
2382
2383	struct ixl_rx_queue *rx_que = vsi->rx_queues;
2384	struct ixl_tx_queue *tx_que = vsi->tx_queues;
2385
2386	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2387	if (!buf) {
2388		device_printf(dev, "Could not allocate sbuf for output.\n");
2389		return (ENOMEM);
2390	}
2391
2392	sbuf_cat(buf, "\n");
2393	for (int i = 0; i < vsi->num_rx_queues; i++) {
2394		rx_que = &vsi->rx_queues[i];
2395		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
2396	}
2397	for (int i = 0; i < vsi->num_tx_queues; i++) {
2398		tx_que = &vsi->tx_queues[i];
2399		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
2400	}
2401
2402	error = sbuf_finish(buf);
2403	if (error)
2404		device_printf(dev, "Error finishing sbuf: %d\n", error);
2405	sbuf_delete(buf);
2406
2407	return (error);
2408}
2409
2410#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
2411static int
2412iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
2413{
2414	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2415	int do_reset = 0, error = 0;
2416
2417	error = sysctl_handle_int(oidp, &do_reset, 0, req);
2418	if ((error) || (req->newptr == NULL))
2419		return (error);
2420
2421	if (do_reset == 1) {
2422		iavf_reset(sc);
2423		if (CTX_ACTIVE(sc->vsi.ctx))
2424			iflib_request_reset(sc->vsi.ctx);
2425	}
2426
2427	return (error);
2428}
2429
2430static int
2431iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
2432{
2433	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2434	device_t dev = sc->dev;
2435	int do_reset = 0, error = 0;
2436
2437	error = sysctl_handle_int(oidp, &do_reset, 0, req);
2438	if ((error) || (req->newptr == NULL))
2439		return (error);
2440
2441	if (do_reset == 1) {
2442		if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
2443			device_printf(dev, "PCIE FLR failed\n");
2444			error = EIO;
2445		}
2446		else if (CTX_ACTIVE(sc->vsi.ctx))
2447			iflib_request_reset(sc->vsi.ctx);
2448	}
2449
2450	return (error);
2451}
2452#undef CTX_ACTIVE
2453