/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixl/if_ixl.c 299549 2016-05-12 18:20:36Z erj $*/

#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"
#include "ixl_pf.h"

#ifdef RSS
#include <net/rss_config.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.4.12-k";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void	ixl_stop_locked(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_switch_config(struct ixl_pf *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_init_taskqueues(struct ixl_pf *);
static void	ixl_free_taskqueues(struct ixl_pf *);
static void	ixl_free_interrupt_resources(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);
static int	ixl_set_advertised_speeds(struct ixl_pf *, int);

static int	ixl_enable_rings(struct ixl_vsi *);
static int	ixl_disable_rings(struct ixl_vsi *);
static void	ixl_enable_intr(struct ixl_vsi *);
static void	ixl_disable_intr(struct ixl_vsi *);
static void	ixl_disable_rings_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
static void	ixl_free_mac_filters(struct ixl_vsi *vsi);

/* Sysctls */
static void	ixl_add_device_sysctls(struct ixl_pf *);

static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);

static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);

#ifdef IXL_DEBUG_SYSCTL
static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
#endif

/* The MSI/X Interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_update_vsi_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);
/* NVM update */
static int	ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);


#ifdef PCI_IOV
static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);

static int	ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
static void	ixl_iov_uninit(device_t dev);
static int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);

static void	ixl_handle_vf_msg(struct ixl_pf *,
		    struct i40e_arq_event_info *);
static void	ixl_handle_vflr(void *arg, int pending);

static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixl_iov_init),
	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
#endif
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

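/*
 * Usage note: the driver can be compiled into the kernel with "device ixl",
 * or loaded as a module at boot time by adding if_ixl_load="YES" to
 * loader.conf(5).
 */
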
/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually, if left as 0 the
** number of queues will be calculated based
** on cpus and msix vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif
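
/*
 * Example (illustrative values, not from the original file): because the
 * knobs above are created with CTLFLAG_RDTUN, they are read once at boot
 * as loader tunables, e.g. in /boot/loader.conf:
 *
 *	hw.ixl.enable_msix="1"
 *	hw.ixl.ringsz="1024"
 *	hw.ixl.max_queues="0"
 *	hw.ixl.dynamic_rx_itr="1"
 *
 * and afterwards appear read-only under the hw.ixl sysctl tree.
 */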

#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi *vsi;
	u16		bus;
	int             error = 0;
#ifdef PCI_IOV
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;
#endif

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init*/
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	pf->vc_debug_lvl = 1;

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "PF reset failure %d\n", error);
		error = EIO;
		goto err_out;
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Unable to initialize shared code, error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));
	if (error == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up VSI and queues */
	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
			goto err_late;
		}
	}

	/* Determine link state */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, vsi) != 0) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n", error);
		goto err_late;
	}

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
        if (error) {
		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		goto err_late;
	}

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize taskqueues */
	ixl_init_taskqueues(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);

	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
	/* SR-IOV is only supported when MSI-X is in use. */
	if (pf->msix > 1) {
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);

		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (iov_error != 0)
			device_printf(dev,
			    "Failed to initialize SR-IOV (error=%d)\n",
			    iov_error);
	}
#endif

#ifdef DEV_NETMAP
	ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	i40e_status		status;
#ifdef PCI_IOV
	int			error;
#endif

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif

	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixl_stop(pf);

	ixl_free_taskqueues(pf);

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	callout_drain(&pf->timer);
#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);
	ixl_stop(pf);
	return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	int             error, len;
	u16		needed;
	bool		again = TRUE;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
                return (ENOMEM);
	}

	/* This populates the hw struct */
        error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}

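/*
 * Helper for the SIOCSIFCAP ioctl: keep the TX checksum and TSO capability
 * bits consistent, since TSO requires the matching checksum offload.
 * Disabling TXCSUM while TSO4 is enabled turns both off and records it in
 * IXL_FLAGS_KEEP_TSO4 so TSO4 can be restored when TXCSUM is re-enabled;
 * TXCSUM_IPV6/TSO6 are handled the same way.
 */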
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t 	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = vsi->back;
	struct ifreq	*ifr = (struct ifreq *)data;
	struct ifdrv	*ifd = (struct ifdrv *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

        case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
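			/*
			 * The maximum frame adds L2 overhead to the MTU:
			 * e.g. a 1500 byte MTU gives 1500 + 14 (Ethernet
			 * header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
			 */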
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else {
				IXL_PF_UNLOCK(pf);
				ixl_init(pf);
				IXL_PF_LOCK(pf);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				IXL_PF_UNLOCK(pf);
				ixl_stop(pf);
				IXL_PF_LOCK(pf);
			}
		}
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCSDRVSPEC:
	case SIOCGDRVSPEC:
		IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
		    "Info)\n");

		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS)
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		else
			error = EINVAL;
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
#ifdef IFM_ETH_XTYPE
	case SIOCGIFXMEDIA:
#endif
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: it is used by the stack as the
 *  init entry point in the network interface structure, and it is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t 	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");

	ixl_stop_locked(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!\n");
			return;
		} else {
			ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* Set MTU in hardware*/
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
			"aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Get link info */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
}

// XXX: super experimental stuff
static int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %d\n", status);
			goto err_out;
		}
	}

	// XXX: This gets called when we know the adminq is inactive;
	// so we already know it's setup when we get here.

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %d\n", status);

err_out:
	return (status);
}

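/*
 * Counterpart to ixl_teardown_hw_structs(): rebuild the core hardware state
 * (PF reset, admin queue, capabilities, LAN HMC, PHY interrupt mask, flow
 * control) after the admin queue has been found dead in ixl_init().
 */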
static int
ixl_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int error = 0;

	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "init: PF reset failure");
		error = EIO;
		goto err_out;
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "init: Admin queue init failure; status code %d", error);
		error = EIO;
		goto err_out;
	}

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "init: Error retrieving HW capabilities; status code %d\n", error);
		goto err_out;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init: LAN HMC init failed; status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "init: LAN HMC config failed; status code %d\n", error);
		error = EIO;
		goto err_out;
	}

	// XXX: need to do switch config here?

	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
        if (error) {
		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		error = EIO;
		goto err_out;
	}

	u8 set_fc_err_mask;
	error = i40e_set_fc(hw, &set_fc_err_mask, true);
	if (error) {
		device_printf(dev, "init: setting link flow control failed; retcode %d,"
		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
		goto err_out;
	}

	// XXX: (Rebuild VSIs?)

	// Firmware delay workaround
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "init: link restart failed, aq_err %d\n",
			    hw->aq.asq_last_status);
			goto err_out;
		}
	}

	// [add_filter_to_drop_tx_flow_control_frames]
	// - TODO: Implement

	// i40e_send_version
	// - TODO: Properly implement
	struct i40e_driver_version dv;

	dv.major_version = 1;
	dv.minor_version = 1;
	dv.build_version = 1;
	dv.subbuild_version = 0;
	// put in a driver version string that is less than 0x80 bytes long
	bzero(&dv.driver_string, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(hw, &dv, NULL);

err_out:
	return (error);
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;
	int ret = 0;

	/*
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(pf->dev, "asq is not alive; rebuilding...\n");
		IXL_PF_LOCK(pf);
		ixl_teardown_hw_structs(pf);
		ixl_reset(pf);
		IXL_PF_UNLOCK(pf);
	}

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		ret = ixl_assign_vsi_msix(pf);
	else
		ret = ixl_assign_vsi_legacy(pf);
	if (ret) {
		device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", ret);
		return;
	}

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
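/*
 * Deferred (taskqueue) handler for a queue interrupt: cleans the TX ring,
 * receives up to IXL_RX_LIMIT packets, restarts transmission if frames are
 * still queued, reschedules itself while RX work remains, and otherwise
 * re-enables the queue interrupt.
 */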
1335266423Sjfvstatic void
1336270346Sjfvixl_handle_que(void *context, int pending)
1337266423Sjfv{
1338270346Sjfv	struct ixl_queue *que = context;
1339270346Sjfv	struct ixl_vsi *vsi = que->vsi;
1340266423Sjfv	struct i40e_hw  *hw = vsi->hw;
1341266423Sjfv	struct tx_ring  *txr = &que->txr;
1342266423Sjfv	struct ifnet    *ifp = vsi->ifp;
1343266423Sjfv	bool		more;
1344266423Sjfv
1345266423Sjfv	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1346270346Sjfv		more = ixl_rxeof(que, IXL_RX_LIMIT);
1347270346Sjfv		IXL_TX_LOCK(txr);
1348270346Sjfv		ixl_txeof(que);
1349266423Sjfv		if (!drbr_empty(ifp, txr->br))
1350270346Sjfv			ixl_mq_start_locked(ifp, txr);
1351270346Sjfv		IXL_TX_UNLOCK(txr);
1352266423Sjfv		if (more) {
1353266423Sjfv			taskqueue_enqueue(que->tq, &que->task);
1354266423Sjfv			return;
1355266423Sjfv		}
1356266423Sjfv	}
1357266423Sjfv
1358266423Sjfv	/* Reenable this interrupt - hmmm */
1359270346Sjfv	ixl_enable_queue(hw, que->me);
1360266423Sjfv	return;
1361266423Sjfv}
1362266423Sjfv
1363266423Sjfv
1364266423Sjfv/*********************************************************************
1365266423Sjfv *
1366266423Sjfv *  Legacy Interrupt Service routine
1367266423Sjfv *
1368266423Sjfv **********************************************************************/
1369266423Sjfvvoid
1370270346Sjfvixl_intr(void *arg)
1371266423Sjfv{
1372270346Sjfv	struct ixl_pf		*pf = arg;
1373266423Sjfv	struct i40e_hw		*hw =  &pf->hw;
1374270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1375270346Sjfv	struct ixl_queue	*que = vsi->queues;
1376266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1377266423Sjfv	struct tx_ring		*txr = &que->txr;
1378266423Sjfv        u32			reg, icr0, mask;
1379266423Sjfv	bool			more_tx, more_rx;
1380266423Sjfv
1381266423Sjfv	++que->irqs;
1382266423Sjfv
1383266423Sjfv	/* Protect against spurious interrupts */
1384266423Sjfv	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1385266423Sjfv		return;
1386266423Sjfv
1387266423Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0);
1388266423Sjfv
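	/* Ack the interrupt: writing CLEARPBA clears this vector's pending bit */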
1389266423Sjfv	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1390266423Sjfv	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1391266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1392266423Sjfv
1393266423Sjfv        mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1394266423Sjfv
1395279858Sjfv#ifdef PCI_IOV
1396279858Sjfv	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
1397279858Sjfv		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1398279858Sjfv#endif
1399279858Sjfv
1400266423Sjfv	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1401266423Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
1402266423Sjfv		return;
1403266423Sjfv	}
1404266423Sjfv
1405270346Sjfv	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1406266423Sjfv
1407270346Sjfv	IXL_TX_LOCK(txr);
1408270346Sjfv	more_tx = ixl_txeof(que);
1409266423Sjfv	if (!drbr_empty(vsi->ifp, txr->br))
1410266423Sjfv		more_tx = 1;
1411270346Sjfv	IXL_TX_UNLOCK(txr);
1412266423Sjfv
1413266423Sjfv	/* re-enable other interrupt causes */
1414266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1415266423Sjfv
1416266423Sjfv	/* And now the queues */
1417266423Sjfv	reg = rd32(hw, I40E_QINT_RQCTL(0));
1418266423Sjfv	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1419266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
1420266423Sjfv
1421266423Sjfv	reg = rd32(hw, I40E_QINT_TQCTL(0));
1422266423Sjfv	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1423266423Sjfv	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1424266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
1425266423Sjfv
1426270346Sjfv	ixl_enable_legacy(hw);
1427266423Sjfv
1428266423Sjfv	return;
1429266423Sjfv}
1430266423Sjfv
1431266423Sjfv
1432266423Sjfv/*********************************************************************
1433266423Sjfv *
1434266423Sjfv *  MSIX VSI Interrupt Service routine
1435266423Sjfv *
1436266423Sjfv **********************************************************************/
1437266423Sjfvvoid
1438270346Sjfvixl_msix_que(void *arg)
1439266423Sjfv{
1440270346Sjfv	struct ixl_queue	*que = arg;
1441270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
1442266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1443266423Sjfv	struct tx_ring	*txr = &que->txr;
1444266423Sjfv	bool		more_tx, more_rx;
1445266423Sjfv
1446269198Sjfv	/* Protect against spurious interrupts */
1447269198Sjfv	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1448269198Sjfv		return;
1449269198Sjfv
1450266423Sjfv	++que->irqs;
1451266423Sjfv
1452270346Sjfv	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1453266423Sjfv
1454270346Sjfv	IXL_TX_LOCK(txr);
1455270346Sjfv	more_tx = ixl_txeof(que);
1456266423Sjfv	/*
1457266423Sjfv	** Make certain that if the stack
1458266423Sjfv	** has anything queued the task gets
1459266423Sjfv	** scheduled to handle it.
1460266423Sjfv	*/
1461266423Sjfv	if (!drbr_empty(vsi->ifp, txr->br))
1462266423Sjfv		more_tx = 1;
1463270346Sjfv	IXL_TX_UNLOCK(txr);
1464266423Sjfv
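	/* Update this queue's RX/TX interrupt throttling (ITR) settings */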
1465270346Sjfv	ixl_set_queue_rx_itr(que);
1466270346Sjfv	ixl_set_queue_tx_itr(que);
1467266423Sjfv
1468266423Sjfv	if (more_tx || more_rx)
1469266423Sjfv		taskqueue_enqueue(que->tq, &que->task);
1470266423Sjfv	else
1471270346Sjfv		ixl_enable_queue(hw, que->me);
1472266423Sjfv
1473266423Sjfv	return;
1474266423Sjfv}
1475266423Sjfv
1476266423Sjfv
1477266423Sjfv/*********************************************************************
1478266423Sjfv *
1479266423Sjfv *  MSIX Admin Queue Interrupt Service routine
1480266423Sjfv *
1481266423Sjfv **********************************************************************/
1482266423Sjfvstatic void
1483270346Sjfvixl_msix_adminq(void *arg)
1484266423Sjfv{
1485270346Sjfv	struct ixl_pf	*pf = arg;
1486266423Sjfv	struct i40e_hw	*hw = &pf->hw;
1487299549Serj	u32		reg, mask, rstat_reg;
1488299549Serj	bool		do_task = FALSE;
1489266423Sjfv
1490266423Sjfv	++pf->admin_irq;
1491266423Sjfv
1492266423Sjfv	reg = rd32(hw, I40E_PFINT_ICR0);
1493266423Sjfv	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1494266423Sjfv
1495266423Sjfv	/* Check on the cause */
1496299549Serj	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
1497299549Serj		mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
1498299549Serj		do_task = TRUE;
1499299549Serj	}
1500266423Sjfv
1501269198Sjfv	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1502270346Sjfv		ixl_handle_mdd_event(pf);
1503299549Serj		mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
1504269198Sjfv	}
1505266423Sjfv
1506299549Serj	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
1507299549Serj		device_printf(pf->dev, "Reset Requested!\n");
1508299549Serj		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
1509299549Serj		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
1510299549Serj		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
1511299549Serj		device_printf(pf->dev, "Reset type: ");
1512299549Serj		switch (rstat_reg) {
1513299549Serj		/* These others might be handled similarly to an EMPR reset */
1514299549Serj		case I40E_RESET_CORER:
1515299549Serj			printf("CORER\n");
1516299549Serj			break;
1517299549Serj		case I40E_RESET_GLOBR:
1518299549Serj			printf("GLOBR\n");
1519299549Serj			break;
1520299549Serj		case I40E_RESET_EMPR:
1521299549Serj			printf("EMPR\n");
1522299549Serj			atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
1523299549Serj			break;
1524299549Serj		default:
1525299549Serj			printf("?\n");
1526299549Serj			break;
1527299549Serj		}
1528299549Serj		// overload admin queue task to check reset progress?
1529299549Serj		do_task = TRUE;
1530299549Serj	}
1531299549Serj
1532299549Serj	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
1533299549Serj		device_printf(pf->dev, "ECC Error detected!\n");
1534299549Serj	}
1535299549Serj
1536299549Serj	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
1537299549Serj		device_printf(pf->dev, "HMC Error detected!\n");
1538299549Serj	}
1539299549Serj
1540299549Serj	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
1541299549Serj		device_printf(pf->dev, "PCI Exception detected!\n");
1542299549Serj	}
1543299549Serj
1544279858Sjfv#ifdef PCI_IOV
1545279858Sjfv	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
1546266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1547279858Sjfv		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1548279858Sjfv	}
1549279858Sjfv#endif
1550266423Sjfv
1551266423Sjfv	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1552266423Sjfv	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1553266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1554266423Sjfv
1555299549Serj	if (do_task)
1556299549Serj		taskqueue_enqueue(pf->tq, &pf->adminq);
1557266423Sjfv}
1558266423Sjfv
1559266423Sjfv/*********************************************************************
1560266423Sjfv *
1561266423Sjfv *  Media Ioctl callback
1562266423Sjfv *
1563266423Sjfv *  This routine is called whenever the user queries the status of
1564266423Sjfv *  the interface using ifconfig.
1565266423Sjfv *
1566266423Sjfv **********************************************************************/
1567266423Sjfvstatic void
1568270346Sjfvixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1569266423Sjfv{
1570270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
1571279858Sjfv	struct ixl_pf	*pf = vsi->back;
1572266423Sjfv	struct i40e_hw  *hw = &pf->hw;
1573266423Sjfv
1574270346Sjfv	INIT_DEBUGOUT("ixl_media_status: begin");
1575270346Sjfv	IXL_PF_LOCK(pf);
1576266423Sjfv
1577279858Sjfv	hw->phy.get_link_info = TRUE;
1578284049Sjfv	i40e_get_link_status(hw, &pf->link_up);
1579270346Sjfv	ixl_update_link_status(pf);
1580266423Sjfv
1581266423Sjfv	ifmr->ifm_status = IFM_AVALID;
1582266423Sjfv	ifmr->ifm_active = IFM_ETHER;
1583266423Sjfv
1584279858Sjfv	if (!pf->link_up) {
1585270346Sjfv		IXL_PF_UNLOCK(pf);
1586266423Sjfv		return;
1587266423Sjfv	}
1588266423Sjfv
1589266423Sjfv	ifmr->ifm_status |= IFM_ACTIVE;
1590299545Serj
1591299545Serj	/* Hardware always does full-duplex */
1592266423Sjfv	ifmr->ifm_active |= IFM_FDX;
1593266423Sjfv
1594266423Sjfv	switch (hw->phy.link_info.phy_type) {
1595266423Sjfv		/* 100 M */
1596266423Sjfv		case I40E_PHY_TYPE_100BASE_TX:
1597266423Sjfv			ifmr->ifm_active |= IFM_100_TX;
1598266423Sjfv			break;
1599266423Sjfv		/* 1 G */
1600266423Sjfv		case I40E_PHY_TYPE_1000BASE_T:
1601266423Sjfv			ifmr->ifm_active |= IFM_1000_T;
1602266423Sjfv			break;
1603269198Sjfv		case I40E_PHY_TYPE_1000BASE_SX:
1604269198Sjfv			ifmr->ifm_active |= IFM_1000_SX;
1605269198Sjfv			break;
1606269198Sjfv		case I40E_PHY_TYPE_1000BASE_LX:
1607269198Sjfv			ifmr->ifm_active |= IFM_1000_LX;
1608269198Sjfv			break;
1609266423Sjfv		/* 10 G */
1610266423Sjfv		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1611266423Sjfv			ifmr->ifm_active |= IFM_10G_TWINAX;
1612266423Sjfv			break;
1613266423Sjfv		case I40E_PHY_TYPE_10GBASE_SR:
1614266423Sjfv			ifmr->ifm_active |= IFM_10G_SR;
1615266423Sjfv			break;
1616266423Sjfv		case I40E_PHY_TYPE_10GBASE_LR:
1617266423Sjfv			ifmr->ifm_active |= IFM_10G_LR;
1618266423Sjfv			break;
1619270346Sjfv		case I40E_PHY_TYPE_10GBASE_T:
1620270346Sjfv			ifmr->ifm_active |= IFM_10G_T;
1621270346Sjfv			break;
1622266423Sjfv		/* 40 G */
1623266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4:
1624266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1625266423Sjfv			ifmr->ifm_active |= IFM_40G_CR4;
1626266423Sjfv			break;
1627266423Sjfv		case I40E_PHY_TYPE_40GBASE_SR4:
1628266423Sjfv			ifmr->ifm_active |= IFM_40G_SR4;
1629266423Sjfv			break;
1630266423Sjfv		case I40E_PHY_TYPE_40GBASE_LR4:
1631266423Sjfv			ifmr->ifm_active |= IFM_40G_LR4;
1632266423Sjfv			break;
1633284049Sjfv#ifndef IFM_ETH_XTYPE
1634284049Sjfv		case I40E_PHY_TYPE_1000BASE_KX:
1635284049Sjfv			ifmr->ifm_active |= IFM_1000_CX;
1636284049Sjfv			break;
1637284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1638284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1:
1639284049Sjfv			ifmr->ifm_active |= IFM_10G_TWINAX;
1640284049Sjfv			break;
1641284049Sjfv		case I40E_PHY_TYPE_10GBASE_KX4:
1642284049Sjfv			ifmr->ifm_active |= IFM_10G_CX4;
1643284049Sjfv			break;
1644284049Sjfv		case I40E_PHY_TYPE_10GBASE_KR:
1645284049Sjfv			ifmr->ifm_active |= IFM_10G_SR;
1646284049Sjfv			break;
1647279033Sjfv		case I40E_PHY_TYPE_40GBASE_KR4:
1648279033Sjfv		case I40E_PHY_TYPE_XLPPI:
1649284049Sjfv			ifmr->ifm_active |= IFM_40G_SR4;
1650279033Sjfv			break;
1651284049Sjfv#else
1652284049Sjfv		case I40E_PHY_TYPE_1000BASE_KX:
1653284049Sjfv			ifmr->ifm_active |= IFM_1000_KX;
1654284049Sjfv			break;
1655284049Sjfv		/* ERJ: What's the difference between these? */
1656284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1657284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1:
1658284049Sjfv			ifmr->ifm_active |= IFM_10G_CR1;
1659284049Sjfv			break;
1660284049Sjfv		case I40E_PHY_TYPE_10GBASE_KX4:
1661284049Sjfv			ifmr->ifm_active |= IFM_10G_KX4;
1662284049Sjfv			break;
1663284049Sjfv		case I40E_PHY_TYPE_10GBASE_KR:
1664284049Sjfv			ifmr->ifm_active |= IFM_10G_KR;
1665284049Sjfv			break;
1666299545Serj		/* Our single 20G media type */
1667284049Sjfv		case I40E_PHY_TYPE_20GBASE_KR2:
1668284049Sjfv			ifmr->ifm_active |= IFM_20G_KR2;
1669284049Sjfv			break;
1670284049Sjfv		case I40E_PHY_TYPE_40GBASE_KR4:
1671284049Sjfv			ifmr->ifm_active |= IFM_40G_KR4;
1672284049Sjfv			break;
1673284049Sjfv		case I40E_PHY_TYPE_XLPPI:
1674284049Sjfv			ifmr->ifm_active |= IFM_40G_XLPPI;
1675284049Sjfv			break;
1676284049Sjfv#endif
1677266423Sjfv		default:
1678266423Sjfv			ifmr->ifm_active |= IFM_UNKNOWN;
1679266423Sjfv			break;
1680266423Sjfv	}
1681266423Sjfv	/* Report flow control status as well */
1682266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1683266423Sjfv		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1684266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1685266423Sjfv		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1686266423Sjfv
1687270346Sjfv	IXL_PF_UNLOCK(pf);
1688266423Sjfv
1689266423Sjfv	return;
1690266423Sjfv}
1691266423Sjfv
1692299545Serj/*
1693299545Serj * NOTE: Fortville does not support forcing media speeds. Instead,
1694299545Serj * use the set_advertise sysctl to set the speeds Fortville
1695299545Serj * will advertise or be allowed to operate at.
1696299545Serj */
1697266423Sjfvstatic int
1698270346Sjfvixl_media_change(struct ifnet * ifp)
1699266423Sjfv{
1700270346Sjfv	struct ixl_vsi *vsi = ifp->if_softc;
1701266423Sjfv	struct ifmedia *ifm = &vsi->media;
1702266423Sjfv
1703270346Sjfv	INIT_DEBUGOUT("ixl_media_change: begin");
1704266423Sjfv
1705266423Sjfv	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1706266423Sjfv		return (EINVAL);
1707266423Sjfv
1708299545Serj	if_printf(ifp, "Media change is not supported.\n");
1709269198Sjfv
1710269198Sjfv	return (ENODEV);
1711266423Sjfv}
1712266423Sjfv
1713266423Sjfv
1714270346Sjfv#ifdef IXL_FDIR
1715266423Sjfv/*
1716266423Sjfv** ATR: Application Targeted Receive - creates a filter
1717266423Sjfv**	based on TX flow info that will keep the receive
1718266423Sjfv**	portion of the flow on the same queue. Based on the
1719266423Sjfv**	implementation this is only available for TCP connections
1720266423Sjfv*/
1721266423Sjfvvoid
1722270346Sjfvixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1723266423Sjfv{
1724270346Sjfv	struct ixl_vsi			*vsi = que->vsi;
1725266423Sjfv	struct tx_ring			*txr = &que->txr;
1726266423Sjfv	struct i40e_filter_program_desc	*FDIR;
1727266423Sjfv	u32				ptype, dtype;
1728266423Sjfv	int				idx;
1729266423Sjfv
1730266423Sjfv	/* check if ATR is enabled and sample rate */
1731270346Sjfv	if ((!ixl_enable_fdir) || (!txr->atr_rate))
1732266423Sjfv		return;
1733266423Sjfv	/*
1734266423Sjfv	** We always sample TCP SYN/FIN packets;
1735266423Sjfv	** other packets are sampled at the selected rate
1736266423Sjfv	*/
1737266423Sjfv	txr->atr_count++;
1738266423Sjfv	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1739266423Sjfv	    (txr->atr_count < txr->atr_rate))
1740266423Sjfv                return;
1741266423Sjfv	txr->atr_count = 0;
1742266423Sjfv
1743266423Sjfv	/* Get a descriptor to use */
1744266423Sjfv	idx = txr->next_avail;
1745266423Sjfv	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1746266423Sjfv	if (++idx == que->num_desc)
1747266423Sjfv		idx = 0;
1748266423Sjfv	txr->avail--;
1749266423Sjfv	txr->next_avail = idx;
1750266423Sjfv
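	/* Build the filter program descriptor: queue index, packet type, destination VSI */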
1751266423Sjfv	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1752266423Sjfv	    I40E_TXD_FLTR_QW0_QINDEX_MASK;
1753266423Sjfv
1754266423Sjfv	ptype |= (etype == ETHERTYPE_IP) ?
1755266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1756266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1757266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1758266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1759266423Sjfv
1760266423Sjfv	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1761266423Sjfv
1762266423Sjfv	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1763266423Sjfv
1764266423Sjfv	/*
1765266423Sjfv	** We use the TCP TH_FIN as a trigger to remove
1766266423Sjfv	** the filter, otherwise it's an update.
1767266423Sjfv	*/
1768266423Sjfv	dtype |= (th->th_flags & TH_FIN) ?
1769266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1770266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1771266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1772266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1773266423Sjfv
1774266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1775266423Sjfv	    I40E_TXD_FLTR_QW1_DEST_SHIFT;
1776266423Sjfv
1777266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1778266423Sjfv	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1779266423Sjfv
1780266423Sjfv	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1781266423Sjfv	FDIR->dtype_cmd_cntindex = htole32(dtype);
1782266423Sjfv	return;
1783266423Sjfv}
1784266423Sjfv#endif
1785266423Sjfv
1786266423Sjfv
1787266423Sjfvstatic void
1788270346Sjfvixl_set_promisc(struct ixl_vsi *vsi)
1789266423Sjfv{
1790266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1791266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1792266423Sjfv	int		err, mcnt = 0;
1793266423Sjfv	bool		uni = FALSE, multi = FALSE;
1794266423Sjfv
1795266423Sjfv	if (ifp->if_flags & IFF_ALLMULTI)
1796266423Sjfv                multi = TRUE;
1797266423Sjfv	else { /* Need to count the multicast addresses */
1798266423Sjfv		struct  ifmultiaddr *ifma;
1799266423Sjfv		if_maddr_rlock(ifp);
1800266423Sjfv		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1801266423Sjfv                        if (ifma->ifma_addr->sa_family != AF_LINK)
1802266423Sjfv                                continue;
1803266423Sjfv                        if (mcnt == MAX_MULTICAST_ADDR)
1804266423Sjfv                                break;
1805266423Sjfv                        mcnt++;
1806266423Sjfv		}
1807266423Sjfv		if_maddr_runlock(ifp);
1808266423Sjfv	}
1809266423Sjfv
1810266423Sjfv	if (mcnt >= MAX_MULTICAST_ADDR)
1811266423Sjfv                multi = TRUE;
1812266423Sjfv        if (ifp->if_flags & IFF_PROMISC)
1813266423Sjfv		uni = TRUE;
1814266423Sjfv
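	/* Push the unicast/multicast promiscuous settings to the VSI via the admin queue */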
1815266423Sjfv	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1816266423Sjfv	    vsi->seid, uni, NULL);
1817266423Sjfv	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1818266423Sjfv	    vsi->seid, multi, NULL);
1819266423Sjfv	return;
1820266423Sjfv}
1821266423Sjfv
1822266423Sjfv/*********************************************************************
1823266423Sjfv * 	Filter Routines
1824266423Sjfv *
1825266423Sjfv *	Routines for multicast and vlan filter management.
1826266423Sjfv *
1827266423Sjfv *********************************************************************/
1828266423Sjfvstatic void
1829270346Sjfvixl_add_multi(struct ixl_vsi *vsi)
1830266423Sjfv{
1831266423Sjfv	struct	ifmultiaddr	*ifma;
1832266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1833266423Sjfv	struct i40e_hw		*hw = vsi->hw;
1834266423Sjfv	int			mcnt = 0, flags;
1835266423Sjfv
1836270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: begin");
1837266423Sjfv
1838266423Sjfv	if_maddr_rlock(ifp);
1839266423Sjfv	/*
1840266423Sjfv	** First just get a count, to decide if we
1841266423Sjfv	** simply use multicast promiscuous.
1842266423Sjfv	*/
1843266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1844266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1845266423Sjfv			continue;
1846266423Sjfv		mcnt++;
1847266423Sjfv	}
1848266423Sjfv	if_maddr_runlock(ifp);
1849266423Sjfv
1850266423Sjfv	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1851266423Sjfv		/* delete existing MC filters */
1852270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1853266423Sjfv		i40e_aq_set_vsi_multicast_promiscuous(hw,
1854266423Sjfv		    vsi->seid, TRUE, NULL);
1855266423Sjfv		return;
1856266423Sjfv	}
1857266423Sjfv
1858266423Sjfv	mcnt = 0;
1859266423Sjfv	if_maddr_rlock(ifp);
1860266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1861266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1862266423Sjfv			continue;
1863270346Sjfv		ixl_add_mc_filter(vsi,
1864266423Sjfv		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1865266423Sjfv		mcnt++;
1866266423Sjfv	}
1867266423Sjfv	if_maddr_runlock(ifp);
1868266423Sjfv	if (mcnt > 0) {
1869270346Sjfv		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1870270346Sjfv		ixl_add_hw_filters(vsi, flags, mcnt);
1871266423Sjfv	}
1872266423Sjfv
1873270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: end");
1874266423Sjfv	return;
1875266423Sjfv}
1876266423Sjfv
1877266423Sjfvstatic void
1878270346Sjfvixl_del_multi(struct ixl_vsi *vsi)
1879266423Sjfv{
1880266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1881266423Sjfv	struct ifmultiaddr	*ifma;
1882270346Sjfv	struct ixl_mac_filter	*f;
1883266423Sjfv	int			mcnt = 0;
1884266423Sjfv	bool		match = FALSE;
1885266423Sjfv
1886270346Sjfv	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1887266423Sjfv
1888266423Sjfv	/* Search for removed multicast addresses */
1889266423Sjfv	if_maddr_rlock(ifp);
1890266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
1891270346Sjfv		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1892266423Sjfv			match = FALSE;
1893266423Sjfv			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1894266423Sjfv				if (ifma->ifma_addr->sa_family != AF_LINK)
1895266423Sjfv					continue;
1896266423Sjfv				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1897266423Sjfv				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1898266423Sjfv					match = TRUE;
1899266423Sjfv					break;
1900266423Sjfv				}
1901266423Sjfv			}
1902266423Sjfv			if (match == FALSE) {
1903270346Sjfv				f->flags |= IXL_FILTER_DEL;
1904266423Sjfv				mcnt++;
1905266423Sjfv			}
1906266423Sjfv		}
1907266423Sjfv	}
1908266423Sjfv	if_maddr_runlock(ifp);
1909266423Sjfv
1910266423Sjfv	if (mcnt > 0)
1911270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1912266423Sjfv}
1913266423Sjfv
1914266423Sjfv
1915266423Sjfv/*********************************************************************
1916266423Sjfv *  Timer routine
1917266423Sjfv *
1918266423Sjfv *  This routine checks for link status, updates statistics,
1919266423Sjfv *  and runs the watchdog check.
1920266423Sjfv *
1921266423Sjfv **********************************************************************/
1922266423Sjfv
1923266423Sjfvstatic void
1924270346Sjfvixl_local_timer(void *arg)
1925266423Sjfv{
1926270346Sjfv	struct ixl_pf		*pf = arg;
1927266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1928270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1929270346Sjfv	struct ixl_queue	*que = vsi->queues;
1930266423Sjfv	device_t		dev = pf->dev;
1931266423Sjfv	int			hung = 0;
1932266423Sjfv	u32			mask;
1933266423Sjfv
1934266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1935266423Sjfv
1936266423Sjfv	/* Fire off the adminq task */
1937266423Sjfv	taskqueue_enqueue(pf->tq, &pf->adminq);
1938266423Sjfv
1939266423Sjfv	/* Update stats */
1940270346Sjfv	ixl_update_stats_counters(pf);
1941266423Sjfv
1942266423Sjfv	/*
1943269198Sjfv	** Check status of the queues
1944266423Sjfv	*/
1945266423Sjfv	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1946266423Sjfv		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1947266423Sjfv
1948266423Sjfv	for (int i = 0; i < vsi->num_queues; i++,que++) {
1949266423Sjfv		/* Any queues with outstanding work get a sw irq */
1950266423Sjfv		if (que->busy)
1951266423Sjfv			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1952266423Sjfv		/*
1953266423Sjfv		** Each time txeof runs without cleaning while there
1954266423Sjfv		** are uncleaned descriptors, it increments busy; once
1955266423Sjfv		** busy reaches IXL_MAX_TX_BUSY the queue is declared hung.
1956266423Sjfv		*/
1957270346Sjfv		if (que->busy == IXL_QUEUE_HUNG) {
1958269198Sjfv			++hung;
1959269198Sjfv			/* Mark the queue as inactive */
1960269198Sjfv			vsi->active_queues &= ~((u64)1 << que->me);
1961269198Sjfv			continue;
1962269198Sjfv		} else {
1963269198Sjfv			/* Check if we've come back from hung */
1964269198Sjfv			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1965269198Sjfv				vsi->active_queues |= ((u64)1 << que->me);
1966269198Sjfv		}
1967270346Sjfv		if (que->busy >= IXL_MAX_TX_BUSY) {
1968277084Sjfv#ifdef IXL_DEBUG
1969266423Sjfv			device_printf(dev,"Warning queue %d "
1970269198Sjfv			    "appears to be hung!\n", i);
1971277084Sjfv#endif
1972270346Sjfv			que->busy = IXL_QUEUE_HUNG;
1973266423Sjfv			++hung;
1974266423Sjfv		}
1975266423Sjfv	}
1976266423Sjfv	/* Only reinit if all queues show hung */
1977266423Sjfv	if (hung == vsi->num_queues)
1978266423Sjfv		goto hung;
1979266423Sjfv
1980270346Sjfv	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1981266423Sjfv	return;
1982266423Sjfv
1983266423Sjfvhung:
1984266423Sjfv	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1985270346Sjfv	ixl_init_locked(pf);
1986266423Sjfv}
1987266423Sjfv
1988266423Sjfv/*
1989266423Sjfv** Note: this routine updates the OS on the link state
1990266423Sjfv**	the real check of the hardware only happens with
1991266423Sjfv**	a link interrupt.
1992266423Sjfv*/
1993266423Sjfvstatic void
1994270346Sjfvixl_update_link_status(struct ixl_pf *pf)
1995266423Sjfv{
1996270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1997266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1998266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1999266423Sjfv	device_t		dev = pf->dev;
2000266423Sjfv
2001299547Serj	if (pf->link_up) {
2002266423Sjfv		if (vsi->link_active == FALSE) {
2003279033Sjfv			pf->fc = hw->fc.current_mode;
2004266423Sjfv			if (bootverbose) {
2005266423Sjfv				device_printf(dev,"Link is up %d Gbps %s,"
2006266423Sjfv				    " Flow Control: %s\n",
2007279858Sjfv				    ((pf->link_speed ==
2008279858Sjfv				    I40E_LINK_SPEED_40GB)? 40:10),
2009279033Sjfv				    "Full Duplex", ixl_fc_string[pf->fc]);
2010266423Sjfv			}
2011266423Sjfv			vsi->link_active = TRUE;
2012277084Sjfv			/*
2013277084Sjfv			** Warn user if link speed on an NPAR-enabled
2014277084Sjfv			** partition is not at least 10Gbps
2015277084Sjfv			*/
2016277084Sjfv			if (hw->func_caps.npar_enable &&
2017279858Sjfv			   (hw->phy.link_info.link_speed ==
2018279858Sjfv			   I40E_LINK_SPEED_1GB ||
2019279858Sjfv			   hw->phy.link_info.link_speed ==
2020279858Sjfv			   I40E_LINK_SPEED_100MB))
2021279858Sjfv				device_printf(dev, "The partition detected a link"
2022279858Sjfv				    " speed that is less than 10Gbps\n");
2023266423Sjfv			if_link_state_change(ifp, LINK_STATE_UP);
2024266423Sjfv		}
2025266423Sjfv	} else { /* Link down */
2026266423Sjfv		if (vsi->link_active == TRUE) {
2027266423Sjfv			if (bootverbose)
2028299547Serj				device_printf(dev, "Link is Down\n");
2029266423Sjfv			if_link_state_change(ifp, LINK_STATE_DOWN);
2030266423Sjfv			vsi->link_active = FALSE;
2031266423Sjfv		}
2032266423Sjfv	}
2033266423Sjfv
2034266423Sjfv	return;
2035266423Sjfv}
2036266423Sjfv
2037299547Serjstatic void
2038299547Serjixl_stop(struct ixl_pf *pf)
2039299547Serj{
2040299547Serj	IXL_PF_LOCK(pf);
2041299547Serj	ixl_stop_locked(pf);
2042299547Serj	IXL_PF_UNLOCK(pf);
2043299547Serj
2044299547Serj	ixl_free_interrupt_resources(pf);
2045299547Serj}
2046299547Serj
2047266423Sjfv/*********************************************************************
2048266423Sjfv *
2049266423Sjfv *  This routine stops all traffic on the adapter by disabling the
2050266423Sjfv *  rings and their interrupts and marking the interface down.
2051266423Sjfv *
2052266423Sjfv **********************************************************************/
2053266423Sjfv
2054266423Sjfvstatic void
2055299547Serjixl_stop_locked(struct ixl_pf *pf)
2056266423Sjfv{
2057270346Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
2058266423Sjfv	struct ifnet	*ifp = vsi->ifp;
2059266423Sjfv
2060299547Serj	INIT_DEBUGOUT("ixl_stop: begin\n");
2061266423Sjfv
2062299547Serj	IXL_PF_LOCK_ASSERT(pf);
2063299547Serj
2064299547Serj	/* Stop the local timer */
2065299547Serj	callout_stop(&pf->timer);
2066299547Serj
2067279858Sjfv	if (pf->num_vfs == 0)
2068279858Sjfv		ixl_disable_intr(vsi);
2069279858Sjfv	else
2070279858Sjfv		ixl_disable_rings_intr(vsi);
2071270346Sjfv	ixl_disable_rings(vsi);
2072266423Sjfv
2073266423Sjfv	/* Tell the stack that the interface is no longer active */
2074266423Sjfv	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2075266423Sjfv
2076266423Sjfv	return;
2077266423Sjfv}
2078266423Sjfv
2079266423Sjfv
2080266423Sjfv/*********************************************************************
2081266423Sjfv *
2082266423Sjfv *  Setup MSIX Interrupt resources and handlers for the VSI
2083266423Sjfv *
2084266423Sjfv **********************************************************************/
2085266423Sjfvstatic int
2086270346Sjfvixl_assign_vsi_legacy(struct ixl_pf *pf)
2087266423Sjfv{
2088266423Sjfv	device_t        dev = pf->dev;
2089270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
2090270346Sjfv	struct		ixl_queue *que = vsi->queues;
2091266423Sjfv	int 		error, rid = 0;
2092266423Sjfv
2093266423Sjfv	if (pf->msix == 1)
2094266423Sjfv		rid = 1;
2095266423Sjfv	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2096266423Sjfv	    &rid, RF_SHAREABLE | RF_ACTIVE);
2097266423Sjfv	if (pf->res == NULL) {
2098299548Serj		device_printf(dev, "Unable to allocate"
2099266423Sjfv		    " bus resource: vsi legacy/msi interrupt\n");
2100266423Sjfv		return (ENXIO);
2101266423Sjfv	}
2102266423Sjfv
2103266423Sjfv	/* Set the handler function */
2104266423Sjfv	error = bus_setup_intr(dev, pf->res,
2105266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2106270346Sjfv	    ixl_intr, pf, &pf->tag);
2107266423Sjfv	if (error) {
2108266423Sjfv		pf->res = NULL;
2109266423Sjfv		device_printf(dev, "Failed to register legacy/msi handler");
2110266423Sjfv		return (error);
2111266423Sjfv	}
2112266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
2113270346Sjfv	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2114270346Sjfv	TASK_INIT(&que->task, 0, ixl_handle_que, que);
2115270346Sjfv	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2116266423Sjfv	    taskqueue_thread_enqueue, &que->tq);
2117266423Sjfv	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2118266423Sjfv	    device_get_nameunit(dev));
2119270346Sjfv	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2120279858Sjfv
2121279858Sjfv#ifdef PCI_IOV
2122279858Sjfv	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2123279858Sjfv#endif
2124279858Sjfv
2125270346Sjfv	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2126266423Sjfv	    taskqueue_thread_enqueue, &pf->tq);
2127266423Sjfv	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2128266423Sjfv	    device_get_nameunit(dev));
2129266423Sjfv
2130266423Sjfv	return (0);
2131266423Sjfv}
2132266423Sjfv
2133299546Serjstatic void
2134299546Serjixl_init_taskqueues(struct ixl_pf *pf)
2135299546Serj{
2136299546Serj	struct ixl_vsi *vsi = &pf->vsi;
2137299546Serj	struct ixl_queue *que = vsi->queues;
2138299546Serj	device_t dev = pf->dev;
2139266423Sjfv
2140299546Serj	/* Tasklet for Admin Queue */
2141299546Serj	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2142299546Serj#ifdef PCI_IOV
2143299546Serj	/* VFLR Tasklet */
2144299546Serj	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2145299546Serj#endif
2146299546Serj
2147299546Serj	/* Create and start PF taskqueue */
2148299546Serj	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2149299546Serj	    taskqueue_thread_enqueue, &pf->tq);
2150299546Serj	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2151299546Serj	    device_get_nameunit(dev));
2152299546Serj
2153299546Serj	/* Create queue tasks and start queue taskqueues */
2154299546Serj	for (int i = 0; i < vsi->num_queues; i++, que++) {
2155299546Serj		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2156299546Serj		TASK_INIT(&que->task, 0, ixl_handle_que, que);
2157299546Serj		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2158299546Serj		    taskqueue_thread_enqueue, &que->tq);
2159299546Serj#ifdef RSS
		/* Pin this queue's taskqueue to the CPU of its RSS bucket */
		cpuset_t	cpu_mask;
		int		cpu_id = rss_getcpu(i % rss_getnumbuckets());

2160299546Serj		CPU_SETOF(cpu_id, &cpu_mask);
2161299546Serj		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2162299546Serj		    &cpu_mask, "%s (bucket %d)",
2163299546Serj		    device_get_nameunit(dev), cpu_id);
2164299546Serj#else
2165299546Serj		taskqueue_start_threads(&que->tq, 1, PI_NET,
2166299546Serj		    "%s (que %d)", device_get_nameunit(dev), que->me);
2167299546Serj#endif
2168299546Serj	}
2169299546Serj
2170299546Serj}
2171299546Serj
2172299546Serjstatic void
2173299546Serjixl_free_taskqueues(struct ixl_pf *pf)
2174299546Serj{
2175299546Serj	struct ixl_vsi		*vsi = &pf->vsi;
2176299546Serj	struct ixl_queue	*que = vsi->queues;
2177299546Serj
2178299546Serj	if (pf->tq)
2179299546Serj		taskqueue_free(pf->tq);
2180299546Serj	for (int i = 0; i < vsi->num_queues; i++, que++) {
2181299546Serj		if (que->tq)
2182299546Serj			taskqueue_free(que->tq);
2183299546Serj	}
2184299546Serj}
2185299546Serj
2186266423Sjfv/*********************************************************************
2187266423Sjfv *
2188266423Sjfv *  Setup MSIX Interrupt resources and handlers for the VSI
2189266423Sjfv *
2190266423Sjfv **********************************************************************/
2191266423Sjfvstatic int
2192270346Sjfvixl_assign_vsi_msix(struct ixl_pf *pf)
2193266423Sjfv{
2194266423Sjfv	device_t	dev = pf->dev;
2195270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
2196270346Sjfv	struct 		ixl_queue *que = vsi->queues;
2197266423Sjfv	struct		tx_ring	 *txr;
2198266423Sjfv	int 		error, rid, vector = 0;
2199299545Serj#ifdef	RSS
2200299545Serj	cpuset_t cpu_mask;
2201299545Serj#endif
2202266423Sjfv
2203299546Serj	/* Admin Queue interrupt vector is 0 */
2204266423Sjfv	rid = vector + 1;
2205266423Sjfv	pf->res = bus_alloc_resource_any(dev,
2206266423Sjfv    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2207266423Sjfv	if (!pf->res) {
2208299546Serj		device_printf(dev, "Unable to allocate"
2209299546Serj		    " bus resource: Adminq interrupt [rid=%d]\n", rid);
2210266423Sjfv		return (ENXIO);
2211266423Sjfv	}
2212266423Sjfv	/* Set the adminq vector and handler */
2213266423Sjfv	error = bus_setup_intr(dev, pf->res,
2214266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2215270346Sjfv	    ixl_msix_adminq, pf, &pf->tag);
2216266423Sjfv	if (error) {
2217266423Sjfv		pf->res = NULL;
2218266423Sjfv		device_printf(dev, "Failed to register Admin que handler");
2219266423Sjfv		return (error);
2220266423Sjfv	}
2221266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "aq");
2222266423Sjfv	pf->admvec = vector;
2223266423Sjfv	++vector;
2224266423Sjfv
2225266423Sjfv	/* Now set up the stations */
2226266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2227277084Sjfv		int cpu_id = i;
2228266423Sjfv		rid = vector + 1;
2229266423Sjfv		txr = &que->txr;
2230266423Sjfv		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2231266423Sjfv		    RF_SHAREABLE | RF_ACTIVE);
2232266423Sjfv		if (que->res == NULL) {
2233299546Serj			device_printf(dev, "Unable to allocate"
2234299546Serj		    	    " bus resource: que interrupt [rid=%d]\n", rid);
2235266423Sjfv			return (ENXIO);
2236266423Sjfv		}
2237266423Sjfv		/* Set the handler function */
2238266423Sjfv		error = bus_setup_intr(dev, que->res,
2239266423Sjfv		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2240270346Sjfv		    ixl_msix_que, que, &que->tag);
2241266423Sjfv		if (error) {
2242266423Sjfv			que->res = NULL;
2243266423Sjfv			device_printf(dev, "Failed to register que handler");
2244266423Sjfv			return (error);
2245266423Sjfv		}
2246299546Serj		bus_describe_intr(dev, que->res, que->tag, "que%d", i);
2247266423Sjfv		/* Bind the vector to a CPU */
2248277084Sjfv#ifdef RSS
2249277084Sjfv		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2250277084Sjfv#endif
2251277084Sjfv		bus_bind_intr(dev, que->res, cpu_id);
2252266423Sjfv		que->msix = vector;
2253266423Sjfv	}
2254266423Sjfv
2255266423Sjfv	return (0);
2256266423Sjfv}
2257266423Sjfv
2258266423Sjfv
2259266423Sjfv/*
2260266423Sjfv * Allocate MSI/X vectors
2261266423Sjfv */
2262266423Sjfvstatic int
2263270346Sjfvixl_init_msix(struct ixl_pf *pf)
2264266423Sjfv{
2265266423Sjfv	device_t dev = pf->dev;
2266266423Sjfv	int rid, want, vectors, queues, available;
2267266423Sjfv
2268266423Sjfv	/* Override by tuneable */
2269270346Sjfv	if (ixl_enable_msix == 0)
2270266423Sjfv		goto msi;
2271266423Sjfv
2272269198Sjfv	/*
2273269198Sjfv	** When used in a virtualized environment,
2274269198Sjfv	** the PCI BUSMASTER capability may not be set,
2275269198Sjfv	** so explicitly set it here and rewrite
2276269198Sjfv	** the ENABLE bit in the MSIX control register
2277269198Sjfv	** at this point to cause the host to
2278269198Sjfv	** successfully initialize us.
2279269198Sjfv	*/
2280269198Sjfv	{
2281269198Sjfv		u16 pci_cmd_word;
2282269198Sjfv		int msix_ctrl;
2283269198Sjfv		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2284269198Sjfv		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2285269198Sjfv		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2286269198Sjfv		pci_find_cap(dev, PCIY_MSIX, &rid);
2287269198Sjfv		rid += PCIR_MSIX_CTRL;
2288269198Sjfv		msix_ctrl = pci_read_config(dev, rid, 2);
2289269198Sjfv		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2290269198Sjfv		pci_write_config(dev, rid, msix_ctrl, 2);
2291269198Sjfv	}
2292269198Sjfv
2293266423Sjfv	/* First try MSI/X */
2294270346Sjfv	rid = PCIR_BAR(IXL_BAR);
2295266423Sjfv	pf->msix_mem = bus_alloc_resource_any(dev,
2296266423Sjfv	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2297266423Sjfv       	if (!pf->msix_mem) {
2298266423Sjfv		/* May not be enabled */
2299266423Sjfv		device_printf(pf->dev,
2300299549Serj		    "Unable to map MSIX table\n");
2301266423Sjfv		goto msi;
2302266423Sjfv	}
2303266423Sjfv
2304266423Sjfv	available = pci_msix_count(dev);
2305266423Sjfv	if (available == 0) { /* system has msix disabled */
2306266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2307266423Sjfv		    rid, pf->msix_mem);
2308266423Sjfv		pf->msix_mem = NULL;
2309266423Sjfv		goto msi;
2310266423Sjfv	}
2311266423Sjfv
2312266423Sjfv	/* Figure out a reasonable auto config value */
2313266423Sjfv	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2314266423Sjfv
2315299546Serj	/* Override with hardcoded value if it's less than autoconfig count */
2316270346Sjfv	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2317270346Sjfv		queues = ixl_max_queues;
2318299546Serj	else if ((ixl_max_queues != 0) && (ixl_max_queues > queues))
2319299546Serj		device_printf(dev, "ixl_max_queues > # of cpus, using "
2320299546Serj		    "autoconfig amount...\n");
2321299546Serj	/* Or limit maximum auto-configured queues to 8 */
2322299546Serj	else if ((ixl_max_queues == 0) && (queues > 8))
2323299546Serj		queues = 8;
2324266423Sjfv
2325277084Sjfv#ifdef  RSS
2326277084Sjfv	/* If we're doing RSS, clamp at the number of RSS buckets */
2327277084Sjfv	if (queues > rss_getnumbuckets())
2328277084Sjfv		queues = rss_getnumbuckets();
2329277084Sjfv#endif
2330277084Sjfv
2331266423Sjfv	/*
2332266423Sjfv	** Want one vector (RX/TX pair) per queue
2333266423Sjfv	** plus an additional for the admin queue.
2334266423Sjfv	*/
2335266423Sjfv	want = queues + 1;
2336266423Sjfv	if (want <= available)	/* Have enough */
2337266423Sjfv		vectors = want;
2338266423Sjfv	else {
2339266423Sjfv               	device_printf(pf->dev,
2340266423Sjfv		    "MSIX Configuration Problem, "
2341266423Sjfv		    "%d vectors available but %d wanted!\n",
2342266423Sjfv		    available, want);
2343266423Sjfv		return (0); /* Will go to Legacy setup */
2344266423Sjfv	}
2345266423Sjfv
2346266423Sjfv	if (pci_alloc_msix(dev, &vectors) == 0) {
2347266423Sjfv               	device_printf(pf->dev,
2348266423Sjfv		    "Using MSIX interrupts with %d vectors\n", vectors);
2349266423Sjfv		pf->msix = vectors;
2350266423Sjfv		pf->vsi.num_queues = queues;
2351277084Sjfv#ifdef RSS
2352277084Sjfv		/*
2353277084Sjfv		 * If we're doing RSS, the number of queues needs to
2354277084Sjfv		 * match the number of RSS buckets that are configured.
2355277084Sjfv		 *
2356277084Sjfv		 * + If there's more queues than RSS buckets, we'll end
2357277084Sjfv		 *   up with queues that get no traffic.
2358277084Sjfv		 *
2359277084Sjfv		 * + If there's more RSS buckets than queues, we'll end
2360277084Sjfv		 *   up having multiple RSS buckets map to the same queue,
2361277084Sjfv		 *   so there'll be some contention.
2362277084Sjfv		 */
2363277084Sjfv		if (queues != rss_getnumbuckets()) {
2364277084Sjfv			device_printf(dev,
2365277084Sjfv			    "%s: queues (%d) != RSS buckets (%d)"
2366277084Sjfv			    "; performance will be impacted.\n",
2367277084Sjfv			    __func__, queues, rss_getnumbuckets());
2368277084Sjfv		}
2369277084Sjfv#endif
2370266423Sjfv		return (vectors);
2371266423Sjfv	}
2372266423Sjfvmsi:
2373266423Sjfv       	vectors = pci_msi_count(dev);
2374266423Sjfv	pf->vsi.num_queues = 1;
2375266423Sjfv	pf->msix = 1;
2376270346Sjfv	ixl_max_queues = 1;
2377270346Sjfv	ixl_enable_msix = 0;
2378266423Sjfv       	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2379299547Serj		device_printf(pf->dev, "Using an MSI interrupt\n");
2380266423Sjfv	else {
2381266423Sjfv		pf->msix = 0;
2382299547Serj		device_printf(pf->dev, "Using a Legacy interrupt\n");
2383266423Sjfv	}
2384266423Sjfv	return (vectors);
2385266423Sjfv}
2386266423Sjfv
2387266423Sjfv
2388266423Sjfv/*
2389299547Serj * Plumb MSIX vectors
2390266423Sjfv */
2391266423Sjfvstatic void
2392270346Sjfvixl_configure_msix(struct ixl_pf *pf)
2393266423Sjfv{
2394266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2395270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
2396266423Sjfv	u32		reg;
2397266423Sjfv	u16		vector = 1;
2398266423Sjfv
2399266423Sjfv	/* First set up the adminq - vector 0 */
2400266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2401266423Sjfv	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2402266423Sjfv
2403266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2404266423Sjfv	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2405299549Serj	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2406266423Sjfv	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2407266423Sjfv	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2408266423Sjfv	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2409266423Sjfv	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2410266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2411266423Sjfv
2412299547Serj	/*
2413299547Serj	 * 0x7FF is the end of the queue list.
2414299547Serj	 * This means we won't use MSI-X vector 0 for a queue interrupt
2415299547Serj	 * in MSIX mode.
2416299547Serj	 */
2417266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2418299547Serj	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
2419299547Serj	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
2420266423Sjfv
2421266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2422266423Sjfv	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2423266423Sjfv	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2424266423Sjfv
2425266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2426266423Sjfv
2427266423Sjfv	/* Next configure the queues */
2428266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2429299545Serj		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2430266423Sjfv		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2431266423Sjfv
2432266423Sjfv		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2433270346Sjfv		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2434266423Sjfv		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2435266423Sjfv		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2436266423Sjfv		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2437266423Sjfv		wr32(hw, I40E_QINT_RQCTL(i), reg);
2438266423Sjfv
2439266423Sjfv		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2440270346Sjfv		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2441266423Sjfv		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2442299545Serj		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2443266423Sjfv		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2444299545Serj		if (i == (vsi->num_queues - 1))
2445299545Serj			reg |= (IXL_QUEUE_EOL
2446299545Serj			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2447266423Sjfv		wr32(hw, I40E_QINT_TQCTL(i), reg);
2448266423Sjfv	}
2449266423Sjfv}
2450266423Sjfv
2451266423Sjfv/*
2452266423Sjfv * Configure for MSI single vector operation
2453266423Sjfv */
2454266423Sjfvstatic void
2455270346Sjfvixl_configure_legacy(struct ixl_pf *pf)
2456266423Sjfv{
2457266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2458266423Sjfv	u32		reg;
2459266423Sjfv
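	/* Clear the ITR (interrupt throttling) registers for the legacy vector */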
2460266423Sjfv	wr32(hw, I40E_PFINT_ITR0(0), 0);
2461266423Sjfv	wr32(hw, I40E_PFINT_ITR0(1), 0);
2462266423Sjfv
2463266423Sjfv	/* Setup "other" causes */
2464266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2465266423Sjfv	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2466266423Sjfv	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2467266423Sjfv	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2468266423Sjfv	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2469266423Sjfv	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2470266423Sjfv	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2471266423Sjfv	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2472266423Sjfv	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2473266423Sjfv	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2474266423Sjfv	    ;
2475266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2476266423Sjfv
2477266423Sjfv	/* SW_ITR_IDX = 0, but don't change INTENA */
2478266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2479266423Sjfv	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2480266423Sjfv	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2481266423Sjfv	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2482266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2483266423Sjfv
2484266423Sjfv	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2485266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0);
2486266423Sjfv
2487266423Sjfv	/* Associate the queue pair to the vector and enable the q int */
2488266423Sjfv	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2489270346Sjfv	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2490266423Sjfv	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2491266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
2492266423Sjfv
2493266423Sjfv	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2494270346Sjfv	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2495270346Sjfv	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2496266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
2497266423Sjfv
2498266423Sjfv	/* Next enable the queue pair */
2499266423Sjfv	reg = rd32(hw, I40E_QTX_ENA(0));
2500266423Sjfv	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2501266423Sjfv	wr32(hw, I40E_QTX_ENA(0), reg);
2502266423Sjfv
2503266423Sjfv	reg = rd32(hw, I40E_QRX_ENA(0));
2504266423Sjfv	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2505266423Sjfv	wr32(hw, I40E_QRX_ENA(0), reg);
2506266423Sjfv}
2507266423Sjfv
2508266423Sjfv
2509266423Sjfv/*
2510266423Sjfv * Set the Initial ITR state
2511266423Sjfv */
2512266423Sjfvstatic void
2513270346Sjfvixl_configure_itr(struct ixl_pf *pf)
2514266423Sjfv{
2515266423Sjfv	struct i40e_hw		*hw = &pf->hw;
2516270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2517270346Sjfv	struct ixl_queue	*que = vsi->queues;
2518266423Sjfv
2519270346Sjfv	vsi->rx_itr_setting = ixl_rx_itr;
2520270346Sjfv	if (ixl_dynamic_rx_itr)
2521270346Sjfv		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2522270346Sjfv	vsi->tx_itr_setting = ixl_tx_itr;
2523270346Sjfv	if (ixl_dynamic_tx_itr)
2524270346Sjfv		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2525266423Sjfv
2526266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2527266423Sjfv		struct tx_ring	*txr = &que->txr;
2528266423Sjfv		struct rx_ring 	*rxr = &que->rxr;
2529266423Sjfv
2530270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2531266423Sjfv		    vsi->rx_itr_setting);
2532266423Sjfv		rxr->itr = vsi->rx_itr_setting;
2533270346Sjfv		rxr->latency = IXL_AVE_LATENCY;
2534270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2535266423Sjfv		    vsi->tx_itr_setting);
2536266423Sjfv		txr->itr = vsi->tx_itr_setting;
2537270346Sjfv		txr->latency = IXL_AVE_LATENCY;
2538266423Sjfv	}
2539266423Sjfv}
2540266423Sjfv
2541266423Sjfv
2542266423Sjfvstatic int
2543270346Sjfvixl_allocate_pci_resources(struct ixl_pf *pf)
2544266423Sjfv{
2545266423Sjfv	int             rid;
2546266423Sjfv	device_t        dev = pf->dev;
2547266423Sjfv
2548266423Sjfv	rid = PCIR_BAR(0);
2549266423Sjfv	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2550266423Sjfv	    &rid, RF_ACTIVE);
2551266423Sjfv
2552266423Sjfv	if (!(pf->pci_mem)) {
2553299549Serj		device_printf(dev, "Unable to allocate bus resource: memory\n");
2554266423Sjfv		return (ENXIO);
2555266423Sjfv	}
2556266423Sjfv
2557266423Sjfv	pf->osdep.mem_bus_space_tag =
2558266423Sjfv		rman_get_bustag(pf->pci_mem);
2559266423Sjfv	pf->osdep.mem_bus_space_handle =
2560266423Sjfv		rman_get_bushandle(pf->pci_mem);
2561270346Sjfv	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2562272285Srstone	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2563266423Sjfv	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2564266423Sjfv
2565266423Sjfv	pf->hw.back = &pf->osdep;
2566266423Sjfv
2567266423Sjfv	/*
2568266423Sjfv	** Now set up MSI or MSI/X; this should
2569266423Sjfv	** return the number of supported
2570266423Sjfv	** vectors. (Will be 1 for MSI)
2571266423Sjfv	*/
2572270346Sjfv	pf->msix = ixl_init_msix(pf);
2573266423Sjfv	return (0);
2574266423Sjfv}
2575266423Sjfv
2576266423Sjfvstatic void
2577299547Serjixl_free_interrupt_resources(struct ixl_pf *pf)
2578266423Sjfv{
2579270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2580270346Sjfv	struct ixl_queue	*que = vsi->queues;
2581266423Sjfv	device_t		dev = pf->dev;
2582299547Serj	int rid;
2583266423Sjfv
2584266423Sjfv	/* We may get here before stations are setup */
2585270346Sjfv	if ((!ixl_enable_msix) || (que == NULL))
2586266423Sjfv		goto early;
2587266423Sjfv
2588266423Sjfv	/*
2589266423Sjfv	**  Release all msix VSI resources:
2590266423Sjfv	*/
2591266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2592266423Sjfv		rid = que->msix + 1;
2593266423Sjfv		if (que->tag != NULL) {
2594266423Sjfv			bus_teardown_intr(dev, que->res, que->tag);
2595266423Sjfv			que->tag = NULL;
2596266423Sjfv		}
2597299547Serj		if (que->res != NULL) {
2598266423Sjfv			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2599299547Serj			que->res = NULL;
2600299547Serj		}
2601266423Sjfv	}
2602266423Sjfv
2603266423Sjfvearly:
2604266423Sjfv	/* Clean the AdminQ interrupt last */
2605266423Sjfv	if (pf->admvec) /* we are doing MSIX */
2606266423Sjfv		rid = pf->admvec + 1;
2607266423Sjfv	else
2608266423Sjfv		(pf->msix != 0) ? (rid = 1):(rid = 0);
2609266423Sjfv
2610266423Sjfv	if (pf->tag != NULL) {
2611266423Sjfv		bus_teardown_intr(dev, pf->res, pf->tag);
2612266423Sjfv		pf->tag = NULL;
2613266423Sjfv	}
2614299547Serj	if (pf->res != NULL) {
2615266423Sjfv		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2616299547Serj		pf->res = NULL;
2617299547Serj	}
2618299547Serj}
2619266423Sjfv
2620299547Serjstatic void
2621299547Serjixl_free_pci_resources(struct ixl_pf *pf)
2622299547Serj{
2623299547Serj	device_t		dev = pf->dev;
2624299547Serj	int			memrid;
2625299547Serj
2626299547Serj	ixl_free_interrupt_resources(pf);
2627299547Serj
2628266423Sjfv	if (pf->msix)
2629266423Sjfv		pci_release_msi(dev);
2630266423Sjfv
2631299547Serj	memrid = PCIR_BAR(IXL_BAR);
2632299547Serj
2633266423Sjfv	if (pf->msix_mem != NULL)
2634266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2635266423Sjfv		    memrid, pf->msix_mem);
2636266423Sjfv
2637266423Sjfv	if (pf->pci_mem != NULL)
2638266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2639266423Sjfv		    PCIR_BAR(0), pf->pci_mem);
2640266423Sjfv
2641266423Sjfv	return;
2642266423Sjfv}
2643266423Sjfv
2644274205Sjfvstatic void
2645274205Sjfvixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2646274205Sjfv{
2647274205Sjfv	/* Display supported media types */
2648274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2649274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2650266423Sjfv
2651274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2652274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2653279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2654279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2655279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2656279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2657274205Sjfv
2658284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2659279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2660274205Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2661274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2662279033Sjfv
2663274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2664274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2665274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2666274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2667274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2668274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2669274205Sjfv
2670279033Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2671279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2672279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2673279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2674279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2675274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2676274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2677274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2678274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2679274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2680284049Sjfv
2681284049Sjfv#ifndef IFM_ETH_XTYPE
2682284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2683284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2684284049Sjfv
2685284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2686284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
2687284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2688284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_SFI))
2689284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2690284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2691284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2692284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2693284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2694284049Sjfv
2695284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2696284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2697284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2698284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2699284049Sjfv#else
2700284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2701284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2702284049Sjfv
2703284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
2704284049Sjfv	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
2705284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
2706284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
2707284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
2708284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
2709284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
2710284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2711284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2712284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2713284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2714284049Sjfv
2715284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
2716284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
2717284049Sjfv
2718284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2719284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
2720284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2721284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
2722284049Sjfv#endif
2723274205Sjfv}
2724274205Sjfv
2725266423Sjfv/*********************************************************************
2726266423Sjfv *
2727266423Sjfv *  Setup networking device structure and register an interface.
2728266423Sjfv *
2729266423Sjfv **********************************************************************/
2730266423Sjfvstatic int
2731270346Sjfvixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2732266423Sjfv{
2733266423Sjfv	struct ifnet		*ifp;
2734266423Sjfv	struct i40e_hw		*hw = vsi->hw;
2735270346Sjfv	struct ixl_queue	*que = vsi->queues;
2736279033Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
2737266423Sjfv	enum i40e_status_code aq_error = 0;
2738266423Sjfv
2739270346Sjfv	INIT_DEBUGOUT("ixl_setup_interface: begin");
2740266423Sjfv
2741266423Sjfv	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2742266423Sjfv	if (ifp == NULL) {
2743266423Sjfv		device_printf(dev, "can not allocate ifnet structure\n");
2744266423Sjfv		return (-1);
2745266423Sjfv	}
2746266423Sjfv	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2747266423Sjfv	ifp->if_mtu = ETHERMTU;
2748299546Serj	ifp->if_baudrate = IF_Gbps(40);
2749270346Sjfv	ifp->if_init = ixl_init;
2750266423Sjfv	ifp->if_softc = vsi;
2751266423Sjfv	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2752270346Sjfv	ifp->if_ioctl = ixl_ioctl;
2753266423Sjfv
2754274205Sjfv#if __FreeBSD_version >= 1100036
2755272227Sglebius	if_setgetcounterfn(ifp, ixl_get_counter);
2756272227Sglebius#endif
2757272227Sglebius
2758270346Sjfv	ifp->if_transmit = ixl_mq_start;
2759266423Sjfv
2760270346Sjfv	ifp->if_qflush = ixl_qflush;
2761266423Sjfv
2762266423Sjfv	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2763266423Sjfv
2764266423Sjfv	vsi->max_frame_size =
2765266423Sjfv	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2766266423Sjfv	    + ETHER_VLAN_ENCAP_LEN;
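	/*
	** For reference: with the default 1500-byte MTU this works out to
	** 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes,
	** the classic maximum VLAN-tagged frame size.
	*/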
2767266423Sjfv
2768266423Sjfv	/*
2769266423Sjfv	 * Tell the upper layer(s) we support long frames.
2770266423Sjfv	 */
2771270856Sglebius	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2772266423Sjfv
2773266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM;
2774266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2775266423Sjfv	ifp->if_capabilities |= IFCAP_TSO;
2776266423Sjfv	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2777266423Sjfv	ifp->if_capabilities |= IFCAP_LRO;
2778266423Sjfv
2779266423Sjfv	/* VLAN capabilities */
2780266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2781266423Sjfv			     |  IFCAP_VLAN_HWTSO
2782266423Sjfv			     |  IFCAP_VLAN_MTU
2783266423Sjfv			     |  IFCAP_VLAN_HWCSUM;
2784266423Sjfv	ifp->if_capenable = ifp->if_capabilities;
2785266423Sjfv
2786266423Sjfv	/*
2787266423Sjfv	** Don't turn this on by default: if vlans are
2788266423Sjfv	** created on another pseudo device (e.g. lagg),
2789266423Sjfv	** vlan events are not passed through, breaking
2790266423Sjfv	** operation, but with HW FILTER off it works. If
2791270346Sjfv	** using vlans directly on the ixl driver you can
2792266423Sjfv	** enable this and get full hardware tag filtering.
2793266423Sjfv	*/
2794266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
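	/*
	** Assuming the standard ifconfig(8) capability flags, the filter
	** can then be toggled at runtime from userland, e.g.:
	**
	**	ifconfig ixl0 vlanhwfilter	# enable HW VLAN filtering
	**	ifconfig ixl0 -vlanhwfilter	# fall back to SW filtering
	*/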
2795266423Sjfv
2796266423Sjfv	/*
2797266423Sjfv	 * Specify the media types supported by this adapter and register
2798266423Sjfv	 * callbacks to update media and link information
2799266423Sjfv	 */
2800270346Sjfv	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2801270346Sjfv		     ixl_media_status);
2802266423Sjfv
2803279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
2804279033Sjfv	    FALSE, TRUE, &abilities, NULL);
2805279033Sjfv	/* May need delay to detect fiber correctly */
2806274205Sjfv	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2807274205Sjfv		i40e_msec_delay(200);
2808277084Sjfv		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2809279033Sjfv		    TRUE, &abilities, NULL);
2810279033Sjfv	}
2811279033Sjfv	if (aq_error) {
2812274205Sjfv		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2813274205Sjfv			device_printf(dev, "Unknown PHY type detected!\n");
2814274205Sjfv		else
2815279033Sjfv			device_printf(dev,
2816279033Sjfv			    "Error getting supported media types, err %d,"
2817279033Sjfv			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2818279033Sjfv		return (0);
2819279033Sjfv	}
2820266423Sjfv
2821279033Sjfv	ixl_add_ifmedia(vsi, abilities.phy_type);
2822279033Sjfv
2823266423Sjfv	/* Use autoselect media by default */
2824266423Sjfv	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2825266423Sjfv	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2826266423Sjfv
2827274205Sjfv	ether_ifattach(ifp, hw->mac.addr);
2828274205Sjfv
2829266423Sjfv	return (0);
2830266423Sjfv}
2831266423Sjfv
2832279858Sjfv/*
2833299547Serj** Run when the Admin Queue gets a link state change interrupt.
2834279858Sjfv*/
2835279858Sjfvstatic void
2836279858Sjfvixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2837266423Sjfv{
2838279858Sjfv	struct i40e_hw	*hw = &pf->hw;
2839299547Serj	device_t dev = pf->dev;
2840279858Sjfv	struct i40e_aqc_get_link_status *status =
2841279858Sjfv	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2842266423Sjfv
2843299547Serj	/* Firmware workaround: may need to wait for link to actually come up... */
2844299547Serj	if (!pf->link_up && (status->link_info & I40E_AQ_SIGNAL_DETECT)) {
2845299547Serj		device_printf(dev, "%s: Waiting...\n", __func__);
2846299547Serj		i40e_msec_delay(4000);
2847299547Serj	}
2848299547Serj
2849299547Serj	/* Request link status from adapter */
2850279858Sjfv	hw->phy.get_link_info = TRUE;
2851299547Serj	i40e_get_link_status(hw, &pf->link_up);
2852299547Serj
2853299547Serj	/* Print out message if an unqualified module is found */
2854279858Sjfv	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2855279858Sjfv	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2856279858Sjfv	    (!(status->link_info & I40E_AQ_LINK_UP)))
2857299547Serj		device_printf(dev, "Link failed because "
2858299547Serj		    "an unqualified module was detected!\n");
2859279858Sjfv
2860299547Serj	/* Update OS link info */
2861299547Serj	ixl_update_link_status(pf);
2862266423Sjfv}
2863266423Sjfv
2864266423Sjfv/*********************************************************************
2865266423Sjfv *
2866279033Sjfv *  Get Firmware Switch configuration
2867279033Sjfv *	- this will need to be more robust when more complex
2868279033Sjfv *	  switch configurations are enabled.
2869266423Sjfv *
2870266423Sjfv **********************************************************************/
2871266423Sjfvstatic int
2872279033Sjfvixl_switch_config(struct ixl_pf *pf)
2873266423Sjfv{
2874279033Sjfv	struct i40e_hw	*hw = &pf->hw;
2875279033Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
2876266423Sjfv	device_t 	dev = vsi->dev;
2877266423Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
2878266423Sjfv	u8	aq_buf[I40E_AQ_LARGE_BUF];
2879279858Sjfv	int	ret;
2880266423Sjfv	u16	next = 0;
2881266423Sjfv
2882279033Sjfv	memset(&aq_buf, 0, sizeof(aq_buf));
2883266423Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2884266423Sjfv	ret = i40e_aq_get_switch_config(hw, sw_config,
2885266423Sjfv	    sizeof(aq_buf), &next, NULL);
2886266423Sjfv	if (ret) {
2887279858Sjfv		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2888279858Sjfv		    ret);
2889266423Sjfv		return (ret);
2890266423Sjfv	}
2891270346Sjfv#ifdef IXL_DEBUG
2892279858Sjfv	device_printf(dev,
2893279858Sjfv	    "Switch config: header reported: %d in structure, %d total\n",
2894266423Sjfv    	    sw_config->header.num_reported, sw_config->header.num_total);
2895279858Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
2896279858Sjfv		device_printf(dev,
2897279858Sjfv		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2898279858Sjfv		    sw_config->element[i].element_type,
2899279858Sjfv		    sw_config->element[i].seid,
2900279858Sjfv		    sw_config->element[i].uplink_seid,
2901279858Sjfv		    sw_config->element[i].downlink_seid);
2902279858Sjfv	}
2903266423Sjfv#endif
2904279033Sjfv	/* Simplified due to a single VSI at the moment */
2905279858Sjfv	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2906279858Sjfv	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2907266423Sjfv	vsi->seid = sw_config->element[0].seid;
2908279033Sjfv	return (ret);
2909279033Sjfv}
2910266423Sjfv
2911279033Sjfv/*********************************************************************
2912279033Sjfv *
2913279033Sjfv *  Initialize the VSI:  this handles contexts, which means things
2914279033Sjfv *  			 like the number of descriptors, buffer size,
2915279033Sjfv *			 plus we init the rings through this function.
2916279033Sjfv *
2917279033Sjfv **********************************************************************/
2918279033Sjfvstatic int
2919279033Sjfvixl_initialize_vsi(struct ixl_vsi *vsi)
2920279033Sjfv{
2921279858Sjfv	struct ixl_pf		*pf = vsi->back;
2922279033Sjfv	struct ixl_queue	*que = vsi->queues;
2923279033Sjfv	device_t		dev = vsi->dev;
2924279033Sjfv	struct i40e_hw		*hw = vsi->hw;
2925279033Sjfv	struct i40e_vsi_context	ctxt;
2926279033Sjfv	int			err = 0;
2927279033Sjfv
2928266423Sjfv	memset(&ctxt, 0, sizeof(ctxt));
2929266423Sjfv	ctxt.seid = vsi->seid;
2930279858Sjfv	if (pf->veb_seid != 0)
2931279858Sjfv		ctxt.uplink_seid = pf->veb_seid;
2932266423Sjfv	ctxt.pf_num = hw->pf_id;
2933279033Sjfv	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2934279033Sjfv	if (err) {
2935299548Serj		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d\n", err);
2936279033Sjfv		return (err);
2937266423Sjfv	}
2938270346Sjfv#ifdef IXL_DEBUG
2939299548Serj	device_printf(dev, "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2940266423Sjfv	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2941266423Sjfv	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2942266423Sjfv	    ctxt.uplink_seid, ctxt.vsi_number,
2943266423Sjfv	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2944266423Sjfv	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2945266423Sjfv	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2946266423Sjfv#endif
2947266423Sjfv	/*
2948266423Sjfv	** Set the queue and traffic class bits
2949266423Sjfv	**  - when multiple traffic classes are supported
2950266423Sjfv	**    this will need to be more robust.
2951266423Sjfv	*/
2952266423Sjfv	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2953266423Sjfv	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2954299545Serj	ctxt.info.queue_mapping[0] = 0;
2955299548Serj	ctxt.info.tc_mapping[0] = 0x0c00;
2956266423Sjfv
2957266423Sjfv	/* Set VLAN receive stripping mode */
2958266423Sjfv	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2959266423Sjfv	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2960266423Sjfv	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2961299548Serj		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2962266423Sjfv	else
2963299548Serj		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2964266423Sjfv
2965266423Sjfv	/* Keep copy of VSI info in VSI for statistic counters */
2966266423Sjfv	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2967266423Sjfv
2968266423Sjfv	/* Reset VSI statistics */
2969270346Sjfv	ixl_vsi_reset_stats(vsi);
2970266423Sjfv	vsi->hw_filters_add = 0;
2971266423Sjfv	vsi->hw_filters_del = 0;
2972266423Sjfv
2973279858Sjfv	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2974279858Sjfv
2975279033Sjfv	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2976279033Sjfv	if (err) {
2977299548Serj		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d, aq_error %d\n",
2978299548Serj		   err, hw->aq.asq_last_status);
2979279033Sjfv		return (err);
2980279033Sjfv	}
2981266423Sjfv
2982266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2983266423Sjfv		struct tx_ring		*txr = &que->txr;
2984266423Sjfv		struct rx_ring 		*rxr = &que->rxr;
2985266423Sjfv		struct i40e_hmc_obj_txq tctx;
2986266423Sjfv		struct i40e_hmc_obj_rxq rctx;
2987266423Sjfv		u32			txctl;
2988266423Sjfv		u16			size;
2989266423Sjfv
2990266423Sjfv		/* Setup the HMC TX Context  */
2991266423Sjfv		size = que->num_desc * sizeof(struct i40e_tx_desc);
2992266423Sjfv		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2993266423Sjfv		tctx.new_context = 1;
2994279858Sjfv		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2995266423Sjfv		tctx.qlen = que->num_desc;
2996266423Sjfv		tctx.fc_ena = 0;
2997269198Sjfv		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2998269198Sjfv		/* Enable HEAD writeback */
2999269198Sjfv		tctx.head_wb_ena = 1;
3000269198Sjfv		tctx.head_wb_addr = txr->dma.pa +
3001269198Sjfv		    (que->num_desc * sizeof(struct i40e_tx_desc));
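		/*
		** Note: the extra sizeof(u32) reserved in the tsize
		** calculation in ixl_setup_stations() is exactly this head
		** writeback word, placed immediately after the last
		** descriptor in the same DMA allocation.
		*/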
3002266423Sjfv		tctx.rdylist_act = 0;
3003266423Sjfv		err = i40e_clear_lan_tx_queue_context(hw, i);
3004266423Sjfv		if (err) {
3005266423Sjfv			device_printf(dev, "Unable to clear TX context\n");
3006266423Sjfv			break;
3007266423Sjfv		}
3008266423Sjfv		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
3009266423Sjfv		if (err) {
3010266423Sjfv			device_printf(dev, "Unable to set TX context\n");
3011266423Sjfv			break;
3012266423Sjfv		}
3013266423Sjfv		/* Associate the ring with this PF */
3014266423Sjfv		txctl = I40E_QTX_CTL_PF_QUEUE;
3015266423Sjfv		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3016266423Sjfv		    I40E_QTX_CTL_PF_INDX_MASK);
3017266423Sjfv		wr32(hw, I40E_QTX_CTL(i), txctl);
3018270346Sjfv		ixl_flush(hw);
3019266423Sjfv
3020266423Sjfv		/* Do ring (re)init */
3021270346Sjfv		ixl_init_tx_ring(que);
3022266423Sjfv
3023266423Sjfv		/* Next setup the HMC RX Context  */
3024279858Sjfv		if (vsi->max_frame_size <= MCLBYTES)
3025266423Sjfv			rxr->mbuf_sz = MCLBYTES;
3026266423Sjfv		else
3027266423Sjfv			rxr->mbuf_sz = MJUMPAGESIZE;
3028266423Sjfv
3029266423Sjfv		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
3030266423Sjfv
3031266423Sjfv		/* Set up an RX context for the HMC */
3032266423Sjfv		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
3033266423Sjfv		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
3034266423Sjfv		/* ignore header split for now */
3035266423Sjfv		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
3036266423Sjfv		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
3037266423Sjfv		    vsi->max_frame_size : max_rxmax;
3038266423Sjfv		rctx.dtype = 0;
3039266423Sjfv		rctx.dsize = 1;	/* use 32-byte descriptors */
3040266423Sjfv		rctx.hsplit_0 = 0;  /* no HDR split initially */
3041279858Sjfv		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
3042266423Sjfv		rctx.qlen = que->num_desc;
3043266423Sjfv		rctx.tphrdesc_ena = 1;
3044266423Sjfv		rctx.tphwdesc_ena = 1;
3045266423Sjfv		rctx.tphdata_ena = 0;
3046266423Sjfv		rctx.tphhead_ena = 0;
3047266423Sjfv		rctx.lrxqthresh = 2;
3048266423Sjfv		rctx.crcstrip = 1;
3049266423Sjfv		rctx.l2tsel = 1;
3050266423Sjfv		rctx.showiv = 1;
3051266423Sjfv		rctx.fc_ena = 0;
3052266423Sjfv		rctx.prefena = 1;
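		/*
		** Worked example, assuming 2 KB clusters and the usual
		** 128-byte buffer-size units: dbuff = 2048 >> 7 = 16.
		** With the default 1500-byte MTU, rxmax ends up as the
		** 1522-byte max_frame_size, well below the
		** mbuf_sz * rx_buf_chain_len cap computed above.
		*/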
3053266423Sjfv
3054266423Sjfv		err = i40e_clear_lan_rx_queue_context(hw, i);
3055266423Sjfv		if (err) {
3056266423Sjfv			device_printf(dev,
3057266423Sjfv			    "Unable to clear RX context %d\n", i);
3058266423Sjfv			break;
3059266423Sjfv		}
3060266423Sjfv		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
3061266423Sjfv		if (err) {
3062266423Sjfv			device_printf(dev, "Unable to set RX context %d\n", i);
3063266423Sjfv			break;
3064266423Sjfv		}
3065270346Sjfv		err = ixl_init_rx_ring(que);
3066266423Sjfv		if (err) {
3067266423Sjfv			device_printf(dev, "Fail in init_rx_ring %d\n", i);
3068266423Sjfv			break;
3069266423Sjfv		}
3070299545Serj		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
3071279860Sjfv#ifdef DEV_NETMAP
3072279860Sjfv		/* preserve queue */
3073279860Sjfv		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
3074279860Sjfv			struct netmap_adapter *na = NA(vsi->ifp);
3075279860Sjfv			struct netmap_kring *kring = &na->rx_rings[i];
3076279860Sjfv			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
3077279860Sjfv			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
3078279860Sjfv		} else
3079279860Sjfv#endif /* DEV_NETMAP */
3080266423Sjfv		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
3081266423Sjfv	}
3082266423Sjfv	return (err);
3083266423Sjfv}
3084266423Sjfv
3085266423Sjfv
3086266423Sjfv/*********************************************************************
3087266423Sjfv *
3088266423Sjfv *  Free all VSI structs.
3089266423Sjfv *
3090266423Sjfv **********************************************************************/
3091266423Sjfvvoid
3092270346Sjfvixl_free_vsi(struct ixl_vsi *vsi)
3093266423Sjfv{
3094270346Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
3095270346Sjfv	struct ixl_queue	*que = vsi->queues;
3096266423Sjfv
3097266423Sjfv	/* Free station queues */
3098299549Serj	if (!vsi->queues)
3099299549Serj		goto free_filters;
3100299549Serj
3101266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
3102266423Sjfv		struct tx_ring *txr = &que->txr;
3103266423Sjfv		struct rx_ring *rxr = &que->rxr;
3104266423Sjfv
3105266423Sjfv		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
3106266423Sjfv			continue;
3107270346Sjfv		IXL_TX_LOCK(txr);
3108270346Sjfv		ixl_free_que_tx(que);
3109266423Sjfv		if (txr->base)
3110271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
3111270346Sjfv		IXL_TX_UNLOCK(txr);
3112270346Sjfv		IXL_TX_LOCK_DESTROY(txr);
3113266423Sjfv
3114266423Sjfv		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
3115266423Sjfv			continue;
3116270346Sjfv		IXL_RX_LOCK(rxr);
3117270346Sjfv		ixl_free_que_rx(que);
3118266423Sjfv		if (rxr->base)
3119271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
3120270346Sjfv		IXL_RX_UNLOCK(rxr);
3121270346Sjfv		IXL_RX_LOCK_DESTROY(rxr);
3122266423Sjfv
3123266423Sjfv	}
3124266423Sjfv	free(vsi->queues, M_DEVBUF);
3125266423Sjfv
3126299549Serjfree_filters:
3127266423Sjfv	/* Free VSI filter list */
3128279858Sjfv	ixl_free_mac_filters(vsi);
3129279858Sjfv}
3130279858Sjfv
3131279858Sjfvstatic void
3132279858Sjfvixl_free_mac_filters(struct ixl_vsi *vsi)
3133279858Sjfv{
3134279858Sjfv	struct ixl_mac_filter *f;
3135279858Sjfv
3136266423Sjfv	while (!SLIST_EMPTY(&vsi->ftl)) {
3137266423Sjfv		f = SLIST_FIRST(&vsi->ftl);
3138266423Sjfv		SLIST_REMOVE_HEAD(&vsi->ftl, next);
3139266423Sjfv		free(f, M_DEVBUF);
3140266423Sjfv	}
3141266423Sjfv}
3142266423Sjfv
3143266423Sjfv
3144266423Sjfv/*********************************************************************
3145266423Sjfv *
3146266423Sjfv *  Allocate memory for the VSI (virtual station interface) and its
3147266423Sjfv *  associated queues, rings and the descriptors associated with each,
3148266423Sjfv *  called only once at attach.
3149266423Sjfv *
3150266423Sjfv **********************************************************************/
3151266423Sjfvstatic int
3152270346Sjfvixl_setup_stations(struct ixl_pf *pf)
3153266423Sjfv{
3154266423Sjfv	device_t		dev = pf->dev;
3155270346Sjfv	struct ixl_vsi		*vsi;
3156270346Sjfv	struct ixl_queue	*que;
3157266423Sjfv	struct tx_ring		*txr;
3158266423Sjfv	struct rx_ring		*rxr;
3159266423Sjfv	int 			rsize, tsize;
3160266423Sjfv	int			error = I40E_SUCCESS;
3161266423Sjfv
3162266423Sjfv	vsi = &pf->vsi;
3163266423Sjfv	vsi->back = (void *)pf;
3164266423Sjfv	vsi->hw = &pf->hw;
3165266423Sjfv	vsi->id = 0;
3166266423Sjfv	vsi->num_vlans = 0;
3167279858Sjfv	vsi->back = pf;
3168266423Sjfv
3169266423Sjfv	/* Get memory for the station queues */
3170266423Sjfv        if (!(vsi->queues =
3171270346Sjfv            (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
3172266423Sjfv            vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3173266423Sjfv                device_printf(dev, "Unable to allocate queue memory\n");
3174266423Sjfv                error = ENOMEM;
3175266423Sjfv                goto early;
3176266423Sjfv        }
3177266423Sjfv
3178266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3179266423Sjfv		que = &vsi->queues[i];
3180270346Sjfv		que->num_desc = ixl_ringsz;
3181266423Sjfv		que->me = i;
3182266423Sjfv		que->vsi = vsi;
3183269198Sjfv		/* mark the queue as active */
3184269198Sjfv		vsi->active_queues |= (u64)1 << que->me;
3185266423Sjfv		txr = &que->txr;
3186266423Sjfv		txr->que = que;
3187269198Sjfv		txr->tail = I40E_QTX_TAIL(que->me);
3188266423Sjfv
3189266423Sjfv		/* Initialize the TX lock */
3190266423Sjfv		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
3191266423Sjfv		    device_get_nameunit(dev), que->me);
3192266423Sjfv		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
3193266423Sjfv		/* Create the TX descriptor ring */
3194269198Sjfv		tsize = roundup2((que->num_desc *
3195269198Sjfv		    sizeof(struct i40e_tx_desc)) +
3196269198Sjfv		    sizeof(u32), DBA_ALIGN);
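		/*
		** For example, assuming the default 1024-descriptor ring and
		** 16-byte TX descriptors, this is
		** roundup2(1024 * 16 + 4, DBA_ALIGN); the trailing u32 is
		** the head writeback slot used in ixl_initialize_vsi().
		*/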
3197271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
3198271834Sbz		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
3199266423Sjfv			device_printf(dev,
3200266423Sjfv			    "Unable to allocate TX Descriptor memory\n");
3201266423Sjfv			error = ENOMEM;
3202266423Sjfv			goto fail;
3203266423Sjfv		}
3204266423Sjfv		txr->base = (struct i40e_tx_desc *)txr->dma.va;
3205266423Sjfv		bzero((void *)txr->base, tsize);
3206266423Sjfv       		/* Now allocate transmit soft structs for the ring */
3207270346Sjfv       		if (ixl_allocate_tx_data(que)) {
3208266423Sjfv			device_printf(dev,
3209266423Sjfv			    "Critical Failure setting up TX structures\n");
3210266423Sjfv			error = ENOMEM;
3211266423Sjfv			goto fail;
3212266423Sjfv       		}
3213266423Sjfv		/* Allocate a buf ring */
3214266423Sjfv		txr->br = buf_ring_alloc(4096, M_DEVBUF,
3215299547Serj		    M_NOWAIT, &txr->mtx);
3216266423Sjfv		if (txr->br == NULL) {
3217266423Sjfv			device_printf(dev,
3218266423Sjfv			    "Critical Failure setting up TX buf ring\n");
3219266423Sjfv			error = ENOMEM;
3220266423Sjfv			goto fail;
3221266423Sjfv       		}
3222266423Sjfv
3223266423Sjfv		/*
3224266423Sjfv		 * Next the RX queues...
3225266423Sjfv		 */
3226266423Sjfv		rsize = roundup2(que->num_desc *
3227266423Sjfv		    sizeof(union i40e_rx_desc), DBA_ALIGN);
3228266423Sjfv		rxr = &que->rxr;
3229266423Sjfv		rxr->que = que;
3230269198Sjfv		rxr->tail = I40E_QRX_TAIL(que->me);
3231266423Sjfv
3232266423Sjfv		/* Initialize the RX side lock */
3233266423Sjfv		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3234266423Sjfv		    device_get_nameunit(dev), que->me);
3235266423Sjfv		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
3236266423Sjfv
3237271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
3238271834Sbz		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
3239266423Sjfv			device_printf(dev,
3240266423Sjfv			    "Unable to allocate RX Descriptor memory\n");
3241266423Sjfv			error = ENOMEM;
3242266423Sjfv			goto fail;
3243266423Sjfv		}
3244266423Sjfv		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
3245266423Sjfv		bzero((void *)rxr->base, rsize);
3246266423Sjfv
3247266423Sjfv        	/* Allocate receive soft structs for the ring*/
3248270346Sjfv		if (ixl_allocate_rx_data(que)) {
3249266423Sjfv			device_printf(dev,
3250266423Sjfv			    "Critical Failure setting up receive structs\n");
3251266423Sjfv			error = ENOMEM;
3252266423Sjfv			goto fail;
3253266423Sjfv		}
3254266423Sjfv	}
3255266423Sjfv
3256266423Sjfv	return (0);
3257266423Sjfv
3258266423Sjfvfail:
3259266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3260266423Sjfv		que = &vsi->queues[i];
3261266423Sjfv		rxr = &que->rxr;
3262266423Sjfv		txr = &que->txr;
3263266423Sjfv		if (rxr->base)
3264271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
3265266423Sjfv		if (txr->base)
3266271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
3267266423Sjfv	}
3268266423Sjfv
3269266423Sjfvearly:
3270266423Sjfv	return (error);
3271266423Sjfv}
3272266423Sjfv
3273266423Sjfv/*
3274266423Sjfv** Provide an update to the queue RX
3275266423Sjfv** interrupt moderation value.
3276266423Sjfv*/
3277266423Sjfvstatic void
3278270346Sjfvixl_set_queue_rx_itr(struct ixl_queue *que)
3279266423Sjfv{
3280270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3281266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3282266423Sjfv	struct rx_ring	*rxr = &que->rxr;
3283266423Sjfv	u16		rx_itr;
3284266423Sjfv	u16		rx_latency = 0;
3285266423Sjfv	int		rx_bytes;
3286266423Sjfv
3287266423Sjfv
3288266423Sjfv	/* Idle, do nothing */
3289266423Sjfv	if (rxr->bytes == 0)
3290266423Sjfv		return;
3291266423Sjfv
3292270346Sjfv	if (ixl_dynamic_rx_itr) {
3293266423Sjfv		rx_bytes = rxr->bytes/rxr->itr;
3294266423Sjfv		rx_itr = rxr->itr;
3295266423Sjfv
3296266423Sjfv		/* Adjust latency range */
3297266423Sjfv		switch (rxr->latency) {
3298270346Sjfv		case IXL_LOW_LATENCY:
3299266423Sjfv			if (rx_bytes > 10) {
3300270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3301270346Sjfv				rx_itr = IXL_ITR_20K;
3302266423Sjfv			}
3303266423Sjfv			break;
3304270346Sjfv		case IXL_AVE_LATENCY:
3305266423Sjfv			if (rx_bytes > 20) {
3306270346Sjfv				rx_latency = IXL_BULK_LATENCY;
3307270346Sjfv				rx_itr = IXL_ITR_8K;
3308266423Sjfv			} else if (rx_bytes <= 10) {
3309270346Sjfv				rx_latency = IXL_LOW_LATENCY;
3310270346Sjfv				rx_itr = IXL_ITR_100K;
3311266423Sjfv			}
3312266423Sjfv			break;
3313270346Sjfv		case IXL_BULK_LATENCY:
3314266423Sjfv			if (rx_bytes <= 20) {
3315270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3316270346Sjfv				rx_itr = IXL_ITR_20K;
3317266423Sjfv			}
3318266423Sjfv			break;
3319266423Sjfv		}
3320266423Sjfv
3321266423Sjfv		rxr->latency = rx_latency;
3322266423Sjfv
3323266423Sjfv		if (rx_itr != rxr->itr) {
3324266423Sjfv			/* apply exponential smoothing */
3325266423Sjfv			rx_itr = (10 * rx_itr * rxr->itr) /
3326266423Sjfv			    ((9 * rx_itr) + rxr->itr);
3327270346Sjfv			rxr->itr = rx_itr & IXL_MAX_ITR;
3328270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3329266423Sjfv			    que->me), rxr->itr);
3330266423Sjfv		}
3331266423Sjfv	} else { /* We may have toggled to non-dynamic */
3332270346Sjfv		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3333270346Sjfv			vsi->rx_itr_setting = ixl_rx_itr;
3334266423Sjfv		/* Update the hardware if needed */
3335266423Sjfv		if (rxr->itr != vsi->rx_itr_setting) {
3336266423Sjfv			rxr->itr = vsi->rx_itr_setting;
3337270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3338266423Sjfv			    que->me), rxr->itr);
3339266423Sjfv		}
3340266423Sjfv	}
3341266423Sjfv	rxr->bytes = 0;
3342266423Sjfv	rxr->packets = 0;
3343266423Sjfv	return;
3344266423Sjfv}
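
/*
** The smoothing step above computes a 9:1 weighted harmonic mean of the
** old ITR value and the newly chosen target (nine parts old, one part
** target), so the programmed value drifts gradually instead of jumping.
** A minimal standalone sketch of the same arithmetic follows; the
** itr_smooth() helper is hypothetical and illustrative only, not part
** of the driver.
*/
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>

static inline uint16_t
itr_smooth(uint16_t target, uint16_t current)
{
	/* new = (10 * target * current) / (9 * target + current) */
	return ((10 * (uint32_t)target * current) /
	    ((9 * (uint32_t)target) + current));
}
/* e.g. itr_smooth(50, 100) == 90, then 83, 77, ... converging on 50 */
#endif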
3345266423Sjfv
3346266423Sjfv
3347266423Sjfv/*
3348266423Sjfv** Provide an update to the queue TX
3349266423Sjfv** interrupt moderation value.
3350266423Sjfv*/
3351266423Sjfvstatic void
3352270346Sjfvixl_set_queue_tx_itr(struct ixl_queue *que)
3353266423Sjfv{
3354270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3355266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3356266423Sjfv	struct tx_ring	*txr = &que->txr;
3357266423Sjfv	u16		tx_itr;
3358266423Sjfv	u16		tx_latency = 0;
3359266423Sjfv	int		tx_bytes;
3360266423Sjfv
3361266423Sjfv
3362266423Sjfv	/* Idle, do nothing */
3363266423Sjfv	if (txr->bytes == 0)
3364266423Sjfv		return;
3365266423Sjfv
3366270346Sjfv	if (ixl_dynamic_tx_itr) {
3367266423Sjfv		tx_bytes = txr->bytes/txr->itr;
3368266423Sjfv		tx_itr = txr->itr;
3369266423Sjfv
3370266423Sjfv		switch (txr->latency) {
3371270346Sjfv		case IXL_LOW_LATENCY:
3372266423Sjfv			if (tx_bytes > 10) {
3373270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3374270346Sjfv				tx_itr = IXL_ITR_20K;
3375266423Sjfv			}
3376266423Sjfv			break;
3377270346Sjfv		case IXL_AVE_LATENCY:
3378266423Sjfv			if (tx_bytes > 20) {
3379270346Sjfv				tx_latency = IXL_BULK_LATENCY;
3380270346Sjfv				tx_itr = IXL_ITR_8K;
3381266423Sjfv			} else if (tx_bytes <= 10) {
3382270346Sjfv				tx_latency = IXL_LOW_LATENCY;
3383270346Sjfv				tx_itr = IXL_ITR_100K;
3384266423Sjfv			}
3385266423Sjfv			break;
3386270346Sjfv		case IXL_BULK_LATENCY:
3387266423Sjfv			if (tx_bytes <= 20) {
3388270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3389270346Sjfv				tx_itr = IXL_ITR_20K;
3390266423Sjfv			}
3391266423Sjfv			break;
3392266423Sjfv		}
3393266423Sjfv
3394266423Sjfv		txr->latency = tx_latency;
3395266423Sjfv
3396266423Sjfv		if (tx_itr != txr->itr) {
3397266423Sjfv			/* apply exponential smoothing */
3398266423Sjfv			tx_itr = (10 * tx_itr * txr->itr) /
3399266423Sjfv			    ((9 * tx_itr) + txr->itr);
3400270346Sjfv			txr->itr = tx_itr & IXL_MAX_ITR;
3401270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3402266423Sjfv			    que->me), txr->itr);
3403266423Sjfv		}
3404266423Sjfv
3405266423Sjfv	} else { /* We may have toggled to non-dynamic */
3406270346Sjfv		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3407270346Sjfv			vsi->tx_itr_setting = ixl_tx_itr;
3408266423Sjfv		/* Update the hardware if needed */
3409266423Sjfv		if (txr->itr != vsi->tx_itr_setting) {
3410266423Sjfv			txr->itr = vsi->tx_itr_setting;
3411270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3412266423Sjfv			    que->me), txr->itr);
3413266423Sjfv		}
3414266423Sjfv	}
3415266423Sjfv	txr->bytes = 0;
3416266423Sjfv	txr->packets = 0;
3417266423Sjfv	return;
3418266423Sjfv}
3419266423Sjfv
3420279858Sjfv#define QUEUE_NAME_LEN 32
3421266423Sjfv
3422266423Sjfvstatic void
3423279858Sjfvixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3424279858Sjfv    struct sysctl_ctx_list *ctx, const char *sysctl_name)
3425279858Sjfv{
3426279858Sjfv	struct sysctl_oid *tree;
3427279858Sjfv	struct sysctl_oid_list *child;
3428279858Sjfv	struct sysctl_oid_list *vsi_list;
3429279858Sjfv
3430279858Sjfv	tree = device_get_sysctl_tree(pf->dev);
3431279858Sjfv	child = SYSCTL_CHILDREN(tree);
3432279858Sjfv	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3433279858Sjfv				   CTLFLAG_RD, NULL, "VSI Number");
3434279858Sjfv	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3435279858Sjfv
3436279858Sjfv	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3437279858Sjfv}
3438279858Sjfv
3439279858Sjfvstatic void
3440270346Sjfvixl_add_hw_stats(struct ixl_pf *pf)
3441266423Sjfv{
3442266423Sjfv	device_t dev = pf->dev;
3443270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
3444270346Sjfv	struct ixl_queue *queues = vsi->queues;
3445269198Sjfv	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3446266423Sjfv
3447266423Sjfv	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3448266423Sjfv	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3449266423Sjfv	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3450279858Sjfv	struct sysctl_oid_list *vsi_list;
3451266423Sjfv
3452279858Sjfv	struct sysctl_oid *queue_node;
3453279858Sjfv	struct sysctl_oid_list *queue_list;
3454266423Sjfv
3455269198Sjfv	struct tx_ring *txr;
3456269198Sjfv	struct rx_ring *rxr;
3457279858Sjfv	char queue_namebuf[QUEUE_NAME_LEN];
3458266423Sjfv
3459266423Sjfv	/* Driver statistics */
3460266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3461266423Sjfv			CTLFLAG_RD, &pf->watchdog_events,
3462266423Sjfv			"Watchdog timeouts");
3463266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3464266423Sjfv			CTLFLAG_RD, &pf->admin_irq,
3465266423Sjfv			"Admin Queue IRQ Handled");
3466266423Sjfv
3467279858Sjfv	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3468279858Sjfv	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3469266423Sjfv
3470266423Sjfv	/* Queue statistics */
3471266423Sjfv	for (int q = 0; q < vsi->num_queues; q++) {
3472269198Sjfv		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3473279858Sjfv		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3474279858Sjfv		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3475266423Sjfv		queue_list = SYSCTL_CHILDREN(queue_node);
3476266423Sjfv
3477269198Sjfv		txr = &(queues[q].txr);
3478269198Sjfv		rxr = &(queues[q].rxr);
3479269198Sjfv
3480269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3481266423Sjfv				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3482266423Sjfv				"m_defrag() failed");
3483269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3484266423Sjfv				CTLFLAG_RD, &(queues[q].dropped_pkts),
3485266423Sjfv				"Driver dropped packets");
3486266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3487266423Sjfv				CTLFLAG_RD, &(queues[q].irqs),
3488266423Sjfv				"irqs on this queue");
3489269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3490266423Sjfv				CTLFLAG_RD, &(queues[q].tso),
3491266423Sjfv				"TSO");
3492269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3493266423Sjfv				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3494266423Sjfv				"Driver tx dma failure in xmit");
3495266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3496266423Sjfv				CTLFLAG_RD, &(txr->no_desc),
3497266423Sjfv				"Queue No Descriptor Available");
3498266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3499266423Sjfv				CTLFLAG_RD, &(txr->total_packets),
3500266423Sjfv				"Queue Packets Transmitted");
3501266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3502270346Sjfv				CTLFLAG_RD, &(txr->tx_bytes),
3503266423Sjfv				"Queue Bytes Transmitted");
3504266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3505266423Sjfv				CTLFLAG_RD, &(rxr->rx_packets),
3506266423Sjfv				"Queue Packets Received");
3507266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3508266423Sjfv				CTLFLAG_RD, &(rxr->rx_bytes),
3509266423Sjfv				"Queue Bytes Received");
3510266423Sjfv	}
3511266423Sjfv
3512266423Sjfv	/* MAC stats */
3513270346Sjfv	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3514266423Sjfv}
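
/*
** Assuming unit 0, the counters registered above land under the device's
** sysctl tree and can be read from userland, e.g.:
**
**	sysctl dev.ixl.0.admin_irq
**	sysctl dev.ixl.0.pf.que0.tx_packets
**	sysctl dev.ixl.0.mac.crc_errors
*/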
3515266423Sjfv
3516266423Sjfvstatic void
3517270346Sjfvixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3518266423Sjfv	struct sysctl_oid_list *child,
3519266423Sjfv	struct i40e_eth_stats *eth_stats)
3520266423Sjfv{
3521270346Sjfv	struct ixl_sysctl_info ctls[] =
3522266423Sjfv	{
3523266423Sjfv		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3524266423Sjfv		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3525266423Sjfv			"Unicast Packets Received"},
3526266423Sjfv		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3527266423Sjfv			"Multicast Packets Received"},
3528266423Sjfv		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3529266423Sjfv			"Broadcast Packets Received"},
3530269198Sjfv		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3531266423Sjfv		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3532266423Sjfv		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3533266423Sjfv		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3534266423Sjfv			"Multicast Packets Transmitted"},
3535266423Sjfv		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3536266423Sjfv			"Broadcast Packets Transmitted"},
3537266423Sjfv		// end
3538266423Sjfv		{0,0,0}
3539266423Sjfv	};
3540266423Sjfv
3541270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3542297753Spfg	while (entry->stat != NULL)
3543266423Sjfv	{
3544266423Sjfv		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3545266423Sjfv				CTLFLAG_RD, entry->stat,
3546266423Sjfv				entry->description);
3547266423Sjfv		entry++;
3548266423Sjfv	}
3549266423Sjfv}
3550266423Sjfv
3551266423Sjfvstatic void
3552270346Sjfvixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3553266423Sjfv	struct sysctl_oid_list *child,
3554266423Sjfv	struct i40e_hw_port_stats *stats)
3555266423Sjfv{
3556269198Sjfv	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3557266423Sjfv				    CTLFLAG_RD, NULL, "Mac Statistics");
3558266423Sjfv	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3559266423Sjfv
3560266423Sjfv	struct i40e_eth_stats *eth_stats = &stats->eth;
3561270346Sjfv	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3562266423Sjfv
3563270346Sjfv	struct ixl_sysctl_info ctls[] =
3564266423Sjfv	{
3565266423Sjfv		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3566266423Sjfv		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3567266423Sjfv		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3568266423Sjfv		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3569266423Sjfv		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3570266423Sjfv		/* Packet Reception Stats */
3571266423Sjfv		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3572266423Sjfv		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3573266423Sjfv		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3574266423Sjfv		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3575266423Sjfv		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3576266423Sjfv		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3577266423Sjfv		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3578266423Sjfv		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3579266423Sjfv		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3580266423Sjfv		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3581266423Sjfv		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3582266423Sjfv		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3583266423Sjfv		/* Packet Transmission Stats */
3584266423Sjfv		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3585266423Sjfv		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3586266423Sjfv		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3587266423Sjfv		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3588266423Sjfv		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3589266423Sjfv		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3590266423Sjfv		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3591266423Sjfv		/* Flow control */
3592266423Sjfv		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3593266423Sjfv		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3594266423Sjfv		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3595266423Sjfv		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3596266423Sjfv		/* End */
3597266423Sjfv		{0,0,0}
3598266423Sjfv	};
3599266423Sjfv
3600270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3601297753Spfg	while (entry->stat != NULL)
3602266423Sjfv	{
3603266423Sjfv		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3604266423Sjfv				CTLFLAG_RD, entry->stat,
3605266423Sjfv				entry->description);
3606266423Sjfv		entry++;
3607266423Sjfv	}
3608266423Sjfv}
3609266423Sjfv
3610284049Sjfv
3611266423Sjfv/*
3612270346Sjfv** ixl_config_rss - setup RSS
3613266423Sjfv**  - note this is done for the single vsi
3614266423Sjfv*/
3615270346Sjfvstatic void ixl_config_rss(struct ixl_vsi *vsi)
3616266423Sjfv{
3617270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3618266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3619266423Sjfv	u32		lut = 0;
3620277084Sjfv	u64		set_hena = 0, hena;
3621277084Sjfv	int		i, j, que_id;
3622277084Sjfv#ifdef RSS
3623277084Sjfv	u32		rss_hash_config;
3624277084Sjfv	u32		rss_seed[IXL_KEYSZ];
3625277084Sjfv#else
3626277084Sjfv	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3627277084Sjfv			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3628277084Sjfv			    0x35897377, 0x328b25e1, 0x4fa98922,
3629277084Sjfv			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3630277084Sjfv#endif
3631266423Sjfv
3632277084Sjfv#ifdef RSS
3633277084Sjfv        /* Fetch the configured RSS key */
3634277084Sjfv        rss_getkey((uint8_t *) &rss_seed);
3635277084Sjfv#endif
3636266423Sjfv
3637266423Sjfv	/* Fill out hash function seed */
3638277084Sjfv	for (i = 0; i < IXL_KEYSZ; i++)
3639277084Sjfv                wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3640266423Sjfv
3641266423Sjfv	/* Enable PCTYPES for RSS: */
3642277084Sjfv#ifdef RSS
3643277084Sjfv	rss_hash_config = rss_gethashconfig();
3644277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3645277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3646277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3647277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3648277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3649277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3650277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3651277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3652279033Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3653277151Sjfv		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3654277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3655277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3656277084Sjfv        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3657277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3658277084Sjfv#else
3659266423Sjfv	set_hena =
3660266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3661266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3662266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3663266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3664266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3665266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3666266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3667266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3668266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3669266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3670266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3671277084Sjfv#endif
3672266423Sjfv	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3673266423Sjfv	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3674266423Sjfv	hena |= set_hena;
3675266423Sjfv	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3676266423Sjfv	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3677266423Sjfv
3678266423Sjfv	/* Populate the LUT with the queue ids in round-robin fashion */
3679266423Sjfv	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3680266423Sjfv		if (j == vsi->num_queues)
3681266423Sjfv			j = 0;
3682277084Sjfv#ifdef RSS
3683277084Sjfv		/*
3684277084Sjfv		 * Fetch the RSS bucket id for the given indirection entry.
3685277084Sjfv		 * Cap it at the number of configured buckets (which is
3686277084Sjfv		 * num_queues.)
3687277084Sjfv		 */
3688277084Sjfv		que_id = rss_get_indirection_to_bucket(i);
3689277262Sjfv		que_id = que_id % vsi->num_queues;
3690277084Sjfv#else
3691277084Sjfv		que_id = j;
3692277084Sjfv#endif
3693266423Sjfv		/* lut = 4-byte sliding window of 4 lut entries */
3694277084Sjfv		lut = (lut << 8) | (que_id &
3695266423Sjfv		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3696266423Sjfv		/* On i = 3, we have 4 entries in lut; write to the register */
3697266423Sjfv		if ((i & 3) == 3)
3698266423Sjfv			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3699266423Sjfv	}
3700270346Sjfv	ixl_flush(hw);
3701266423Sjfv}
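
/*
** The HLUT write above packs four LUT entries into one 32-bit register,
** one entry per byte, with the most recent entry in the low byte. A
** minimal standalone sketch of that packing follows; the pack_lut()
** helper is hypothetical and illustrative only, not part of the driver.
*/
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>

static uint32_t
pack_lut(const uint8_t q[4])
{
	uint32_t lut = 0;

	for (int k = 0; k < 4; k++)
		lut = (lut << 8) | q[k];	/* q[3] ends up in bits 7:0 */
	return (lut);
}
/* e.g. pack_lut((const uint8_t[]){0, 1, 2, 3}) == 0x00010203 */
#endif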
3702266423Sjfv
3703266423Sjfv
3704266423Sjfv/*
3705266423Sjfv** This routine is run via a vlan config EVENT;
3706266423Sjfv** it enables us to use the HW Filter table since
3707266423Sjfv** we can get the vlan id. This just creates the
3708266423Sjfv** entry in the soft version of the VFTA; init will
3709266423Sjfv** repopulate the real table.
3710266423Sjfv*/
3711266423Sjfvstatic void
3712270346Sjfvixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3713266423Sjfv{
3714270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3715266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3716270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3717266423Sjfv
3718266423Sjfv	if (ifp->if_softc !=  arg)   /* Not our event */
3719266423Sjfv		return;
3720266423Sjfv
3721266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3722266423Sjfv		return;
3723266423Sjfv
3724270346Sjfv	IXL_PF_LOCK(pf);
3725266423Sjfv	++vsi->num_vlans;
3726270346Sjfv	ixl_add_filter(vsi, hw->mac.addr, vtag);
3727270346Sjfv	IXL_PF_UNLOCK(pf);
3728266423Sjfv}
3729266423Sjfv
3730266423Sjfv/*
3731266423Sjfv** This routine is run via a vlan
3732266423Sjfv** unconfig EVENT; it removes our entry
3733266423Sjfv** from the soft vfta.
3734266423Sjfv*/
3735266423Sjfvstatic void
3736270346Sjfvixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3737266423Sjfv{
3738270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3739266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3740270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3741266423Sjfv
3742266423Sjfv	if (ifp->if_softc !=  arg)
3743266423Sjfv		return;
3744266423Sjfv
3745266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3746266423Sjfv		return;
3747266423Sjfv
3748270346Sjfv	IXL_PF_LOCK(pf);
3749266423Sjfv	--vsi->num_vlans;
3750270346Sjfv	ixl_del_filter(vsi, hw->mac.addr, vtag);
3751270346Sjfv	IXL_PF_UNLOCK(pf);
3752266423Sjfv}
3753266423Sjfv
3754266423Sjfv/*
3755266423Sjfv** This routine updates vlan filters; called by init,
3756266423Sjfv** it scans the filter table and then updates the hw
3757266423Sjfv** after a soft reset.
3758266423Sjfv*/
3759266423Sjfvstatic void
3760270346Sjfvixl_setup_vlan_filters(struct ixl_vsi *vsi)
3761266423Sjfv{
3762270346Sjfv	struct ixl_mac_filter	*f;
3763266423Sjfv	int			cnt = 0, flags;
3764266423Sjfv
3765266423Sjfv	if (vsi->num_vlans == 0)
3766266423Sjfv		return;
3767266423Sjfv	/*
3768266423Sjfv	** Scan the filter list for vlan entries,
3769266423Sjfv	** mark them for addition and then call
3770266423Sjfv	** for the AQ update.
3771266423Sjfv	*/
3772266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3773270346Sjfv		if (f->flags & IXL_FILTER_VLAN) {
3774266423Sjfv			f->flags |=
3775270346Sjfv			    (IXL_FILTER_ADD |
3776270346Sjfv			    IXL_FILTER_USED);
3777266423Sjfv			cnt++;
3778266423Sjfv		}
3779266423Sjfv	}
3780266423Sjfv	if (cnt == 0) {
3781266423Sjfv		printf("setup vlan: no filters found!\n");
3782266423Sjfv		return;
3783266423Sjfv	}
3784270346Sjfv	flags = IXL_FILTER_VLAN;
3785270346Sjfv	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3786270346Sjfv	ixl_add_hw_filters(vsi, flags, cnt);
3787266423Sjfv	return;
3788266423Sjfv}
3789266423Sjfv
3790266423Sjfv/*
3791266423Sjfv** Initialize filter list and add filters that the hardware
3792266423Sjfv** needs to know about.
3793266423Sjfv*/
3794266423Sjfvstatic void
3795270346Sjfvixl_init_filters(struct ixl_vsi *vsi)
3796266423Sjfv{
3797269198Sjfv	/* Add broadcast address */
3798279858Sjfv	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3799266423Sjfv}
3800266423Sjfv
3801266423Sjfv/*
3802266423Sjfv** This routine adds multicast filters
3803266423Sjfv*/
3804266423Sjfvstatic void
3805270346Sjfvixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3806266423Sjfv{
3807270346Sjfv	struct ixl_mac_filter *f;
3808266423Sjfv
3809266423Sjfv	/* Does one already exist */
3810270346Sjfv	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3811266423Sjfv	if (f != NULL)
3812266423Sjfv		return;
3813266423Sjfv
3814270346Sjfv	f = ixl_get_filter(vsi);
3815266423Sjfv	if (f == NULL) {
3816266423Sjfv		printf("WARNING: no filter available!!\n");
3817266423Sjfv		return;
3818266423Sjfv	}
3819266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3820270346Sjfv	f->vlan = IXL_VLAN_ANY;
3821270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3822270346Sjfv	    | IXL_FILTER_MC);
3823266423Sjfv
3824266423Sjfv	return;
3825266423Sjfv}
3826266423Sjfv
3827279858Sjfvstatic void
3828279858Sjfvixl_reconfigure_filters(struct ixl_vsi *vsi)
3829279858Sjfv{
3830279858Sjfv
3831279858Sjfv	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3832279858Sjfv}
3833279858Sjfv
3834266423Sjfv/*
3835266423Sjfv** This routine adds macvlan filters
3836266423Sjfv*/
3837266423Sjfvstatic void
3838270346Sjfvixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3839266423Sjfv{
3840270346Sjfv	struct ixl_mac_filter	*f, *tmp;
3841279858Sjfv	struct ixl_pf		*pf;
3842279858Sjfv	device_t		dev;
3843266423Sjfv
3844270346Sjfv	DEBUGOUT("ixl_add_filter: begin");
3845266423Sjfv
3846279858Sjfv	pf = vsi->back;
3847279858Sjfv	dev = pf->dev;
3848279858Sjfv
3849266423Sjfv	/* Does one already exist */
3850270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3851266423Sjfv	if (f != NULL)
3852266423Sjfv		return;
3853266423Sjfv	/*
3854266423Sjfv	** Is this the first vlan being registered? If so, we
3855266423Sjfv	** need to remove the ANY filter that indicates we are
3856266423Sjfv	** not in a vlan, and replace that with a 0 filter.
3857266423Sjfv	*/
3858270346Sjfv	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3859270346Sjfv		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3860266423Sjfv		if (tmp != NULL) {
3861270346Sjfv			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3862270346Sjfv			ixl_add_filter(vsi, macaddr, 0);
3863266423Sjfv		}
3864266423Sjfv	}
3865266423Sjfv
3866270346Sjfv	f = ixl_get_filter(vsi);
3867266423Sjfv	if (f == NULL) {
3868266423Sjfv		device_printf(dev, "WARNING: no filter available!!\n");
3869266423Sjfv		return;
3870266423Sjfv	}
3871266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3872266423Sjfv	f->vlan = vlan;
3873270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3874270346Sjfv	if (f->vlan != IXL_VLAN_ANY)
3875270346Sjfv		f->flags |= IXL_FILTER_VLAN;
3876279858Sjfv	else
3877279858Sjfv		vsi->num_macs++;
3878266423Sjfv
3879270346Sjfv	ixl_add_hw_filters(vsi, f->flags, 1);
3880266423Sjfv	return;
3881266423Sjfv}
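
/*
** Soft filter list lifecycle, for reference: ixl_add_filter() marks a
** new entry IXL_FILTER_ADD | IXL_FILTER_USED and pushes it via
** ixl_add_hw_filters(), which clears IXL_FILTER_ADD as it builds the
** admin queue request; ixl_del_filter() marks IXL_FILTER_DEL and
** ixl_del_hw_filters() then removes the entry from both the hardware
** and the SLIST.
*/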
3882266423Sjfv
3883266423Sjfvstatic void
3884270346Sjfvixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3885266423Sjfv{
3886270346Sjfv	struct ixl_mac_filter *f;
3887266423Sjfv
3888270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3889266423Sjfv	if (f == NULL)
3890266423Sjfv		return;
3891266423Sjfv
3892270346Sjfv	f->flags |= IXL_FILTER_DEL;
3893270346Sjfv	ixl_del_hw_filters(vsi, 1);
3894279858Sjfv	vsi->num_macs--;
3895266423Sjfv
3896266423Sjfv	/* Check if this is the last vlan removal */
3897270346Sjfv	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3898266423Sjfv		/* Switch back to a non-vlan filter */
3899270346Sjfv		ixl_del_filter(vsi, macaddr, 0);
3900270346Sjfv		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3901266423Sjfv	}
3902266423Sjfv	return;
3903266423Sjfv}
3904266423Sjfv
3905266423Sjfv/*
3906266423Sjfv** Find the filter with both matching mac addr and vlan id
3907266423Sjfv*/
3908270346Sjfvstatic struct ixl_mac_filter *
3909270346Sjfvixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3910266423Sjfv{
3911270346Sjfv	struct ixl_mac_filter	*f;
3912266423Sjfv	bool			match = FALSE;
3913266423Sjfv
3914266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3915266423Sjfv		if (!cmp_etheraddr(f->macaddr, macaddr))
3916266423Sjfv			continue;
3917266423Sjfv		if (f->vlan == vlan) {
3918266423Sjfv			match = TRUE;
3919266423Sjfv			break;
3920266423Sjfv		}
3921266423Sjfv	}
3922266423Sjfv
3923266423Sjfv	if (!match)
3924266423Sjfv		f = NULL;
3925266423Sjfv	return (f);
3926266423Sjfv}
3927266423Sjfv
3928266423Sjfv/*
3929266423Sjfv** This routine takes additions to the vsi filter
3930266423Sjfv** table and creates an Admin Queue call to create
3931266423Sjfv** the filters in the hardware.
3932266423Sjfv*/
3933266423Sjfvstatic void
3934270346Sjfvixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3935266423Sjfv{
3936266423Sjfv	struct i40e_aqc_add_macvlan_element_data *a, *b;
3937270346Sjfv	struct ixl_mac_filter	*f;
3938279858Sjfv	struct ixl_pf		*pf;
3939279858Sjfv	struct i40e_hw		*hw;
3940279858Sjfv	device_t		dev;
3941279858Sjfv	int			err, j = 0;
3942266423Sjfv
3943279858Sjfv	pf = vsi->back;
3944279858Sjfv	dev = pf->dev;
3945279858Sjfv	hw = &pf->hw;
3946279858Sjfv	IXL_PF_LOCK_ASSERT(pf);
3947279858Sjfv
3948266423Sjfv	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3949266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
3950266423Sjfv	if (a == NULL) {
3951277084Sjfv		device_printf(dev, "add_hw_filters failed to get memory\n");
3952266423Sjfv		return;
3953266423Sjfv	}
3954266423Sjfv
3955266423Sjfv	/*
3956266423Sjfv	** Scan the filter list, each time we find one
3957266423Sjfv	** we add it to the admin queue array and turn off
3958266423Sjfv	** the add bit.
3959266423Sjfv	*/
3960266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3961266423Sjfv		if (f->flags == flags) {
3962266423Sjfv			b = &a[j]; // a pox on fvl long names :)
3963266423Sjfv			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3964279858Sjfv			if (f->vlan == IXL_VLAN_ANY) {
3965279858Sjfv				b->vlan_tag = 0;
3966279858Sjfv				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3967279858Sjfv			} else {
3968279858Sjfv				b->vlan_tag = f->vlan;
3969279858Sjfv				b->flags = 0;
3970279858Sjfv			}
3971279858Sjfv			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3972270346Sjfv			f->flags &= ~IXL_FILTER_ADD;
3973266423Sjfv			j++;
3974266423Sjfv		}
3975266423Sjfv		if (j == cnt)
3976266423Sjfv			break;
3977266423Sjfv	}
3978266423Sjfv	if (j > 0) {
3979266423Sjfv		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3980266423Sjfv		if (err)
3981279033Sjfv			device_printf(dev, "aq_add_macvlan err %d, "
3982279033Sjfv			    "aq_error %d\n", err, hw->aq.asq_last_status);
3983266423Sjfv		else
3984266423Sjfv			vsi->hw_filters_add += j;
3985266423Sjfv	}
3986266423Sjfv	free(a, M_DEVBUF);
3987266423Sjfv	return;
3988266423Sjfv}
3989266423Sjfv
3990266423Sjfv/*
3991266423Sjfv** This routine takes removals from the vsi filter
3992266423Sjfv** table and creates an Admin Queue call to delete
3993266423Sjfv** the filters in the hardware.
3994266423Sjfv*/
3995266423Sjfvstatic void
3996270346Sjfvixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3997266423Sjfv{
3998266423Sjfv	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3999279858Sjfv	struct ixl_pf		*pf;
4000279858Sjfv	struct i40e_hw		*hw;
4001279858Sjfv	device_t		dev;
4002270346Sjfv	struct ixl_mac_filter	*f, *f_temp;
4003266423Sjfv	int			err, j = 0;
4004266423Sjfv
4005270346Sjfv	DEBUGOUT("ixl_del_hw_filters: begin\n");
4006266423Sjfv
4007279858Sjfv	pf = vsi->back;
4008279858Sjfv	hw = &pf->hw;
4009279858Sjfv	dev = pf->dev;
4010279858Sjfv
4011266423Sjfv	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
4012266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
4013266423Sjfv	if (d == NULL) {
4014266423Sjfv		printf("del hw filter failed to get memory\n");
4015266423Sjfv		return;
4016266423Sjfv	}
4017266423Sjfv
4018266423Sjfv	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
4019270346Sjfv		if (f->flags & IXL_FILTER_DEL) {
4020266423Sjfv			e = &d[j]; // a pox on fvl long names :)
4021266423Sjfv			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
4022270346Sjfv			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
4023266423Sjfv			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
4024266423Sjfv			/* delete entry from vsi list */
4025270346Sjfv			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
4026266423Sjfv			free(f, M_DEVBUF);
4027266423Sjfv			j++;
4028266423Sjfv		}
4029266423Sjfv		if (j == cnt)
4030266423Sjfv			break;
4031266423Sjfv	}
4032266423Sjfv	if (j > 0) {
4033266423Sjfv		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
4034266423Sjfv		/* NOTE: returns ENOENT every time but seems to work fine,
4035266423Sjfv		   so we'll ignore that specific error. */
4036277084Sjfv		// TODO: Does this still occur on current firmwares?
4037266423Sjfv		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
4038266423Sjfv			int sc = 0;
4039266423Sjfv			for (int i = 0; i < j; i++)
4040266423Sjfv				sc += (!d[i].error_code);
4041266423Sjfv			vsi->hw_filters_del += sc;
4042266423Sjfv			device_printf(dev,
4043266423Sjfv			    "Failed to remove %d/%d filters, aq error %d\n",
4044266423Sjfv			    j - sc, j, hw->aq.asq_last_status);
4045266423Sjfv		} else
4046266423Sjfv			vsi->hw_filters_del += j;
4047266423Sjfv	}
4048266423Sjfv	free(d, M_DEVBUF);
4049266423Sjfv
4050270346Sjfv	DEBUGOUT("ixl_del_hw_filters: end\n");
4051266423Sjfv	return;
4052266423Sjfv}
4053266423Sjfv
4054279858Sjfvstatic int
4055270346Sjfvixl_enable_rings(struct ixl_vsi *vsi)
4056266423Sjfv{
4057279858Sjfv	struct ixl_pf	*pf = vsi->back;
4058279858Sjfv	struct i40e_hw	*hw = &pf->hw;
4059279858Sjfv	int		index, error;
4060266423Sjfv	u32		reg;
4061266423Sjfv
4062279858Sjfv	error = 0;
4063266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
4064279858Sjfv		index = vsi->first_queue + i;
4065279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, TRUE);
4066266423Sjfv
4067279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
4068266423Sjfv		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
4069266423Sjfv		    I40E_QTX_ENA_QENA_STAT_MASK;
4070279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
4071266423Sjfv		/* Verify the enable took */
4072266423Sjfv		for (int j = 0; j < 10; j++) {
4073279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
4074266423Sjfv			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
4075266423Sjfv				break;
4076266423Sjfv			i40e_msec_delay(10);
4077266423Sjfv		}
4078279858Sjfv		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
4079279858Sjfv			device_printf(pf->dev, "TX queue %d disabled!\n",
4080279858Sjfv			    index);
4081279858Sjfv			error = ETIMEDOUT;
4082279858Sjfv		}
4083266423Sjfv
4084279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
4085266423Sjfv		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
4086266423Sjfv		    I40E_QRX_ENA_QENA_STAT_MASK;
4087279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
4088266423Sjfv		/* Verify the enable took */
4089266423Sjfv		for (int j = 0; j < 10; j++) {
4090279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
4091266423Sjfv			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
4092266423Sjfv				break;
4093266423Sjfv			i40e_msec_delay(10);
4094266423Sjfv		}
4095279858Sjfv		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
4096279858Sjfv			device_printf(pf->dev, "RX queue %d disabled!\n",
4097279858Sjfv			    index);
4098279858Sjfv			error = ETIMEDOUT;
4099279858Sjfv		}
4100266423Sjfv	}
4101279858Sjfv
4102279858Sjfv	return (error);
4103266423Sjfv}
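
/*
** Both the enable path above and the disable path below poll the
** QENA_STAT bit up to 10 times with a 10 ms delay, i.e. each queue gets
** roughly 100 ms to change state before ETIMEDOUT is returned.
*/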
4104266423Sjfv
4105279858Sjfvstatic int
4106270346Sjfvixl_disable_rings(struct ixl_vsi *vsi)
4107266423Sjfv{
4108279858Sjfv	struct ixl_pf	*pf = vsi->back;
4109279858Sjfv	struct i40e_hw	*hw = &pf->hw;
4110279858Sjfv	int		index, error;
4111266423Sjfv	u32		reg;
4112266423Sjfv
4113279858Sjfv	error = 0;
4114266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
4115279858Sjfv		index = vsi->first_queue + i;
4116279858Sjfv
4117279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, FALSE);
4118266423Sjfv		i40e_usec_delay(500);
4119266423Sjfv
4120279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
4121266423Sjfv		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4122279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
4123266423Sjfv		/* Verify the disable took */
4124266423Sjfv		for (int j = 0; j < 10; j++) {
4125279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
4126266423Sjfv			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4127266423Sjfv				break;
4128266423Sjfv			i40e_msec_delay(10);
4129266423Sjfv		}
4130279858Sjfv		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
4131279858Sjfv			device_printf(pf->dev, "TX queue %d still enabled!\n",
4132279858Sjfv			    index);
4133279858Sjfv			error = ETIMEDOUT;
4134279858Sjfv		}
4135266423Sjfv
4136279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
4137266423Sjfv		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4138279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
4139266423Sjfv		/* Verify the disable took */
4140266423Sjfv		for (int j = 0; j < 10; j++) {
4141279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
4142266423Sjfv			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4143266423Sjfv				break;
4144266423Sjfv			i40e_msec_delay(10);
4145266423Sjfv		}
4146279858Sjfv		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
4147279858Sjfv			device_printf(pf->dev, "RX queue %d still enabled!\n",
4148279858Sjfv			    index);
4149279858Sjfv			error = ETIMEDOUT;
4150279858Sjfv		}
4151266423Sjfv	}
4152279858Sjfv
4153279858Sjfv	return (error);
4154266423Sjfv}
4155266423Sjfv
4156269198Sjfv/**
4157270346Sjfv * ixl_handle_mdd_event
4158269198Sjfv *
4159269198Sjfv * Called from the interrupt handler to identify possibly malicious VFs
4160269198Sjfv * (also detects events caused by the PF itself)
4161269198Sjfv **/
4162270346Sjfvstatic void ixl_handle_mdd_event(struct ixl_pf *pf)
4163269198Sjfv{
4164269198Sjfv	struct i40e_hw *hw = &pf->hw;
4165269198Sjfv	device_t dev = pf->dev;
4166269198Sjfv	bool mdd_detected = false;
4167269198Sjfv	bool pf_mdd_detected = false;
4168269198Sjfv	u32 reg;
4169269198Sjfv
4170269198Sjfv	/* find what triggered the MDD event */
4171269198Sjfv	reg = rd32(hw, I40E_GL_MDET_TX);
4172269198Sjfv	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4173269198Sjfv		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
4174269198Sjfv				I40E_GL_MDET_TX_PF_NUM_SHIFT;
4175269198Sjfv		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
4176269198Sjfv				I40E_GL_MDET_TX_EVENT_SHIFT;
4177269198Sjfv		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
4178269198Sjfv				I40E_GL_MDET_TX_QUEUE_SHIFT;
4179269198Sjfv		device_printf(dev,
4180269198Sjfv			 "Malicious Driver Detection event 0x%02x"
4181269198Sjfv			 " on TX queue %d pf number 0x%02x\n",
4182269198Sjfv			 event, queue, pf_num);
4183269198Sjfv		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4184269198Sjfv		mdd_detected = true;
4185269198Sjfv	}
4186269198Sjfv	reg = rd32(hw, I40E_GL_MDET_RX);
4187269198Sjfv	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4188269198Sjfv		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
4189269198Sjfv				I40E_GL_MDET_RX_FUNCTION_SHIFT;
4190269198Sjfv		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
4191269198Sjfv				I40E_GL_MDET_RX_EVENT_SHIFT;
4192269198Sjfv		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
4193269198Sjfv				I40E_GL_MDET_RX_QUEUE_SHIFT;
4194269198Sjfv		device_printf(dev,
4195269198Sjfv			 "Malicious Driver Detection event 0x%02x"
4196269198Sjfv			 " on RX queue %d of function 0x%02x\n",
4197269198Sjfv			 event, queue, func);
4198269198Sjfv		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4199269198Sjfv		mdd_detected = true;
4200269198Sjfv	}
4201269198Sjfv
4202269198Sjfv	if (mdd_detected) {
4203269198Sjfv		reg = rd32(hw, I40E_PF_MDET_TX);
4204269198Sjfv		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4205269198Sjfv			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4206269198Sjfv			device_printf(dev,
4207269198Sjfv				 "MDD TX event is for this function 0x%08x\n",
4208269198Sjfv				 reg);
4209269198Sjfv			pf_mdd_detected = true;
4210269198Sjfv		}
4211269198Sjfv		reg = rd32(hw, I40E_PF_MDET_RX);
4212269198Sjfv		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4213269198Sjfv			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4214269198Sjfv			device_printf(dev,
4215269198Sjfv				 "MDD RX event is for this function 0x%08x\n",
4216269198Sjfv				 reg);
4217269198Sjfv			pf_mdd_detected = true;
4218269198Sjfv		}
4219269198Sjfv	}
4220269198Sjfv
4221269198Sjfv	/* re-enable mdd interrupt cause */
4222269198Sjfv	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4223269198Sjfv	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4224269198Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4225270346Sjfv	ixl_flush(hw);
4226269198Sjfv}
4227269198Sjfv
4228266423Sjfvstatic void
4229270346Sjfvixl_enable_intr(struct ixl_vsi *vsi)
4230266423Sjfv{
4231266423Sjfv	struct i40e_hw		*hw = vsi->hw;
4232270346Sjfv	struct ixl_queue	*que = vsi->queues;
4233266423Sjfv
4234270346Sjfv	if (ixl_enable_msix) {
4235270346Sjfv		ixl_enable_adminq(hw);
4236266423Sjfv		for (int i = 0; i < vsi->num_queues; i++, que++)
4237270346Sjfv			ixl_enable_queue(hw, que->me);
4238266423Sjfv	} else
4239270346Sjfv		ixl_enable_legacy(hw);
4240266423Sjfv}
4241266423Sjfv
4242266423Sjfvstatic void
4243279858Sjfvixl_disable_rings_intr(struct ixl_vsi *vsi)
4244266423Sjfv{
4245266423Sjfv	struct i40e_hw		*hw = vsi->hw;
4246270346Sjfv	struct ixl_queue	*que = vsi->queues;
4247266423Sjfv
4248279858Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++)
4249279858Sjfv		ixl_disable_queue(hw, que->me);
4250279858Sjfv}
4251279858Sjfv
4252279858Sjfvstatic void
4253279858Sjfvixl_disable_intr(struct ixl_vsi *vsi)
4254279858Sjfv{
4255279858Sjfv	struct i40e_hw		*hw = vsi->hw;
4256279858Sjfv
4257279858Sjfv	if (ixl_enable_msix)
4258270346Sjfv		ixl_disable_adminq(hw);
4259279858Sjfv	else
4260270346Sjfv		ixl_disable_legacy(hw);
4261266423Sjfv}
4262266423Sjfv
4263266423Sjfvstatic void
4264270346Sjfvixl_enable_adminq(struct i40e_hw *hw)
4265266423Sjfv{
4266266423Sjfv	u32		reg;
4267266423Sjfv
4268266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4269266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4270270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4271266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4272270346Sjfv	ixl_flush(hw);
4273266423Sjfv}
4274266423Sjfv
4275266423Sjfvstatic void
4276270346Sjfvixl_disable_adminq(struct i40e_hw *hw)
4277266423Sjfv{
4278266423Sjfv	u32		reg;
4279266423Sjfv
4280270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4281266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4282299547Serj	ixl_flush(hw);
4283266423Sjfv}
4284266423Sjfv
4285266423Sjfvstatic void
4286270346Sjfvixl_enable_queue(struct i40e_hw *hw, int id)
4287266423Sjfv{
4288266423Sjfv	u32		reg;
4289266423Sjfv
4290266423Sjfv	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4291266423Sjfv	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4292270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4293266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4294266423Sjfv}
4295266423Sjfv
4296266423Sjfvstatic void
4297270346Sjfvixl_disable_queue(struct i40e_hw *hw, int id)
4298266423Sjfv{
4299266423Sjfv	u32		reg;
4300266423Sjfv
4301270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4302266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4303266423Sjfv}
4304266423Sjfv
4305266423Sjfvstatic void
4306270346Sjfvixl_enable_legacy(struct i40e_hw *hw)
4307266423Sjfv{
4308266423Sjfv	u32		reg;
4309266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4310266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4311270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4312266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4313266423Sjfv}
4314266423Sjfv
4315266423Sjfvstatic void
4316270346Sjfvixl_disable_legacy(struct i40e_hw *hw)
4317266423Sjfv{
4318266423Sjfv	u32		reg;
4319266423Sjfv
4320270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4321266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4322266423Sjfv}
4323266423Sjfv
4324266423Sjfvstatic void
4325270346Sjfvixl_update_stats_counters(struct ixl_pf *pf)
4326266423Sjfv{
4327266423Sjfv	struct i40e_hw	*hw = &pf->hw;
4328279858Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
4329279858Sjfv	struct ixl_vf	*vf;
4330269198Sjfv
4331266423Sjfv	struct i40e_hw_port_stats *nsd = &pf->stats;
4332266423Sjfv	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4333266423Sjfv
4334266423Sjfv	/* Update hw stats */
4335270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4336266423Sjfv			   pf->stat_offsets_loaded,
4337266423Sjfv			   &osd->crc_errors, &nsd->crc_errors);
4338270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4339266423Sjfv			   pf->stat_offsets_loaded,
4340266423Sjfv			   &osd->illegal_bytes, &nsd->illegal_bytes);
4341270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4342266423Sjfv			   I40E_GLPRT_GORCL(hw->port),
4343266423Sjfv			   pf->stat_offsets_loaded,
4344266423Sjfv			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4345270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4346266423Sjfv			   I40E_GLPRT_GOTCL(hw->port),
4347266423Sjfv			   pf->stat_offsets_loaded,
4348266423Sjfv			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4349270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4350266423Sjfv			   pf->stat_offsets_loaded,
4351266423Sjfv			   &osd->eth.rx_discards,
4352266423Sjfv			   &nsd->eth.rx_discards);
4353270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4354266423Sjfv			   I40E_GLPRT_UPRCL(hw->port),
4355266423Sjfv			   pf->stat_offsets_loaded,
4356266423Sjfv			   &osd->eth.rx_unicast,
4357266423Sjfv			   &nsd->eth.rx_unicast);
4358270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4359266423Sjfv			   I40E_GLPRT_UPTCL(hw->port),
4360266423Sjfv			   pf->stat_offsets_loaded,
4361266423Sjfv			   &osd->eth.tx_unicast,
4362266423Sjfv			   &nsd->eth.tx_unicast);
4363270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4364266423Sjfv			   I40E_GLPRT_MPRCL(hw->port),
4365266423Sjfv			   pf->stat_offsets_loaded,
4366266423Sjfv			   &osd->eth.rx_multicast,
4367266423Sjfv			   &nsd->eth.rx_multicast);
4368270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4369266423Sjfv			   I40E_GLPRT_MPTCL(hw->port),
4370266423Sjfv			   pf->stat_offsets_loaded,
4371266423Sjfv			   &osd->eth.tx_multicast,
4372266423Sjfv			   &nsd->eth.tx_multicast);
4373270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4374266423Sjfv			   I40E_GLPRT_BPRCL(hw->port),
4375266423Sjfv			   pf->stat_offsets_loaded,
4376266423Sjfv			   &osd->eth.rx_broadcast,
4377266423Sjfv			   &nsd->eth.rx_broadcast);
4378270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4379266423Sjfv			   I40E_GLPRT_BPTCL(hw->port),
4380266423Sjfv			   pf->stat_offsets_loaded,
4381266423Sjfv			   &osd->eth.tx_broadcast,
4382266423Sjfv			   &nsd->eth.tx_broadcast);
4383266423Sjfv
4384270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4385266423Sjfv			   pf->stat_offsets_loaded,
4386266423Sjfv			   &osd->tx_dropped_link_down,
4387266423Sjfv			   &nsd->tx_dropped_link_down);
4388270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4389266423Sjfv			   pf->stat_offsets_loaded,
4390266423Sjfv			   &osd->mac_local_faults,
4391266423Sjfv			   &nsd->mac_local_faults);
4392270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4393266423Sjfv			   pf->stat_offsets_loaded,
4394266423Sjfv			   &osd->mac_remote_faults,
4395266423Sjfv			   &nsd->mac_remote_faults);
4396270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4397266423Sjfv			   pf->stat_offsets_loaded,
4398266423Sjfv			   &osd->rx_length_errors,
4399266423Sjfv			   &nsd->rx_length_errors);
4400266423Sjfv
4401269198Sjfv	/* Flow control (LFC) stats */
4402270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4403266423Sjfv			   pf->stat_offsets_loaded,
4404266423Sjfv			   &osd->link_xon_rx, &nsd->link_xon_rx);
4405270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4406266423Sjfv			   pf->stat_offsets_loaded,
4407266423Sjfv			   &osd->link_xon_tx, &nsd->link_xon_tx);
4408270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4409266423Sjfv			   pf->stat_offsets_loaded,
4410266423Sjfv			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
4411270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4412266423Sjfv			   pf->stat_offsets_loaded,
4413266423Sjfv			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
4414266423Sjfv
4415269198Sjfv	/* Packet size stats rx */
4416270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4417266423Sjfv			   I40E_GLPRT_PRC64L(hw->port),
4418266423Sjfv			   pf->stat_offsets_loaded,
4419266423Sjfv			   &osd->rx_size_64, &nsd->rx_size_64);
4420270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4421266423Sjfv			   I40E_GLPRT_PRC127L(hw->port),
4422266423Sjfv			   pf->stat_offsets_loaded,
4423266423Sjfv			   &osd->rx_size_127, &nsd->rx_size_127);
4424270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4425266423Sjfv			   I40E_GLPRT_PRC255L(hw->port),
4426266423Sjfv			   pf->stat_offsets_loaded,
4427266423Sjfv			   &osd->rx_size_255, &nsd->rx_size_255);
4428270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4429266423Sjfv			   I40E_GLPRT_PRC511L(hw->port),
4430266423Sjfv			   pf->stat_offsets_loaded,
4431266423Sjfv			   &osd->rx_size_511, &nsd->rx_size_511);
4432270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4433266423Sjfv			   I40E_GLPRT_PRC1023L(hw->port),
4434266423Sjfv			   pf->stat_offsets_loaded,
4435266423Sjfv			   &osd->rx_size_1023, &nsd->rx_size_1023);
4436270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4437266423Sjfv			   I40E_GLPRT_PRC1522L(hw->port),
4438266423Sjfv			   pf->stat_offsets_loaded,
4439266423Sjfv			   &osd->rx_size_1522, &nsd->rx_size_1522);
4440270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4441266423Sjfv			   I40E_GLPRT_PRC9522L(hw->port),
4442266423Sjfv			   pf->stat_offsets_loaded,
4443266423Sjfv			   &osd->rx_size_big, &nsd->rx_size_big);
4444266423Sjfv
4445269198Sjfv	/* Packet size stats tx */
4446270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4447266423Sjfv			   I40E_GLPRT_PTC64L(hw->port),
4448266423Sjfv			   pf->stat_offsets_loaded,
4449266423Sjfv			   &osd->tx_size_64, &nsd->tx_size_64);
4450270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4451266423Sjfv			   I40E_GLPRT_PTC127L(hw->port),
4452266423Sjfv			   pf->stat_offsets_loaded,
4453266423Sjfv			   &osd->tx_size_127, &nsd->tx_size_127);
4454270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4455266423Sjfv			   I40E_GLPRT_PTC255L(hw->port),
4456266423Sjfv			   pf->stat_offsets_loaded,
4457266423Sjfv			   &osd->tx_size_255, &nsd->tx_size_255);
4458270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4459266423Sjfv			   I40E_GLPRT_PTC511L(hw->port),
4460266423Sjfv			   pf->stat_offsets_loaded,
4461266423Sjfv			   &osd->tx_size_511, &nsd->tx_size_511);
4462270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4463266423Sjfv			   I40E_GLPRT_PTC1023L(hw->port),
4464266423Sjfv			   pf->stat_offsets_loaded,
4465266423Sjfv			   &osd->tx_size_1023, &nsd->tx_size_1023);
4466270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4467266423Sjfv			   I40E_GLPRT_PTC1522L(hw->port),
4468266423Sjfv			   pf->stat_offsets_loaded,
4469266423Sjfv			   &osd->tx_size_1522, &nsd->tx_size_1522);
4470270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4471266423Sjfv			   I40E_GLPRT_PTC9522L(hw->port),
4472266423Sjfv			   pf->stat_offsets_loaded,
4473266423Sjfv			   &osd->tx_size_big, &nsd->tx_size_big);
4474266423Sjfv
4475270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4476266423Sjfv			   pf->stat_offsets_loaded,
4477266423Sjfv			   &osd->rx_undersize, &nsd->rx_undersize);
4478270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4479266423Sjfv			   pf->stat_offsets_loaded,
4480266423Sjfv			   &osd->rx_fragments, &nsd->rx_fragments);
4481270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4482266423Sjfv			   pf->stat_offsets_loaded,
4483266423Sjfv			   &osd->rx_oversize, &nsd->rx_oversize);
4484270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4485266423Sjfv			   pf->stat_offsets_loaded,
4486266423Sjfv			   &osd->rx_jabber, &nsd->rx_jabber);
4487266423Sjfv	pf->stat_offsets_loaded = true;
4488269198Sjfv	/* End hw stats */
4489266423Sjfv
4490266423Sjfv	/* Update vsi stats */
4491279858Sjfv	ixl_update_vsi_stats(vsi);
4492266423Sjfv
4493279858Sjfv	for (int i = 0; i < pf->num_vfs; i++) {
4494279858Sjfv		vf = &pf->vfs[i];
4495279858Sjfv		if (vf->vf_flags & VF_FLAG_ENABLED)
4496279858Sjfv			ixl_update_eth_stats(&pf->vfs[i].vsi);
4497279858Sjfv	}
4498266423Sjfv}
4499266423Sjfv
4500266423Sjfv/*
4501266423Sjfv** Taskqueue handler for MSIX Adminq interrupts
4502266423Sjfv**  - runs outside interrupt context since it might sleep
4503266423Sjfv*/
4504266423Sjfvstatic void
4505270346Sjfvixl_do_adminq(void *context, int pending)
4506266423Sjfv{
4507270346Sjfv	struct ixl_pf			*pf = context;
4508266423Sjfv	struct i40e_hw			*hw = &pf->hw;
4509266423Sjfv	struct i40e_arq_event_info	event;
4510266423Sjfv	i40e_status			ret;
4511299547Serj	device_t			dev = pf->dev;
4512299549Serj	u32				reg, loop = 0;
4513266423Sjfv	u16				opcode, result;
4514266423Sjfv
4515299549Serj	/* XXX: Handling EMPR reset here is possibly an inappropriate reuse of this task */
4516299549Serj	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4517299549Serj		int count = 0;
4518299549Serj		/* An EMPR reset typically finishes within 3-4 seconds */
4519299549Serj		while (count++ < 100) {
4520299549Serj			reg = rd32(hw, I40E_GLGEN_RSTAT);
4521299549Serj			reg = reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
4522299549Serj			if (reg) {
4523299549Serj				i40e_msec_delay(100);
4524299549Serj			} else {
4525299549Serj				break;
4526299549Serj			}
4527299549Serj		}
4528299549Serj		device_printf(dev, "EMPR reset wait count: %d\n", count);
4529299549Serj
4530299549Serj		device_printf(dev, "Rebuilding HW structs...\n");
4531299549Serj		/* XXX: Restarting the driver from this context is suspect and could panic in the future */
4532299549Serj		ixl_stop(pf);
4533299549Serj		ixl_init(pf);
4534299549Serj
4535299549Serj		atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
4536299549Serj		return;
4537299549Serj	}
4538299549Serj
4539299549Serj	/* Process any pending Admin Queue events */
4540274205Sjfv	event.buf_len = IXL_AQ_BUF_SZ;
4541274205Sjfv	event.msg_buf = malloc(event.buf_len,
4542266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
4543266423Sjfv	if (!event.msg_buf) {
4544299547Serj		device_printf(dev, "%s: Unable to allocate memory for Admin"
4545299547Serj		    " Queue event!\n", __func__);
4546266423Sjfv		return;
4547266423Sjfv	}
4548266423Sjfv
4549279858Sjfv	IXL_PF_LOCK(pf);
4550266423Sjfv	/* clean and process any events */
4551266423Sjfv	do {
4552266423Sjfv		ret = i40e_clean_arq_element(hw, &event, &result);
4553266423Sjfv		if (ret)
4554266423Sjfv			break;
4555266423Sjfv		opcode = LE16_TO_CPU(event.desc.opcode);
4556299547Serj#ifdef IXL_DEBUG
4557299547Serj		device_printf(dev, "%s: Admin Queue event: %#06x\n", __func__, opcode);
4558299547Serj#endif
4559266423Sjfv		switch (opcode) {
4560266423Sjfv		case i40e_aqc_opc_get_link_status:
4561279858Sjfv			ixl_link_event(pf, &event);
4562266423Sjfv			break;
4563266423Sjfv		case i40e_aqc_opc_send_msg_to_pf:
4564279858Sjfv#ifdef PCI_IOV
4565279858Sjfv			ixl_handle_vf_msg(pf, &event);
4566279858Sjfv#endif
4567266423Sjfv			break;
4568266423Sjfv		case i40e_aqc_opc_event_lan_overflow:
4569266423Sjfv		default:
4570266423Sjfv			break;
4571266423Sjfv		}
4572266423Sjfv
4573270346Sjfv	} while (result && (loop++ < IXL_ADM_LIMIT));
4574266423Sjfv
4575266423Sjfv	free(event.msg_buf, M_DEVBUF);
4576266423Sjfv
4577279858Sjfv	/*
4578279858Sjfv	 * If there are still messages to process, reschedule ourselves.
4579279858Sjfv	 * Otherwise, re-enable our interrupt and go to sleep.
4580279858Sjfv	 */
4581279858Sjfv	if (result > 0)
4582279858Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
4583266423Sjfv	else
4584299547Serj		ixl_enable_adminq(hw);
4585279858Sjfv
4586279858Sjfv	IXL_PF_UNLOCK(pf);
4587266423Sjfv}
4588266423Sjfv
4589266423Sjfvstatic int
4590270346Sjfvixl_debug_info(SYSCTL_HANDLER_ARGS)
4591266423Sjfv{
4592270346Sjfv	struct ixl_pf	*pf;
4593266423Sjfv	int		error, input = 0;
4594266423Sjfv
4595266423Sjfv	error = sysctl_handle_int(oidp, &input, 0, req);
4596266423Sjfv
4597266423Sjfv	if (error || !req->newptr)
4598266423Sjfv		return (error);
4599266423Sjfv
4600266423Sjfv	if (input == 1) {
4601270346Sjfv		pf = (struct ixl_pf *)arg1;
4602270346Sjfv		ixl_print_debug_info(pf);
4603266423Sjfv	}
4604266423Sjfv
4605266423Sjfv	return (error);
4606266423Sjfv}
4607266423Sjfv
4608266423Sjfvstatic void
4609270346Sjfvixl_print_debug_info(struct ixl_pf *pf)
4610266423Sjfv{
4611266423Sjfv	struct i40e_hw		*hw = &pf->hw;
4612270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
4613270346Sjfv	struct ixl_queue	*que = vsi->queues;
4614266423Sjfv	struct rx_ring		*rxr = &que->rxr;
4615266423Sjfv	struct tx_ring		*txr = &que->txr;
4616266423Sjfv	u32			reg;
4617266423Sjfv
4618266423Sjfv
4619270799Sbz	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4620270799Sbz	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4621266423Sjfv	printf("RX next check = %x\n", rxr->next_check);
4622270799Sbz	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4623270799Sbz	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4624266423Sjfv	printf("TX desc avail = %x\n", txr->avail);
4625266423Sjfv
4626266423Sjfv	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4627266423Sjfv	printf("RX Bytes = %x\n", reg);
4628266423Sjfv	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4629266423Sjfv	printf("Port RX Bytes = %x\n", reg);
4630266423Sjfv	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4631266423Sjfv	printf("RX discard = %x\n", reg);
4632266423Sjfv	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4633266423Sjfv	printf("Port RX discard = %x\n", reg);
4634266423Sjfv
4635266423Sjfv	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4636266423Sjfv	printf("TX errors = %x\n", reg);
4637266423Sjfv	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4638266423Sjfv	printf("TX Bytes = %x\n", reg);
4639266423Sjfv
4640266423Sjfv	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4641266423Sjfv	printf("RX undersize = %x\n", reg);
4642266423Sjfv	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4643266423Sjfv	printf("RX fragments = %x\n", reg);
4644266423Sjfv	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4645266423Sjfv	printf("RX oversize = %x\n", reg);
4646266423Sjfv	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4647266423Sjfv	printf("RX length error = %x\n", reg);
4648266423Sjfv	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4649266423Sjfv	printf("mac remote fault = %x\n", reg);
4650266423Sjfv	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4651266423Sjfv	printf("mac local fault = %x\n", reg);
4652266423Sjfv}
4653266423Sjfv
4654266423Sjfv/**
4655266423Sjfv * Update VSI-specific ethernet statistics counters.
4656266423Sjfv **/
4657270346Sjfvvoid ixl_update_eth_stats(struct ixl_vsi *vsi)
4658266423Sjfv{
4659270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4660266423Sjfv	struct i40e_hw *hw = &pf->hw;
4661266423Sjfv	struct i40e_eth_stats *es;
4662266423Sjfv	struct i40e_eth_stats *oes;
4663272227Sglebius	struct i40e_hw_port_stats *nsd;
4664266423Sjfv	u16 stat_idx = vsi->info.stat_counter_idx;
4665266423Sjfv
4666266423Sjfv	es = &vsi->eth_stats;
4667266423Sjfv	oes = &vsi->eth_stats_offsets;
4668272227Sglebius	nsd = &pf->stats;
4669266423Sjfv
4670266423Sjfv	/* Gather up the stats that the hw collects */
4671270346Sjfv	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4672266423Sjfv			   vsi->stat_offsets_loaded,
4673266423Sjfv			   &oes->tx_errors, &es->tx_errors);
4674270346Sjfv	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4675266423Sjfv			   vsi->stat_offsets_loaded,
4676266423Sjfv			   &oes->rx_discards, &es->rx_discards);
4677266423Sjfv
4678270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4679266423Sjfv			   I40E_GLV_GORCL(stat_idx),
4680266423Sjfv			   vsi->stat_offsets_loaded,
4681266423Sjfv			   &oes->rx_bytes, &es->rx_bytes);
4682270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4683266423Sjfv			   I40E_GLV_UPRCL(stat_idx),
4684266423Sjfv			   vsi->stat_offsets_loaded,
4685266423Sjfv			   &oes->rx_unicast, &es->rx_unicast);
4686270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4687266423Sjfv			   I40E_GLV_MPRCL(stat_idx),
4688266423Sjfv			   vsi->stat_offsets_loaded,
4689266423Sjfv			   &oes->rx_multicast, &es->rx_multicast);
4690270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4691266423Sjfv			   I40E_GLV_BPRCL(stat_idx),
4692266423Sjfv			   vsi->stat_offsets_loaded,
4693266423Sjfv			   &oes->rx_broadcast, &es->rx_broadcast);
4694266423Sjfv
4695270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4696266423Sjfv			   I40E_GLV_GOTCL(stat_idx),
4697266423Sjfv			   vsi->stat_offsets_loaded,
4698266423Sjfv			   &oes->tx_bytes, &es->tx_bytes);
4699270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4700266423Sjfv			   I40E_GLV_UPTCL(stat_idx),
4701266423Sjfv			   vsi->stat_offsets_loaded,
4702266423Sjfv			   &oes->tx_unicast, &es->tx_unicast);
4703270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4704266423Sjfv			   I40E_GLV_MPTCL(stat_idx),
4705266423Sjfv			   vsi->stat_offsets_loaded,
4706266423Sjfv			   &oes->tx_multicast, &es->tx_multicast);
4707270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4708266423Sjfv			   I40E_GLV_BPTCL(stat_idx),
4709266423Sjfv			   vsi->stat_offsets_loaded,
4710266423Sjfv			   &oes->tx_broadcast, &es->tx_broadcast);
4711266423Sjfv	vsi->stat_offsets_loaded = true;
4712279858Sjfv}
4713269198Sjfv
4714279858Sjfvstatic void
4715279858Sjfvixl_update_vsi_stats(struct ixl_vsi *vsi)
4716279858Sjfv{
4717279858Sjfv	struct ixl_pf		*pf;
4718279858Sjfv	struct ifnet		*ifp;
4719279858Sjfv	struct i40e_eth_stats	*es;
4720279858Sjfv	u64			tx_discards;
4721279858Sjfv
4722279858Sjfv	struct i40e_hw_port_stats *nsd;
4723279858Sjfv
4724279858Sjfv	pf = vsi->back;
4725279858Sjfv	ifp = vsi->ifp;
4726279858Sjfv	es = &vsi->eth_stats;
4727279858Sjfv	nsd = &pf->stats;
4728279858Sjfv
4729279858Sjfv	ixl_update_eth_stats(vsi);
4730279858Sjfv
4731272227Sglebius	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4732279858Sjfv	for (int i = 0; i < vsi->num_queues; i++)
4733272227Sglebius		tx_discards += vsi->queues[i].txr.br->br_drops;
4734272227Sglebius
4735269198Sjfv	/* Update ifnet stats */
4736272227Sglebius	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4737269198Sjfv	                   es->rx_multicast +
4738272227Sglebius			   es->rx_broadcast);
4739272227Sglebius	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4740269198Sjfv	                   es->tx_multicast +
4741272227Sglebius			   es->tx_broadcast);
4742272227Sglebius	IXL_SET_IBYTES(vsi, es->rx_bytes);
4743272227Sglebius	IXL_SET_OBYTES(vsi, es->tx_bytes);
4744272227Sglebius	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4745272227Sglebius	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4746269198Sjfv
4747279858Sjfv	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4748279858Sjfv	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4749279858Sjfv	    nsd->rx_jabber);
4750272227Sglebius	IXL_SET_OERRORS(vsi, es->tx_errors);
4751272227Sglebius	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4752272227Sglebius	IXL_SET_OQDROPS(vsi, tx_discards);
4753272227Sglebius	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4754272227Sglebius	IXL_SET_COLLISIONS(vsi, 0);
4755266423Sjfv}
4756266423Sjfv
4757266423Sjfv/**
4758266423Sjfv * Reset all of the stats for the given pf
4759266423Sjfv **/
4760270346Sjfvvoid ixl_pf_reset_stats(struct ixl_pf *pf)
4761266423Sjfv{
4762266423Sjfv	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4763266423Sjfv	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4764266423Sjfv	pf->stat_offsets_loaded = false;
4765266423Sjfv}
4766266423Sjfv
4767266423Sjfv/**
4768266423Sjfv * Resets all stats of the given vsi
4769266423Sjfv **/
4770270346Sjfvvoid ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4771266423Sjfv{
4772266423Sjfv	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4773266423Sjfv	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4774266423Sjfv	vsi->stat_offsets_loaded = false;
4775266423Sjfv}
4776266423Sjfv
4777266423Sjfv/**
4778266423Sjfv * Read and update a 48 bit stat from the hw
4779266423Sjfv *
4780266423Sjfv * Since the device stats are not reset at PFReset, they likely will not
4781266423Sjfv * be zeroed when the driver starts.  We'll save the first values read
4782266423Sjfv * and use them as offsets to be subtracted from the raw values in order
4783266423Sjfv * to report stats that count from zero.
4784266423Sjfv **/
4785266423Sjfvstatic void
4786270346Sjfvixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4787266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4788266423Sjfv{
4789266423Sjfv	u64 new_data;
4790266423Sjfv
4791270799Sbz#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4792266423Sjfv	new_data = rd64(hw, loreg);
4793266423Sjfv#else
4794266423Sjfv	/*
4795269198Sjfv	 * Use two rd32's instead of one rd64; FreeBSD versions before
4796266423Sjfv	 * 10 don't support 8 byte bus reads/writes.
4797266423Sjfv	 */
4798266423Sjfv	new_data = rd32(hw, loreg);
4799266423Sjfv	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4800266423Sjfv#endif
4801266423Sjfv
4802266423Sjfv	if (!offset_loaded)
4803266423Sjfv		*offset = new_data;
4804266423Sjfv	if (new_data >= *offset)
4805266423Sjfv		*stat = new_data - *offset;
4806266423Sjfv	else
4807266423Sjfv		*stat = (new_data + ((u64)1 << 48)) - *offset;
4808266423Sjfv	*stat &= 0xFFFFFFFFFFFFULL;
4809266423Sjfv}
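
/*
 * Illustrative note (editor's sketch, not from the original driver): the
 * offset/rollover arithmetic above can be checked with a worked example.
 * Assume the saved offset is 0xFFFFFFFFFF00 and the next raw read of the
 * 48-bit counter is 0x000000000010 (the counter has wrapped):
 *
 *	(0x000000000010 + (1ULL << 48)) - 0xFFFFFFFFFF00 = 0x110
 *
 * so 272 counts are reported since the offset was captured, and the final
 * mask keeps the result within 48 bits.
 */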
4810266423Sjfv
4811266423Sjfv/**
4812266423Sjfv * Read and update a 32 bit stat from the hw
4813266423Sjfv **/
4814266423Sjfvstatic void
4815270346Sjfvixl_stat_update32(struct i40e_hw *hw, u32 reg,
4816266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4817266423Sjfv{
4818266423Sjfv	u32 new_data;
4819266423Sjfv
4820266423Sjfv	new_data = rd32(hw, reg);
4821266423Sjfv	if (!offset_loaded)
4822266423Sjfv		*offset = new_data;
4823266423Sjfv	if (new_data >= *offset)
4824266423Sjfv		*stat = (u32)(new_data - *offset);
4825266423Sjfv	else
4826266423Sjfv		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4827266423Sjfv}
4828266423Sjfv
4829299549Serjstatic void
4830299549Serjixl_add_device_sysctls(struct ixl_pf *pf)
4831299549Serj{
4832299549Serj	device_t dev = pf->dev;
4833299549Serj
4834299549Serj	/* Set up sysctls */
4835299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4836299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4837299549Serj	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4838299549Serj	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");
4839299549Serj
4840299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4841299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4842299549Serj	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4843299549Serj	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");
4844299549Serj
4845299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4846299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4847299549Serj	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4848299549Serj	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
4849299549Serj
4850299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4851299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4852299549Serj	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4853299549Serj	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4854299549Serj
4855299549Serj	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4856299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4857299549Serj	    OID_AUTO, "rx_itr", CTLFLAG_RW,
4858299549Serj	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
4859299549Serj
4860299549Serj	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4861299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4862299549Serj	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4863299549Serj	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
4864299549Serj
4865299549Serj	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4866299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4867299549Serj	    OID_AUTO, "tx_itr", CTLFLAG_RW,
4868299549Serj	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
4869299549Serj
4870299549Serj	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4871299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4872299549Serj	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4873299549Serj	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
4874299549Serj
4875299549Serj#ifdef IXL_DEBUG_SYSCTL
4876299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4877299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4878299549Serj	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
4879299549Serj	    ixl_debug_info, "I", "Debug Information");
4880299549Serj
4881299549Serj	/* Debug shared-code message level */
4882299549Serj	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
4883299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4884299549Serj	    OID_AUTO, "debug_mask", CTLFLAG_RW,
4885299549Serj	    &pf->hw.debug_mask, 0, "Debug Message Level");
4886299549Serj
4887299549Serj	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
4888299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4889299549Serj	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4890299549Serj	    0, "PF/VF Virtual Channel debug level");
4891299549Serj
4892299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4893299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4894299549Serj	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4895299549Serj	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
4896299549Serj
4897299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4898299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4899299549Serj	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4900299549Serj	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4901299549Serj
4902299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4903299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4904299549Serj	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4905299549Serj	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4906299549Serj
4907299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4908299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4909299549Serj	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4910299549Serj	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4911299549Serj
4912299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4913299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4914299549Serj	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4915299549Serj	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4916299549Serj#endif
4917299549Serj}
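
/*
 * Usage sketch (editor's illustration; assumes device unit 0, adjust to
 * the attached instance):
 *
 *	sysctl -d dev.ixl.0		# list the knobs registered above
 *	sysctl dev.ixl.0.current_speed	# e.g. "10G"
 *	sysctl dev.ixl.0.fw_version	# firmware/NVM version string
 */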
4918299549Serj
4919266423Sjfv/*
4920266423Sjfv** Set flow control using sysctl:
4921266423Sjfv** 	0 - off
4922266423Sjfv**	1 - rx pause
4923266423Sjfv**	2 - tx pause
4924266423Sjfv**	3 - full
4925266423Sjfv*/
4926266423Sjfvstatic int
4927270346Sjfvixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4928266423Sjfv{
4929266423Sjfv	/*
4930266423Sjfv	 * TODO: ensure hardware TX CRC insertion is enabled when
4931266423Sjfv	 * TX flow control is enabled.
4932299547Serj	 * (Not applicable to 40G ports.)
4933266423Sjfv	 */
4934270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4935266423Sjfv	struct i40e_hw *hw = &pf->hw;
4936266423Sjfv	device_t dev = pf->dev;
4937279033Sjfv	int error = 0;
4938266423Sjfv	enum i40e_status_code aq_error = 0;
4939266423Sjfv	u8 fc_aq_err = 0;
4940266423Sjfv
4941279033Sjfv	/* Get request */
4942279033Sjfv	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4943266423Sjfv	if ((error) || (req->newptr == NULL))
4944269198Sjfv		return (error);
4945279033Sjfv	if (pf->fc < 0 || pf->fc > 3) {
4946266423Sjfv		device_printf(dev,
4947266423Sjfv		    "Invalid fc mode; valid modes are 0 through 3\n");
4948266423Sjfv		return (EINVAL);
4949266423Sjfv	}
4950266423Sjfv
4951269198Sjfv	/*
4952269198Sjfv	** Changing flow control mode currently does not work on
4953269198Sjfv	** 40GBASE-CR4 PHYs
4954269198Sjfv	*/
4955269198Sjfv	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4956269198Sjfv	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4957269198Sjfv		device_printf(dev, "Changing flow control mode unsupported"
4958269198Sjfv		    " on 40GBase-CR4 media.\n");
4959269198Sjfv		return (ENODEV);
4960269198Sjfv	}
4961269198Sjfv
4962266423Sjfv	/* Set fc ability for port */
4963279033Sjfv	hw->fc.requested_mode = pf->fc;
4964269198Sjfv	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4965269198Sjfv	if (aq_error) {
4966269198Sjfv		device_printf(dev,
4967269198Sjfv		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4968269198Sjfv		    __func__, aq_error, fc_aq_err);
4969299547Serj		return (EIO);
4970269198Sjfv	}
4971266423Sjfv
4972299547Serj	/* Get new link state */
4973299547Serj	i40e_msec_delay(250);
4974299547Serj	hw->phy.get_link_info = TRUE;
4975299547Serj	i40e_get_link_status(hw, &pf->link_up);
4976299547Serj
4977269198Sjfv	return (0);
4978269198Sjfv}
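
/*
 * Example (editor's illustration, unit number assumed):
 *
 *	sysctl dev.ixl.0.fc=2
 *
 * requests tx-pause only; the handler hands the new mode to the firmware
 * through i40e_set_fc() and then re-reads the link state.
 */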
4979266423Sjfv
4980270346Sjfvstatic int
4981270346Sjfvixl_current_speed(SYSCTL_HANDLER_ARGS)
4982270346Sjfv{
4983270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4984270346Sjfv	struct i40e_hw *hw = &pf->hw;
4985270346Sjfv	int error = 0, index = 0;
4986270346Sjfv
4987270346Sjfv	char *speeds[] = {
4988270346Sjfv		"Unknown",
4989270346Sjfv		"100M",
4990270346Sjfv		"1G",
4991270346Sjfv		"10G",
4992270346Sjfv		"40G",
4993270346Sjfv		"20G"
4994270346Sjfv	};
4995270346Sjfv
4996270346Sjfv	ixl_update_link_status(pf);
4997270346Sjfv
4998270346Sjfv	switch (hw->phy.link_info.link_speed) {
4999270346Sjfv	case I40E_LINK_SPEED_100MB:
5000270346Sjfv		index = 1;
5001270346Sjfv		break;
5002270346Sjfv	case I40E_LINK_SPEED_1GB:
5003270346Sjfv		index = 2;
5004270346Sjfv		break;
5005270346Sjfv	case I40E_LINK_SPEED_10GB:
5006270346Sjfv		index = 3;
5007270346Sjfv		break;
5008270346Sjfv	case I40E_LINK_SPEED_40GB:
5009270346Sjfv		index = 4;
5010270346Sjfv		break;
5011270346Sjfv	case I40E_LINK_SPEED_20GB:
5012270346Sjfv		index = 5;
5013270346Sjfv		break;
5014270346Sjfv	case I40E_LINK_SPEED_UNKNOWN:
5015270346Sjfv	default:
5016270346Sjfv		index = 0;
5017270346Sjfv		break;
5018270346Sjfv	}
5019270346Sjfv
5020270346Sjfv	error = sysctl_handle_string(oidp, speeds[index],
5021270346Sjfv	    strlen(speeds[index]), req);
5022270346Sjfv	return (error);
5023270346Sjfv}
5024270346Sjfv
5025274205Sjfvstatic int
5026274205Sjfvixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
5027274205Sjfv{
5028274205Sjfv	struct i40e_hw *hw = &pf->hw;
5029274205Sjfv	device_t dev = pf->dev;
5030274205Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
5031274205Sjfv	struct i40e_aq_set_phy_config config;
5032274205Sjfv	enum i40e_status_code aq_error = 0;
5033274205Sjfv
5034274205Sjfv	/* Get current capability information */
5035279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
5036279033Sjfv	    FALSE, FALSE, &abilities, NULL);
5037274205Sjfv	if (aq_error) {
5038279033Sjfv		device_printf(dev,
5039279033Sjfv		    "%s: Error getting phy capabilities %d,"
5040274205Sjfv		    " aq error: %d\n", __func__, aq_error,
5041274205Sjfv		    hw->aq.asq_last_status);
5042274205Sjfv		return (EAGAIN);
5043274205Sjfv	}
5044274205Sjfv
5045274205Sjfv	/* Prepare new config */
5046274205Sjfv	bzero(&config, sizeof(config));
5047274205Sjfv	config.phy_type = abilities.phy_type;
5048274205Sjfv	config.abilities = abilities.abilities
5049274205Sjfv	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
5050274205Sjfv	config.eee_capability = abilities.eee_capability;
5051274205Sjfv	config.eeer = abilities.eeer_val;
5052274205Sjfv	config.low_power_ctrl = abilities.d3_lpan;
5053274205Sjfv	/* Translate into aq cmd link_speed */
5054279858Sjfv	if (speeds & 0x8)
5055279858Sjfv		config.link_speed |= I40E_LINK_SPEED_20GB;
5056274205Sjfv	if (speeds & 0x4)
5057274205Sjfv		config.link_speed |= I40E_LINK_SPEED_10GB;
5058274205Sjfv	if (speeds & 0x2)
5059274205Sjfv		config.link_speed |= I40E_LINK_SPEED_1GB;
5060274205Sjfv	if (speeds & 0x1)
5061274205Sjfv		config.link_speed |= I40E_LINK_SPEED_100MB;
5062274205Sjfv
5063274205Sjfv	/* Do aq command & restart link */
5064274205Sjfv	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
5065274205Sjfv	if (aq_error) {
5066279033Sjfv		device_printf(dev,
5067279033Sjfv		    "%s: Error setting new phy config %d,"
5068274205Sjfv		    " aq error: %d\n", __func__, aq_error,
5069274205Sjfv		    hw->aq.asq_last_status);
5070274205Sjfv		return (EAGAIN);
5071274205Sjfv	}
5072274205Sjfv
5073277084Sjfv	/*
5074277084Sjfv	** Re-initializing the interface is heavy-handed, but some
5075277084Sjfv	** devices need it for the new PHY configuration to take effect.
5076277084Sjfv	*/
5077277084Sjfv	IXL_PF_LOCK(pf);
5078299547Serj	ixl_stop_locked(pf);
5079277084Sjfv	ixl_init_locked(pf);
5080277084Sjfv	IXL_PF_UNLOCK(pf);
5081277084Sjfv
5082274205Sjfv	return (0);
5083274205Sjfv}
5084274205Sjfv
5085269198Sjfv/*
5086269198Sjfv** Control link advertise speed:
5087270346Sjfv**	Flags:
5088270346Sjfv**	0x1 - advertise 100 Mb
5089270346Sjfv**	0x2 - advertise 1G
5090270346Sjfv**	0x4 - advertise 10G
5091279858Sjfv**	0x8 - advertise 20G
5092269198Sjfv**
5093269198Sjfv** Does not work on 40G devices.
5094269198Sjfv*/
5095269198Sjfvstatic int
5096270346Sjfvixl_set_advertise(SYSCTL_HANDLER_ARGS)
5097269198Sjfv{
5098270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5099269198Sjfv	struct i40e_hw *hw = &pf->hw;
5100269198Sjfv	device_t dev = pf->dev;
5101270346Sjfv	int requested_ls = 0;
5102269198Sjfv	int error = 0;
5103266423Sjfv
5104269198Sjfv	/*
5105269198Sjfv	** FW doesn't support changing advertised speed
5106269198Sjfv	** for 40G devices; speed is always 40G.
5107269198Sjfv	*/
5108269198Sjfv	if (i40e_is_40G_device(hw->device_id))
5109269198Sjfv		return (ENODEV);
5110266423Sjfv
5111269198Sjfv	/* Read in new mode */
5112270346Sjfv	requested_ls = pf->advertised_speed;
5113269198Sjfv	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
5114269198Sjfv	if ((error) || (req->newptr == NULL))
5115269198Sjfv		return (error);
5116279858Sjfv	/* Check for sane value */
5117279858Sjfv	if (requested_ls < 0x1 || requested_ls > 0xE) {
5118279858Sjfv		device_printf(dev, "Invalid advertised speed; "
5119279858Sjfv		    "valid modes are 0x1 through 0xE\n");
5120269198Sjfv		return (EINVAL);
5121266423Sjfv	}
5122279858Sjfv	/* Then check for validity based on adapter type */
5123279858Sjfv	switch (hw->device_id) {
5124279858Sjfv	case I40E_DEV_ID_10G_BASE_T:
5125299545Serj	case I40E_DEV_ID_10G_BASE_T4:
5126279858Sjfv		if (requested_ls & 0x8) {
5127279858Sjfv			device_printf(dev,
5128279858Sjfv			    "20Gbs speed not supported on this device.\n");
5129279858Sjfv			return (EINVAL);
5130279858Sjfv		}
5131279858Sjfv		break;
5132279858Sjfv	case I40E_DEV_ID_20G_KR2:
5133299545Serj	case I40E_DEV_ID_20G_KR2_A:
5134279858Sjfv		if (requested_ls & 0x1) {
5135279858Sjfv			device_printf(dev,
5136279858Sjfv			    "100Mbs speed not supported on this device.\n");
5137279858Sjfv			return (EINVAL);
5138279858Sjfv		}
5139279858Sjfv		break;
5140279858Sjfv	default:
5141279858Sjfv		if (requested_ls & ~0x6) {
5142279858Sjfv			device_printf(dev,
5143279858Sjfv			    "Only 1/10Gbs speeds are supported on this device.\n");
5144279858Sjfv			return (EINVAL);
5145279858Sjfv		}
5146279858Sjfv		break;
5147279858Sjfv	}
5148269198Sjfv
5149269198Sjfv	/* Exit if no change */
5150270346Sjfv	if (pf->advertised_speed == requested_ls)
5151269198Sjfv		return (0);
5152269198Sjfv
5153274205Sjfv	error = ixl_set_advertised_speeds(pf, requested_ls);
5154274205Sjfv	if (error)
5155274205Sjfv		return (error);
5156270346Sjfv
5157270346Sjfv	pf->advertised_speed = requested_ls;
5158270346Sjfv	ixl_update_link_status(pf);
5159269198Sjfv	return (0);
5160266423Sjfv}
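
/*
 * Worked example for the advertise_speed bitmask (editor's illustration,
 * unit number assumed): to advertise 1G and 10G only, OR the flag bits
 * together, 0x2 | 0x4 = 0x6:
 *
 *	sysctl dev.ixl.0.advertise_speed=6
 *
 * Combinations the adapter cannot support are rejected above (e.g. the
 * 20G bit, 0x8, on 10GBaseT devices).
 */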
5161266423Sjfv
5162266423Sjfv/*
5163266423Sjfv** Get the width and transaction speed of
5164266423Sjfv** the bus this adapter is plugged into.
5165266423Sjfv*/
5166266423Sjfvstatic u16
5167270346Sjfvixl_get_bus_info(struct i40e_hw *hw, device_t dev)
5168266423Sjfv{
5169266423Sjfv        u16                     link;
5170266423Sjfv        u32                     offset;
5171266423Sjfv
5172266423Sjfv        /* Get the PCI Express Capabilities offset */
5173266423Sjfv        pci_find_cap(dev, PCIY_EXPRESS, &offset);
5174266423Sjfv
5175266423Sjfv        /* ...and read the Link Status Register */
5176266423Sjfv        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
5177266423Sjfv
5178266423Sjfv        switch (link & I40E_PCI_LINK_WIDTH) {
5179266423Sjfv        case I40E_PCI_LINK_WIDTH_1:
5180266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x1;
5181266423Sjfv                break;
5182266423Sjfv        case I40E_PCI_LINK_WIDTH_2:
5183266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x2;
5184266423Sjfv                break;
5185266423Sjfv        case I40E_PCI_LINK_WIDTH_4:
5186266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x4;
5187266423Sjfv                break;
5188266423Sjfv        case I40E_PCI_LINK_WIDTH_8:
5189266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x8;
5190266423Sjfv                break;
5191266423Sjfv        default:
5192266423Sjfv                hw->bus.width = i40e_bus_width_unknown;
5193266423Sjfv                break;
5194266423Sjfv        }
5195266423Sjfv
5196266423Sjfv        switch (link & I40E_PCI_LINK_SPEED) {
5197266423Sjfv        case I40E_PCI_LINK_SPEED_2500:
5198266423Sjfv                hw->bus.speed = i40e_bus_speed_2500;
5199266423Sjfv                break;
5200266423Sjfv        case I40E_PCI_LINK_SPEED_5000:
5201266423Sjfv                hw->bus.speed = i40e_bus_speed_5000;
5202266423Sjfv                break;
5203266423Sjfv        case I40E_PCI_LINK_SPEED_8000:
5204266423Sjfv                hw->bus.speed = i40e_bus_speed_8000;
5205266423Sjfv                break;
5206266423Sjfv        default:
5207266423Sjfv                hw->bus.speed = i40e_bus_speed_unknown;
5208266423Sjfv                break;
5209266423Sjfv        }
5210266423Sjfv
5211266423Sjfv
5212266423Sjfv        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
5213266423Sjfv            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
5214266423Sjfv            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
5215266423Sjfv            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
5216266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
5217266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
5218266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
5219266423Sjfv            ("Unknown"));
5220266423Sjfv
5221266423Sjfv        if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
5222266423Sjfv            (hw->bus.speed < i40e_bus_speed_8000)) {
5223266423Sjfv                device_printf(dev, "PCI-Express bandwidth available"
5224279858Sjfv                    " for this device\n     may be insufficient for"
5225279858Sjfv                    " optimal performance.\n");
5226266423Sjfv                device_printf(dev, "For expected performance a x8 "
5227266423Sjfv                    "PCIE Gen3 slot is required.\n");
5228266423Sjfv        }
5229266423Sjfv
5230266423Sjfv        return (link);
5231266423Sjfv}
5232266423Sjfv
5233274205Sjfvstatic int
5234274205Sjfvixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
5235274205Sjfv{
5236274205Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
5237274205Sjfv	struct i40e_hw	*hw = &pf->hw;
5238274205Sjfv	char		buf[32];
5239274205Sjfv
5240274205Sjfv	snprintf(buf, sizeof(buf),
5241274205Sjfv	    "f%d.%d a%d.%d n%02x.%02x e%08x",
5242274205Sjfv	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
5243274205Sjfv	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
5244274205Sjfv	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
5245274205Sjfv	    IXL_NVM_VERSION_HI_SHIFT,
5246274205Sjfv	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
5247274205Sjfv	    IXL_NVM_VERSION_LO_SHIFT,
5248274205Sjfv	    hw->nvm.eetrack);
5249274205Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5250274205Sjfv}
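
/*
 * Example output (values hypothetical): "f4.40 a1.4 n04.53 e80001dd7",
 * i.e. firmware 4.40, admin queue API 1.4, NVM version 04.53, and NVM
 * eetrack id 0x80001dd7.
 */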
5251274205Sjfv
5252299547Serjstatic int
5253299547Serjixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
5254299547Serj{
5255299547Serj	struct i40e_hw *hw = &pf->hw;
5256299547Serj	struct i40e_nvm_access *nvma;
5257299547Serj	device_t dev = pf->dev;
5258299547Serj	enum i40e_status_code status = 0;
5259299547Serj	int perrno;
5260274205Sjfv
5261299547Serj	DEBUGFUNC("ixl_handle_nvmupd_cmd");
5262299547Serj
5263299547Serj	if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
5264299547Serj	    ifd->ifd_data == NULL) {
5265299547Serj		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", __func__);
5266299547Serj		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", __func__,
5267299547Serj		    ifd->ifd_len, sizeof(struct i40e_nvm_access));
5268299547Serj		device_printf(dev, "%s: data pointer: %p\n", __func__, ifd->ifd_data);
5269299547Serj		return (EINVAL);
5270299547Serj	}
5271299547Serj
5272299547Serj	nvma = (struct i40e_nvm_access *)ifd->ifd_data;
5273299547Serj
5274299549Serj	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
5275299549Serj		int count = 0;
5276299549Serj		while (count++ < 100) {
5277299549Serj			i40e_msec_delay(100);
5278299549Serj			if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
5279299549Serj				break;
5280299549Serj		}
5281299549Serj		// device_printf(dev, "ioctl EMPR reset wait count %d\n", count);
5282299549Serj	}
5283299549Serj
5284299549Serj	if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
5285299549Serj		IXL_PF_LOCK(pf);
5286299549Serj		status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
5287299549Serj		IXL_PF_UNLOCK(pf);
5288299549Serj	} else {
5289299549Serj		perrno = -EBUSY;
5290299549Serj	}
5291299549Serj
5292299548Serj	if (status)
5293299548Serj		device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
5294299548Serj		    status, perrno);
5295299547Serj
5296299549Serj	/*
5297299549Serj	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
5298299549Serj	 * to run this ioctl again. So use -EACCES for -EPERM instead.
5299299549Serj	 */
5300299548Serj	if (perrno == -EPERM)
5301299548Serj		return (-EACCES);
5302299548Serj	else
5303299548Serj		return (perrno);
5304299547Serj}
5305299547Serj
5306277084Sjfv#ifdef IXL_DEBUG_SYSCTL
5307266423Sjfvstatic int
5308270346Sjfvixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5309266423Sjfv{
5310270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5311266423Sjfv	struct i40e_hw *hw = &pf->hw;
5312266423Sjfv	struct i40e_link_status link_status;
5313266423Sjfv	char buf[512];
5314266423Sjfv
5315266423Sjfv	enum i40e_status_code aq_error = 0;
5316266423Sjfv
5317266423Sjfv	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
5318266423Sjfv	if (aq_error) {
5319266423Sjfv		printf("i40e_aq_get_link_info() error %d\n", aq_error);
5320266423Sjfv		return (EPERM);
5321266423Sjfv	}
5322266423Sjfv
5323266423Sjfv	sprintf(buf, "\n"
5324266423Sjfv	    "PHY Type : %#04x\n"
5325266423Sjfv	    "Speed    : %#04x\n"
5326266423Sjfv	    "Link info: %#04x\n"
5327266423Sjfv	    "AN info  : %#04x\n"
5328266423Sjfv	    "Ext info : %#04x",
5329266423Sjfv	    link_status.phy_type, link_status.link_speed,
5330266423Sjfv	    link_status.link_info, link_status.an_info,
5331266423Sjfv	    link_status.ext_info);
5332266423Sjfv
5333266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5334266423Sjfv}
5335266423Sjfv
5336266423Sjfvstatic int
5337270346Sjfvixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5338266423Sjfv{
5339279858Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
5340279858Sjfv	struct i40e_hw		*hw = &pf->hw;
5341279858Sjfv	char			buf[512];
5342279858Sjfv	enum i40e_status_code	aq_error = 0;
5343266423Sjfv
5344279858Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
5345266423Sjfv
5346279858Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
5347279858Sjfv	    TRUE, FALSE, &abilities, NULL);
5348266423Sjfv	if (aq_error) {
5349266423Sjfv		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
5350266423Sjfv		return (EPERM);
5351266423Sjfv	}
5352266423Sjfv
5353266423Sjfv	sprintf(buf, "\n"
5354266423Sjfv	    "PHY Type : %#010x\n"
5355266423Sjfv	    "Speed    : %#04x\n"
5356266423Sjfv	    "Abilities: %#04x\n"
5357266423Sjfv	    "EEE cap  : %#06x\n"
5358266423Sjfv	    "EEER reg : %#010x\n"
5359266423Sjfv	    "D3 Lpan  : %#04x",
5360279858Sjfv	    abilities.phy_type, abilities.link_speed,
5361279858Sjfv	    abilities.abilities, abilities.eee_capability,
5362279858Sjfv	    abilities.eeer_val, abilities.d3_lpan);
5363266423Sjfv
5364266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5365266423Sjfv}
5366266423Sjfv
5367266423Sjfvstatic int
5368270346Sjfvixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5369266423Sjfv{
5370270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5371270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
5372270346Sjfv	struct ixl_mac_filter *f;
5373266423Sjfv	char *buf, *buf_i;
5374266423Sjfv
5375266423Sjfv	int error = 0;
5376266423Sjfv	int ftl_len = 0;
5377266423Sjfv	int ftl_counter = 0;
5378266423Sjfv	int buf_len = 0;
5379266423Sjfv	int entry_len = 42;
5380266423Sjfv
5381266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
5382266423Sjfv		ftl_len++;
5383266423Sjfv	}
5384266423Sjfv
5385266423Sjfv	if (ftl_len < 1) {
5386266423Sjfv		sysctl_handle_string(oidp, "(none)", 6, req);
5387266423Sjfv		return (0);
5388266423Sjfv	}
5389266423Sjfv
5390266423Sjfv	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5391266423Sjfv	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5392266423Sjfv
5393266423Sjfv	sprintf(buf_i++, "\n");
5394266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
5395266423Sjfv		sprintf(buf_i,
5396266423Sjfv		    MAC_FORMAT ", vlan %4d, flags %#06x",
5397266423Sjfv		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5398266423Sjfv		buf_i += entry_len;
5399266423Sjfv		/* don't print '\n' for last entry */
5400266423Sjfv		if (++ftl_counter != ftl_len) {
5401266423Sjfv			sprintf(buf_i, "\n");
5402266423Sjfv			buf_i++;
5403266423Sjfv		}
5404266423Sjfv	}
5405266423Sjfv
5406266423Sjfv	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5407266423Sjfv	if (error)
5408266423Sjfv		printf("sysctl error: %d\n", error);
5409266423Sjfv	free(buf, M_DEVBUF);
5410266423Sjfv	return error;
5411266423Sjfv}
5412269198Sjfv
5413270346Sjfv#define IXL_SW_RES_SIZE 0x14
5414269198Sjfvstatic int
5415277084Sjfvixl_res_alloc_cmp(const void *a, const void *b)
5416277084Sjfv{
5417277084Sjfv	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5418284049Sjfv	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5419284049Sjfv	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5420277084Sjfv
5421277084Sjfv	return ((int)one->resource_type - (int)two->resource_type);
5422277084Sjfv}
5423277084Sjfv
5424299549Serj/*
5425299549Serj * Longest string length: 25
5426299549Serj */
5427299549Serjstatic char *
5428299549Serjixl_switch_res_type_string(u8 type)
5429299549Serj{
5430299549Serj	static char * ixl_switch_res_type_strings[0x14] = {
5431299549Serj		"VEB",
5432299549Serj		"VSI",
5433299549Serj		"Perfect Match MAC address",
5434299549Serj		"S-tag",
5435299549Serj		"(Reserved)",
5436299549Serj		"Multicast hash entry",
5437299549Serj		"Unicast hash entry",
5438299549Serj		"VLAN",
5439299549Serj		"VSI List entry",
5440299549Serj		"(Reserved)",
5441299549Serj		"VLAN Statistic Pool",
5442299549Serj		"Mirror Rule",
5443299549Serj		"Queue Set",
5444299549Serj		"Inner VLAN Forward filter",
5445299549Serj		"(Reserved)",
5446299549Serj		"Inner MAC",
5447299549Serj		"IP",
5448299549Serj		"GRE/VN1 Key",
5449299549Serj		"VN2 Key",
5450299549Serj		"Tunneling Port"
5451299549Serj	};
5452299549Serj
5453299549Serj	if (type < 0x14)
5454299549Serj		return ixl_switch_res_type_strings[type];
5455299549Serj	else
5456299549Serj		return "(Reserved)";
5457299549Serj}
5458299549Serj
5459277084Sjfvstatic int
5460274205Sjfvixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5461269198Sjfv{
5462270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5463269198Sjfv	struct i40e_hw *hw = &pf->hw;
5464269198Sjfv	device_t dev = pf->dev;
5465269198Sjfv	struct sbuf *buf;
5466269198Sjfv	int error = 0;
5467269198Sjfv
5468269198Sjfv	u8 num_entries;
5469270346Sjfv	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5470269198Sjfv
5471299546Serj	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5472269198Sjfv	if (!buf) {
5473269198Sjfv		device_printf(dev, "Could not allocate sbuf for output.\n");
5474269198Sjfv		return (ENOMEM);
5475269198Sjfv	}
5476269198Sjfv
5477277084Sjfv	bzero(resp, sizeof(resp));
5478269198Sjfv	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5479269198Sjfv				resp,
5480270346Sjfv				IXL_SW_RES_SIZE,
5481269198Sjfv				NULL);
5482269198Sjfv	if (error) {
5483279858Sjfv		device_printf(dev,
5484279858Sjfv		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5485269198Sjfv		    __func__, error, hw->aq.asq_last_status);
5486269198Sjfv		sbuf_delete(buf);
5487269198Sjfv		return error;
5488269198Sjfv	}
5489269198Sjfv
5490277084Sjfv	/* Sort entries by type for display */
5491277084Sjfv	qsort(resp, num_entries,
5492277084Sjfv	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5493277084Sjfv	    &ixl_res_alloc_cmp);
5494277084Sjfv
5495269198Sjfv	sbuf_cat(buf, "\n");
5496277084Sjfv	sbuf_printf(buf, "# of entries: %d\n", num_entries);
5497269198Sjfv	sbuf_printf(buf,
5502299549Serj	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
5503299549Serj	    "                          | (this)     | (all) | (this) | (all)       \n");
5504269198Sjfv	for (int i = 0; i < num_entries; i++) {
5505269198Sjfv		sbuf_printf(buf,
5510299549Serj		    "%25s | %10d   %5d   %6d   %12d",
5511299549Serj		    ixl_switch_res_type_string(resp[i].resource_type),
5512269198Sjfv		    resp[i].guaranteed,
5513269198Sjfv		    resp[i].total,
5514269198Sjfv		    resp[i].used,
5515269198Sjfv		    resp[i].total_unalloced);
5516269198Sjfv		if (i < num_entries - 1)
5517269198Sjfv			sbuf_cat(buf, "\n");
5518269198Sjfv	}
5519269198Sjfv
5520269198Sjfv	error = sbuf_finish(buf);
5521299546Serj	if (error)
5522299545Serj		device_printf(dev, "Error finishing sbuf: %d\n", error);
5523299545Serj
5524290708Ssmh	sbuf_delete(buf);
5525299545Serj	return error;
5526274205Sjfv}
5527269198Sjfv
5528274205Sjfv/*
5529274205Sjfv** Caller must init and delete sbuf; this function will clear and
5530274205Sjfv** finish it for caller.
5531299549Serj**
5532299549Serj** XXX: Cannot use the SEID for this, since there is no longer a
5533299549Serj** fixed mapping between SEID and element type.
5534274205Sjfv*/
5535274205Sjfvstatic char *
5536299549Serjixl_switch_element_string(struct sbuf *s,
5537299549Serj    struct i40e_aqc_switch_config_element_resp *element)
5538274205Sjfv{
5539274205Sjfv	sbuf_clear(s);
5540274205Sjfv
5541299549Serj	switch (element->element_type) {
5542299549Serj	case I40E_AQ_SW_ELEM_TYPE_MAC:
5543299549Serj		sbuf_printf(s, "MAC %3d", element->element_info);
5544299549Serj		break;
5545299549Serj	case I40E_AQ_SW_ELEM_TYPE_PF:
5546299549Serj		sbuf_printf(s, "PF  %3d", element->element_info);
5547299549Serj		break;
5548299549Serj	case I40E_AQ_SW_ELEM_TYPE_VF:
5549299549Serj		sbuf_printf(s, "VF  %3d", element->element_info);
5550299549Serj		break;
5551299549Serj	case I40E_AQ_SW_ELEM_TYPE_EMP:
5552274205Sjfv		sbuf_cat(s, "EMP");
5553299549Serj		break;
5554299549Serj	case I40E_AQ_SW_ELEM_TYPE_BMC:
5555299549Serj		sbuf_cat(s, "BMC");
5556299549Serj		break;
5557299549Serj	case I40E_AQ_SW_ELEM_TYPE_PV:
5558299549Serj		sbuf_cat(s, "PV");
5559299549Serj		break;
5560299549Serj	case I40E_AQ_SW_ELEM_TYPE_VEB:
5561299549Serj		sbuf_cat(s, "VEB");
5562299549Serj		break;
5563299549Serj	case I40E_AQ_SW_ELEM_TYPE_PA:
5564299549Serj		sbuf_cat(s, "PA");
5565299549Serj		break;
5566299549Serj	case I40E_AQ_SW_ELEM_TYPE_VSI:
5567299549Serj		sbuf_printf(s, "VSI %3d", element->element_info);
5568299549Serj		break;
5569299549Serj	default:
5570299549Serj		sbuf_cat(s, "?");
5571299549Serj		break;
5572299549Serj	}
5573274205Sjfv
5574274205Sjfv	sbuf_finish(s);
5575274205Sjfv	return sbuf_data(s);
5576269198Sjfv}
5577269198Sjfv
5578274205Sjfvstatic int
5579274205Sjfvixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5580274205Sjfv{
5581274205Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5582274205Sjfv	struct i40e_hw *hw = &pf->hw;
5583274205Sjfv	device_t dev = pf->dev;
5584274205Sjfv	struct sbuf *buf;
5585274205Sjfv	struct sbuf *nmbuf;
5586274205Sjfv	int error = 0;
5587299549Serj	u16 next = 0;
5588274205Sjfv	u8 aq_buf[I40E_AQ_LARGE_BUF];
5589274205Sjfv
5590274205Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
5591274205Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5592274205Sjfv
5593299546Serj	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5594274205Sjfv	if (!buf) {
5595274205Sjfv		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5596274205Sjfv		return (ENOMEM);
5597274205Sjfv	}
5598274205Sjfv
5599274205Sjfv	error = i40e_aq_get_switch_config(hw, sw_config,
5600274205Sjfv	    sizeof(aq_buf), &next, NULL);
5601274205Sjfv	if (error) {
5602279858Sjfv		device_printf(dev,
5603279858Sjfv		    "%s: aq_get_switch_config() error %d, aq error %d\n",
5604274205Sjfv		    __func__, error, hw->aq.asq_last_status);
5605274205Sjfv		sbuf_delete(buf);
5606274205Sjfv		return error;
5607274205Sjfv	}
5608299549Serj	if (next)
5609299549Serj		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
5610299549Serj		    __func__, next);
5611274205Sjfv
5612274205Sjfv	nmbuf = sbuf_new_auto();
5613274205Sjfv	if (!nmbuf) {
5614274205Sjfv		device_printf(dev, "Could not allocate sbuf for name output.\n");
5615299546Serj		sbuf_delete(buf);
5616274205Sjfv		return (ENOMEM);
5617274205Sjfv	}
5618274205Sjfv
5619274205Sjfv	sbuf_cat(buf, "\n");
5620274205Sjfv	// Assuming <= 255 elements in switch
5621299549Serj	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
5622299549Serj	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
5623274205Sjfv	/* Exclude:
5624274205Sjfv	** Revision -- all elements are revision 1 for now
5625274205Sjfv	*/
5626274205Sjfv	sbuf_printf(buf,
5627274205Sjfv	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5628274205Sjfv	    "                |          |          | (uplink)\n");
5629274205Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
5630274205Sjfv		// Row format: "%4d (%8s) | %8d   %8d   %#8x"
5631274205Sjfv		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5632274205Sjfv		sbuf_cat(buf, " ");
5633279858Sjfv		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5634299549Serj		    &sw_config->element[i]));
5635274205Sjfv		sbuf_cat(buf, " | ");
5636299549Serj		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
5637274205Sjfv		sbuf_cat(buf, "   ");
5638299549Serj		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
5639274205Sjfv		sbuf_cat(buf, "   ");
5640274205Sjfv		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5641274205Sjfv		if (i < sw_config->header.num_reported - 1)
5642274205Sjfv			sbuf_cat(buf, "\n");
5643274205Sjfv	}
5644274205Sjfv	sbuf_delete(nmbuf);
5645274205Sjfv
5646274205Sjfv	error = sbuf_finish(buf);
5647299546Serj	if (error)
5648299545Serj		device_printf(dev, "Error finishing sbuf: %d\n", error);
5649299545Serj
5650274205Sjfv	sbuf_delete(buf);
5651274205Sjfv
5652274205Sjfv	return (error);
5653274205Sjfv}
5654279858Sjfv#endif /* IXL_DEBUG_SYSCTL */
5655274205Sjfv
5656279858Sjfv#ifdef PCI_IOV
5657269198Sjfvstatic int
5658279858Sjfvixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5659269198Sjfv{
5660279858Sjfv	struct i40e_hw *hw;
5661279858Sjfv	struct ixl_vsi *vsi;
5662279858Sjfv	struct i40e_vsi_context vsi_ctx;
5663279858Sjfv	int i;
5664279858Sjfv	uint16_t first_queue;
5665279858Sjfv	enum i40e_status_code code;
5666269198Sjfv
5667279858Sjfv	hw = &pf->hw;
5668279858Sjfv	vsi = &pf->vsi;
5669269198Sjfv
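	/* Describe the new VSI: a VF data port attached to the PF's VEB. */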
5670279858Sjfv	vsi_ctx.pf_num = hw->pf_id;
5671279858Sjfv	vsi_ctx.uplink_seid = pf->veb_seid;
5672279858Sjfv	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5673279858Sjfv	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5674279858Sjfv	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5675279858Sjfv
5676279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5677279858Sjfv
5678279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5679279858Sjfv	vsi_ctx.info.switch_id = htole16(0);
5680279858Sjfv
5681279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5682279858Sjfv	vsi_ctx.info.sec_flags = 0;
5683279858Sjfv	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5684279858Sjfv		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5685279858Sjfv
5686279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5687279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5688279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5689279858Sjfv
5690279858Sjfv	vsi_ctx.info.valid_sections |=
5691279858Sjfv	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5692279858Sjfv	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
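	/*
	 * Map IXLV_MAX_QUEUES PF queues (starting after the PF's own queues)
	 * to this VF and mask off the remaining mapping entries.
	 */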
5693279858Sjfv	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5694279858Sjfv	for (i = 0; i < IXLV_MAX_QUEUES; i++)
5695279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
5696279858Sjfv	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5697279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5698279858Sjfv
5699279858Sjfv	vsi_ctx.info.tc_mapping[0] = htole16(
5700279858Sjfv	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5701279858Sjfv	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5702279858Sjfv
5703279858Sjfv	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5704279858Sjfv	if (code != I40E_SUCCESS)
5705279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5706279858Sjfv	vf->vsi.seid = vsi_ctx.seid;
5707279858Sjfv	vf->vsi.vsi_num = vsi_ctx.vsi_number;
5708279858Sjfv	vf->vsi.first_queue = first_queue;
5709279858Sjfv	vf->vsi.num_queues = IXLV_MAX_QUEUES;
5710279858Sjfv
5711279858Sjfv	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5712279858Sjfv	if (code != I40E_SUCCESS)
5713279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5714279858Sjfv
5715279858Sjfv	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5716279858Sjfv	if (code != I40E_SUCCESS) {
5717279858Sjfv		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5718279858Sjfv		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5719279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5720269198Sjfv	}
5721269198Sjfv
5722279858Sjfv	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
5723279858Sjfv	return (0);
5724279858Sjfv}
5725279858Sjfv
5726279858Sjfvstatic int
5727279858Sjfvixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5728279858Sjfv{
5729279858Sjfv	struct i40e_hw *hw;
5730279858Sjfv	int error;
5731279858Sjfv
5732279858Sjfv	hw = &pf->hw;
5733279858Sjfv
5734279858Sjfv	error = ixl_vf_alloc_vsi(pf, vf);
5735279858Sjfv	if (error != 0)
5736269198Sjfv		return (error);
5737279858Sjfv
5738279858Sjfv	vf->vsi.hw_filters_add = 0;
5739279858Sjfv	vf->vsi.hw_filters_del = 0;
5740279858Sjfv	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5741279858Sjfv	ixl_reconfigure_filters(&vf->vsi);
5742279858Sjfv
5743279858Sjfv	return (0);
5744279858Sjfv}
5745279858Sjfv
5746279858Sjfvstatic void
5747279858Sjfvixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5748279858Sjfv    uint32_t val)
5749279858Sjfv{
5750279858Sjfv	uint32_t qtable;
5751279858Sjfv	int index, shift;
5752279858Sjfv
5753279858Sjfv	/*
5754279858Sjfv	 * Two queues are mapped in a single register, so we have to do some
5755279858Sjfv	 * gymnastics to convert the queue number into a register index and
5756279858Sjfv	 * shift.
5757279858Sjfv	 */
5758279858Sjfv	index = qnum / 2;
5759279858Sjfv	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5760279858Sjfv
5761279858Sjfv	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5762279858Sjfv	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5763279858Sjfv	qtable |= val << shift;
5764279858Sjfv	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5765279858Sjfv}
5766279858Sjfv
5767279858Sjfvstatic void
5768279858Sjfvixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5769279858Sjfv{
5770279858Sjfv	struct i40e_hw *hw;
5771279858Sjfv	uint32_t qtable;
5772279858Sjfv	int i;
5773279858Sjfv
5774279858Sjfv	hw = &pf->hw;
5775279858Sjfv
5776279858Sjfv	/*
5777279858Sjfv	 * Contiguous mappings aren't actually supported by the hardware,
5778279858Sjfv	 * so we have to use non-contiguous mappings.
5779279858Sjfv	 */
5780279858Sjfv	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5781279858Sjfv	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5782279858Sjfv
5783279858Sjfv	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5784279858Sjfv	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
5785279858Sjfv
5786279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++) {
5787279858Sjfv		qtable = (vf->vsi.first_queue + i) <<
5788279858Sjfv		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5789279858Sjfv
5790279858Sjfv		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5791279858Sjfv	}
5792279858Sjfv
5793279858Sjfv	/* Map queues allocated to VF to its VSI. */
5794279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++)
5795279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5796279858Sjfv
5797279858Sjfv	/* Set rest of VSI queues as unused. */
5798279858Sjfv	for (; i < IXL_MAX_VSI_QUEUES; i++)
5799279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i,
5800279858Sjfv		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
5801279858Sjfv
5802279858Sjfv	ixl_flush(hw);
5803279858Sjfv}
5804279858Sjfv
5805279858Sjfvstatic void
5806279858Sjfvixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5807279858Sjfv{
5808279858Sjfv	struct i40e_hw *hw;
5809279858Sjfv
5810279858Sjfv	hw = &pf->hw;
5811279858Sjfv
5812279858Sjfv	if (vsi->seid == 0)
5813279858Sjfv		return;
5814279858Sjfv
5815279858Sjfv	i40e_aq_delete_element(hw, vsi->seid, NULL);
5816279858Sjfv}
5817279858Sjfv
5818279858Sjfvstatic void
5819279858Sjfvixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5820279858Sjfv{
5821279858Sjfv
5822279858Sjfv	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
5823279858Sjfv	ixl_flush(hw);
5824279858Sjfv}
5825279858Sjfv
5826279858Sjfvstatic void
5827279858Sjfvixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5828279858Sjfv{
5829279858Sjfv
5830279858Sjfv	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5831279858Sjfv	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
5832279858Sjfv	ixl_flush(hw);
5833279858Sjfv}
5834279858Sjfv
5835279858Sjfvstatic void
5836279858Sjfvixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5837279858Sjfv{
5838279858Sjfv	struct i40e_hw *hw;
5839279858Sjfv	uint32_t vfint_reg, vpint_reg;
5840279858Sjfv	int i;
5841279858Sjfv
5842279858Sjfv	hw = &pf->hw;
5843279858Sjfv
5844279858Sjfv	ixl_vf_vsi_release(pf, &vf->vsi);
5845279858Sjfv
5846279858Sjfv	/* Index 0 has a special register. */
5847279858Sjfv	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5848279858Sjfv
5849279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5850279858Sjfv		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
5851279858Sjfv		ixl_vf_disable_queue_intr(hw, vfint_reg);
5852279858Sjfv	}
5853279858Sjfv
5854279858Sjfv	/* Index 0 has a special register. */
5855279858Sjfv	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5856279858Sjfv
5857279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5858279858Sjfv		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5859279858Sjfv		ixl_vf_unregister_intr(hw, vpint_reg);
5860279858Sjfv	}
5861279858Sjfv
5862279858Sjfv	vf->vsi.num_queues = 0;
5863279858Sjfv}
5864279858Sjfv
5865279858Sjfvstatic int
5866279858Sjfvixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5867279858Sjfv{
5868279858Sjfv	struct i40e_hw *hw;
5869279858Sjfv	int i;
5870279858Sjfv	uint16_t global_vf_num;
5871279858Sjfv	uint32_t ciad;
5872279858Sjfv
5873279858Sjfv	hw = &pf->hw;
5874279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5875279858Sjfv
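	/*
	 * Select the VF's device status through PF_PCI_CIAA, then poll
	 * PF_PCI_CIAD until the VF reports no pending transactions or the
	 * timeout expires.
	 */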
5876279858Sjfv	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5877279858Sjfv	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5878279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5879279858Sjfv		ciad = rd32(hw, I40E_PF_PCI_CIAD);
5880279858Sjfv		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5881279858Sjfv			return (0);
5882279858Sjfv		DELAY(1);
5883279858Sjfv	}
5884279858Sjfv
5885279858Sjfv	return (ETIMEDOUT);
5886279858Sjfv}
5887279858Sjfv
5888279858Sjfvstatic void
5889279858Sjfvixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5890279858Sjfv{
5891279858Sjfv	struct i40e_hw *hw;
5892279858Sjfv	uint32_t vfrtrig;
5893279858Sjfv
5894279858Sjfv	hw = &pf->hw;
5895279858Sjfv
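	/* Trigger a software reset of this VF, then rebuild its resources. */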
5896279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5897279858Sjfv	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5898279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5899279858Sjfv	ixl_flush(hw);
5900279858Sjfv
5901279858Sjfv	ixl_reinit_vf(pf, vf);
5902279858Sjfv}
5903279858Sjfv
5904279858Sjfvstatic void
5905279858Sjfvixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5906279858Sjfv{
5907279858Sjfv	struct i40e_hw *hw;
5908279858Sjfv	uint32_t vfrstat, vfrtrig;
5909279858Sjfv	int i, error;
5910279858Sjfv
5911279858Sjfv	hw = &pf->hw;
5912279858Sjfv
5913279858Sjfv	error = ixl_flush_pcie(pf, vf);
5914279858Sjfv	if (error != 0)
5915279858Sjfv		device_printf(pf->dev,
5916279858Sjfv		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
5917279858Sjfv		    vf->vf_num);
5918279858Sjfv
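	/* Poll until the hardware reports that the VF reset has completed. */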
5919279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5920279858Sjfv		DELAY(10);
5921279858Sjfv
5922279858Sjfv		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5923279858Sjfv		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5924279858Sjfv			break;
5925279858Sjfv	}
5926279858Sjfv
5927279858Sjfv	if (i == IXL_VF_RESET_TIMEOUT)
5928279858Sjfv		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5929279858Sjfv
5930279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5931279858Sjfv
5932279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5933279858Sjfv	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5934279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5935279858Sjfv
5936279858Sjfv	if (vf->vsi.seid != 0)
5937279858Sjfv		ixl_disable_rings(&vf->vsi);
5938279858Sjfv
5939279858Sjfv	ixl_vf_release_resources(pf, vf);
5940279858Sjfv	ixl_vf_setup_vsi(pf, vf);
5941279858Sjfv	ixl_vf_map_queues(pf, vf);
5942279858Sjfv
5943279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
5944279858Sjfv	ixl_flush(hw);
5945279858Sjfv}
5946279858Sjfv
5947279858Sjfvstatic const char *
5948279858Sjfvixl_vc_opcode_str(uint16_t op)
5949279858Sjfv{
5950279858Sjfv
5951279858Sjfv	switch (op) {
5952279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
5953279858Sjfv		return ("VERSION");
5954279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
5955279858Sjfv		return ("RESET_VF");
5956279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5957279858Sjfv		return ("GET_VF_RESOURCES");
5958279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5959279858Sjfv		return ("CONFIG_TX_QUEUE");
5960279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5961279858Sjfv		return ("CONFIG_RX_QUEUE");
5962279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5963279858Sjfv		return ("CONFIG_VSI_QUEUES");
5964279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5965279858Sjfv		return ("CONFIG_IRQ_MAP");
5966279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5967279858Sjfv		return ("ENABLE_QUEUES");
5968279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5969279858Sjfv		return ("DISABLE_QUEUES");
5970279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5971279858Sjfv		return ("ADD_ETHER_ADDRESS");
5972279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5973279858Sjfv		return ("DEL_ETHER_ADDRESS");
5974279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
5975279858Sjfv		return ("ADD_VLAN");
5976279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
5977279858Sjfv		return ("DEL_VLAN");
5978279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5979279858Sjfv		return ("CONFIG_PROMISCUOUS_MODE");
5980279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5981279858Sjfv		return ("GET_STATS");
5982279858Sjfv	case I40E_VIRTCHNL_OP_FCOE:
5983279858Sjfv		return ("FCOE");
5984279858Sjfv	case I40E_VIRTCHNL_OP_EVENT:
5985279858Sjfv		return ("EVENT");
5986279858Sjfv	default:
5987279858Sjfv		return ("UNKNOWN");
5988279858Sjfv	}
5989279858Sjfv}
5990279858Sjfv
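/*
 * Debug level at which a message with the given opcode is logged;
 * GET_STATS uses a higher level than the other opcodes.
 */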
5991279858Sjfvstatic int
5992279858Sjfvixl_vc_opcode_level(uint16_t opcode)
5993279858Sjfv{
5994279858Sjfv
5995279858Sjfv	switch (opcode) {
5996279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5997279858Sjfv		return (10);
5998279858Sjfv	default:
5999279858Sjfv		return (5);
6000279858Sjfv	}
6001279858Sjfv}
6002279858Sjfv
6003279858Sjfvstatic void
6004279858Sjfvixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
6005279858Sjfv    enum i40e_status_code status, void *msg, uint16_t len)
6006279858Sjfv{
6007279858Sjfv	struct i40e_hw *hw;
6008279858Sjfv	int global_vf_id;
6009279858Sjfv
6010279858Sjfv	hw = &pf->hw;
6011279858Sjfv	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
6012279858Sjfv
6013279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
6014279858Sjfv	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
6015279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
6016279858Sjfv
6017279858Sjfv	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
6018279858Sjfv}
6019279858Sjfv
6020279858Sjfvstatic void
6021279858Sjfvixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
6022279858Sjfv{
6023279858Sjfv
6024279858Sjfv	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
6025279858Sjfv}
6026279858Sjfv
6027279858Sjfvstatic void
6028279858Sjfvixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
6029279858Sjfv    enum i40e_status_code status, const char *file, int line)
6030279858Sjfv{
6031279858Sjfv
6032279858Sjfv	I40E_VC_DEBUG(pf, 1,
6033279858Sjfv	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
6034279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
6035279858Sjfv	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
6036279858Sjfv}
6037279858Sjfv
6038279858Sjfvstatic void
6039279858Sjfvixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6040279858Sjfv    uint16_t msg_size)
6041279858Sjfv{
6042279858Sjfv	struct i40e_virtchnl_version_info reply;
6043279858Sjfv
6044279858Sjfv	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
6045279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
6046279858Sjfv		    I40E_ERR_PARAM);
6047279858Sjfv		return;
6048279858Sjfv	}
6049279858Sjfv
6050279858Sjfv	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
6051279858Sjfv	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
6052279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
6053279858Sjfv	    sizeof(reply));
6054279858Sjfv}
6055279858Sjfv
6056279858Sjfvstatic void
6057279858Sjfvixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6058279858Sjfv    uint16_t msg_size)
6059279858Sjfv{
6060279858Sjfv
6061279858Sjfv	if (msg_size != 0) {
6062279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
6063279858Sjfv		    I40E_ERR_PARAM);
6064279858Sjfv		return;
6065279858Sjfv	}
6066279858Sjfv
6067279858Sjfv	ixl_reset_vf(pf, vf);
6068279858Sjfv
6069279858Sjfv	/* No response to a reset message. */
6070279858Sjfv}
6071279858Sjfv
6072279858Sjfvstatic void
6073279858Sjfvixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6074279858Sjfv    uint16_t msg_size)
6075279858Sjfv{
6076279858Sjfv	struct i40e_virtchnl_vf_resource reply;
6077279858Sjfv
6078279858Sjfv	if (msg_size != 0) {
6079279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
6080279858Sjfv		    I40E_ERR_PARAM);
6081279858Sjfv		return;
6082279858Sjfv	}
6083279858Sjfv
6084279858Sjfv	bzero(&reply, sizeof(reply));
6085279858Sjfv
6086279858Sjfv	reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
6087279858Sjfv
6088279858Sjfv	reply.num_vsis = 1;
6089279858Sjfv	reply.num_queue_pairs = vf->vsi.num_queues;
6090279858Sjfv	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
6091279858Sjfv	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
6092279858Sjfv	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
6093279858Sjfv	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
6094279858Sjfv	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
6095279858Sjfv
6096279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
6097279858Sjfv	    I40E_SUCCESS, &reply, sizeof(reply));
6098279858Sjfv}
6099279858Sjfv
6100279858Sjfvstatic int
6101279858Sjfvixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
6102279858Sjfv    struct i40e_virtchnl_txq_info *info)
6103279858Sjfv{
6104279858Sjfv	struct i40e_hw *hw;
6105279858Sjfv	struct i40e_hmc_obj_txq txq;
6106279858Sjfv	uint16_t global_queue_num, global_vf_num;
6107279858Sjfv	enum i40e_status_code status;
6108279858Sjfv	uint32_t qtx_ctl;
6109279858Sjfv
6110279858Sjfv	hw = &pf->hw;
6111279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
6112279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
6113279858Sjfv	bzero(&txq, sizeof(txq));
6114279858Sjfv
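	/*
	 * Clear any stale HMC TX queue context, then program a new one from
	 * the parameters the VF supplied.
	 */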
6115279858Sjfv	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
6116279858Sjfv	if (status != I40E_SUCCESS)
6117269198Sjfv		return (EINVAL);
6118279858Sjfv
6119279858Sjfv	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
6120279858Sjfv
6121279858Sjfv	txq.head_wb_ena = info->headwb_enabled;
6122279858Sjfv	txq.head_wb_addr = info->dma_headwb_addr;
6123279858Sjfv	txq.qlen = info->ring_len;
6124279858Sjfv	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
6125279858Sjfv	txq.rdylist_act = 0;
6126279858Sjfv
6127279858Sjfv	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
6128279858Sjfv	if (status != I40E_SUCCESS)
6129279858Sjfv		return (EINVAL);
6130279858Sjfv
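	/* Tag the queue as a VF queue owned by this PF/VF pair. */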
6131279858Sjfv	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
6132279858Sjfv	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
6133279858Sjfv	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
6134279858Sjfv	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
6135279858Sjfv	ixl_flush(hw);
6136279858Sjfv
6137279858Sjfv	return (0);
6138279858Sjfv}
6139279858Sjfv
6140279858Sjfvstatic int
6141279858Sjfvixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
6142279858Sjfv    struct i40e_virtchnl_rxq_info *info)
6143279858Sjfv{
6144279858Sjfv	struct i40e_hw *hw;
6145279858Sjfv	struct i40e_hmc_obj_rxq rxq;
6146279858Sjfv	uint16_t global_queue_num;
6147279858Sjfv	enum i40e_status_code status;
6148279858Sjfv
6149279858Sjfv	hw = &pf->hw;
6150279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
6151279858Sjfv	bzero(&rxq, sizeof(rxq));
6152279858Sjfv
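	/* Sanity-check the VF-supplied buffer and frame sizes before writing the queue context. */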
6153279858Sjfv	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
6154279858Sjfv		return (EINVAL);
6155279858Sjfv
6156279858Sjfv	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
6157279858Sjfv	    info->max_pkt_size < ETHER_MIN_LEN)
6158279858Sjfv		return (EINVAL);
6159279858Sjfv
6160279858Sjfv	if (info->splithdr_enabled) {
6161279858Sjfv		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
6162279858Sjfv			return (EINVAL);
6163279858Sjfv
6164279858Sjfv		rxq.hsplit_0 = info->rx_split_pos &
6165279858Sjfv		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
6166279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
6167279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
6168279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
6169279858Sjfv		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
6170279858Sjfv
6171279858Sjfv		rxq.dtype = 2;
6172269198Sjfv	}
6173269198Sjfv
6174279858Sjfv	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
6175279858Sjfv	if (status != I40E_SUCCESS)
6176279858Sjfv		return (EINVAL);
6177269198Sjfv
6178279858Sjfv	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
6179279858Sjfv	rxq.qlen = info->ring_len;
6180269198Sjfv
6181279858Sjfv	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
6182269198Sjfv
6183279858Sjfv	rxq.dsize = 1;
6184279858Sjfv	rxq.crcstrip = 1;
6185279858Sjfv	rxq.l2tsel = 1;
6186269198Sjfv
6187279858Sjfv	rxq.rxmax = info->max_pkt_size;
6188279858Sjfv	rxq.tphrdesc_ena = 1;
6189279858Sjfv	rxq.tphwdesc_ena = 1;
6190279858Sjfv	rxq.tphdata_ena = 1;
6191279858Sjfv	rxq.tphhead_ena = 1;
6192279858Sjfv	rxq.lrxqthresh = 2;
6193279858Sjfv	rxq.prefena = 1;
6194279858Sjfv
6195279858Sjfv	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
6196279858Sjfv	if (status != I40E_SUCCESS)
6197279858Sjfv		return (EINVAL);
6198279858Sjfv
6199279858Sjfv	return (0);
6200279858Sjfv}
6201279858Sjfv
6202279858Sjfvstatic void
6203279858Sjfvixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6204279858Sjfv    uint16_t msg_size)
6205279858Sjfv{
6206279858Sjfv	struct i40e_virtchnl_vsi_queue_config_info *info;
6207279858Sjfv	struct i40e_virtchnl_queue_pair_info *pair;
6208279858Sjfv	int i;
6209279858Sjfv
6210279858Sjfv	if (msg_size < sizeof(*info)) {
6211279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
6212279858Sjfv		    I40E_ERR_PARAM);
6213279858Sjfv		return;
6214279858Sjfv	}
6215279858Sjfv
6216279858Sjfv	info = msg;
6217279858Sjfv	if (info->num_queue_pairs == 0) {
6218279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
6219279858Sjfv		    I40E_ERR_PARAM);
6220279858Sjfv		return;
6221279858Sjfv	}
6222279858Sjfv
6223279858Sjfv	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
6224279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
6225279858Sjfv		    I40E_ERR_PARAM);
6226279858Sjfv		return;
6227279858Sjfv	}
6228279858Sjfv
6229279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
6230279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
6231279858Sjfv		    I40E_ERR_PARAM);
6232279858Sjfv		return;
6233279858Sjfv	}
6234279858Sjfv
6235279858Sjfv	for (i = 0; i < info->num_queue_pairs; i++) {
6236279858Sjfv		pair = &info->qpair[i];
6237279858Sjfv
6238279858Sjfv		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
6239279858Sjfv		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
6240279858Sjfv		    pair->txq.queue_id != pair->rxq.queue_id ||
6241279858Sjfv		    pair->txq.queue_id >= vf->vsi.num_queues) {
6242279858Sjfv
6243279858Sjfv			i40e_send_vf_nack(pf, vf,
6244279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
6245279858Sjfv			return;
6246279858Sjfv		}
6247279858Sjfv
6248279858Sjfv		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
6249279858Sjfv			i40e_send_vf_nack(pf, vf,
6250279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
6251279858Sjfv			return;
6252279858Sjfv		}
6253279858Sjfv
6254279858Sjfv		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
6255279858Sjfv			i40e_send_vf_nack(pf, vf,
6256279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
6257279858Sjfv			return;
6258279858Sjfv		}
6259279858Sjfv	}
6260279858Sjfv
6261279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
6262279858Sjfv}
6263279858Sjfv
6264279858Sjfvstatic void
6265279858Sjfvixl_vf_set_qctl(struct ixl_pf *pf,
6266279858Sjfv    const struct i40e_virtchnl_vector_map *vector,
6267279858Sjfv    enum i40e_queue_type cur_type, uint16_t cur_queue,
6268279858Sjfv    enum i40e_queue_type *last_type, uint16_t *last_queue)
6269279858Sjfv{
6270279858Sjfv	uint32_t offset, qctl;
6271279858Sjfv	uint16_t itr_indx;
6272279858Sjfv
6273279858Sjfv	if (cur_type == I40E_QUEUE_TYPE_RX) {
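	/*
	 * This queue's NEXTQ fields point at the queue programmed on the
	 * previous call, so the interrupt linked list is built from its tail
	 * toward its head; remember this queue so the next entry can link to it.
	 */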
6274279858Sjfv		offset = I40E_QINT_RQCTL(cur_queue);
6275279858Sjfv		itr_indx = vector->rxitr_idx;
6276279858Sjfv	} else {
6277279858Sjfv		offset = I40E_QINT_TQCTL(cur_queue);
6278279858Sjfv		itr_indx = vector->txitr_idx;
6279279858Sjfv	}
6280279858Sjfv
6281279858Sjfv	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
6282279858Sjfv	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
6283279858Sjfv	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
6284279858Sjfv	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
6285279858Sjfv	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
6286279858Sjfv
6287279858Sjfv	wr32(&pf->hw, offset, qctl);
6288279858Sjfv
6289279858Sjfv	*last_type = cur_type;
6290279858Sjfv	*last_queue = cur_queue;
6291279858Sjfv}
6292279858Sjfv
6293279858Sjfvstatic void
6294279858Sjfvixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
6295279858Sjfv    const struct i40e_virtchnl_vector_map *vector)
6296279858Sjfv{
6297279858Sjfv	struct i40e_hw *hw;
6298279858Sjfv	u_int qindex;
6299279858Sjfv	enum i40e_queue_type type, last_type;
6300279858Sjfv	uint32_t lnklst_reg;
6301279858Sjfv	uint16_t rxq_map, txq_map, cur_queue, last_queue;
6302279858Sjfv
6303279858Sjfv	hw = &pf->hw;
6304279858Sjfv
6305279858Sjfv	rxq_map = vector->rxq_map;
6306279858Sjfv	txq_map = vector->txq_map;
6307279858Sjfv
6308279858Sjfv	last_queue = IXL_END_OF_INTR_LNKLST;
6309279858Sjfv	last_type = I40E_QUEUE_TYPE_RX;
6310279858Sjfv
6311279858Sjfv	/*
6312279858Sjfv	 * The datasheet says that, to optimize performance, RX queues and TX
6313279858Sjfv	 * queues should be interleaved in the interrupt linked list, so we
6314279858Sjfv	 * process both at once here.
6315279858Sjfv	 */
6316279858Sjfv	while ((rxq_map != 0) || (txq_map != 0)) {
6317279858Sjfv		if (txq_map != 0) {
6318279858Sjfv			qindex = ffs(txq_map) - 1;
6319279858Sjfv			type = I40E_QUEUE_TYPE_TX;
6320279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
6321279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
6322279858Sjfv			    &last_type, &last_queue);
6323279858Sjfv			txq_map &= ~(1 << qindex);
6324279858Sjfv		}
6325279858Sjfv
6326279858Sjfv		if (rxq_map != 0) {
6327279858Sjfv			qindex = ffs(rxq_map) - 1;
6328279858Sjfv			type = I40E_QUEUE_TYPE_RX;
6329279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
6330279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
6331279858Sjfv			    &last_type, &last_queue);
6332279858Sjfv			rxq_map &= ~(1 << qindex);
6333279858Sjfv		}
6334279858Sjfv	}
6335279858Sjfv
6336279858Sjfv	if (vector->vector_id == 0)
6337279858Sjfv		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
6338279858Sjfv	else
6339279858Sjfv		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
6340279858Sjfv		    vf->vf_num);
6341279858Sjfv	wr32(hw, lnklst_reg,
6342279858Sjfv	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
6343279858Sjfv	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
6344279858Sjfv
6345279858Sjfv	ixl_flush(hw);
6346279858Sjfv}
6347279858Sjfv
6348279858Sjfvstatic void
6349279858Sjfvixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6350279858Sjfv    uint16_t msg_size)
6351279858Sjfv{
6352279858Sjfv	struct i40e_virtchnl_irq_map_info *map;
6353279858Sjfv	struct i40e_virtchnl_vector_map *vector;
6354279858Sjfv	struct i40e_hw *hw;
6355279858Sjfv	int i, largest_txq, largest_rxq;
6356279858Sjfv
6357279858Sjfv	hw = &pf->hw;
6358279858Sjfv
6359279858Sjfv	if (msg_size < sizeof(*map)) {
6360279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6361279858Sjfv		    I40E_ERR_PARAM);
6362279858Sjfv		return;
6363279858Sjfv	}
6364279858Sjfv
6365279858Sjfv	map = msg;
6366279858Sjfv	if (map->num_vectors == 0) {
6367279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6368279858Sjfv		    I40E_ERR_PARAM);
6369279858Sjfv		return;
6370279858Sjfv	}
6371279858Sjfv
6372279858Sjfv	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
6373279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6374279858Sjfv		    I40E_ERR_PARAM);
6375279858Sjfv		return;
6376279858Sjfv	}
6377279858Sjfv
6378279858Sjfv	for (i = 0; i < map->num_vectors; i++) {
6379279858Sjfv		vector = &map->vecmap[i];
6380279858Sjfv
6381279858Sjfv		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
6382279858Sjfv		    vector->vsi_id != vf->vsi.vsi_num) {
6383279858Sjfv			i40e_send_vf_nack(pf, vf,
6384279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
6385279858Sjfv			return;
6386279858Sjfv		}
6387279858Sjfv
6388279858Sjfv		if (vector->rxq_map != 0) {
6389279858Sjfv			largest_rxq = fls(vector->rxq_map) - 1;
6390279858Sjfv			if (largest_rxq >= vf->vsi.num_queues) {
6391279858Sjfv				i40e_send_vf_nack(pf, vf,
6392279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6393279858Sjfv				    I40E_ERR_PARAM);
6394279858Sjfv				return;
6395279858Sjfv			}
6396279858Sjfv		}
6397279858Sjfv
6398279858Sjfv		if (vector->txq_map != 0) {
6399279858Sjfv			largest_txq = fls(vector->txq_map) - 1;
6400279858Sjfv			if (largest_txq >= vf->vsi.num_queues) {
6401279858Sjfv				i40e_send_vf_nack(pf, vf,
6402279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6403279858Sjfv				    I40E_ERR_PARAM);
6404279858Sjfv				return;
6405279858Sjfv			}
6406279858Sjfv		}
6407279858Sjfv
6408279858Sjfv		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
6409279858Sjfv		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
6410279858Sjfv			i40e_send_vf_nack(pf, vf,
6411279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6412279858Sjfv			    I40E_ERR_PARAM);
6413279858Sjfv			return;
6414279858Sjfv		}
6415279858Sjfv
6416279858Sjfv		ixl_vf_config_vector(pf, vf, vector);
6417279858Sjfv	}
6418279858Sjfv
6419279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
6420279858Sjfv}
6421279858Sjfv
6422279858Sjfvstatic void
6423279858Sjfvixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6424279858Sjfv    uint16_t msg_size)
6425279858Sjfv{
6426279858Sjfv	struct i40e_virtchnl_queue_select *select;
6427279858Sjfv	int error;
6428279858Sjfv
6429279858Sjfv	if (msg_size != sizeof(*select)) {
6430279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6431279858Sjfv		    I40E_ERR_PARAM);
6432279858Sjfv		return;
6433279858Sjfv	}
6434279858Sjfv
6435279858Sjfv	select = msg;
6436279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
6437279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
6438279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6439279858Sjfv		    I40E_ERR_PARAM);
6440279858Sjfv		return;
6441279858Sjfv	}
6442279858Sjfv
6443279858Sjfv	error = ixl_enable_rings(&vf->vsi);
6444269198Sjfv	if (error) {
6445279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6446279858Sjfv		    I40E_ERR_TIMEOUT);
6447279858Sjfv		return;
6448269198Sjfv	}
6449269198Sjfv
6450279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6451269198Sjfv}
6452266423Sjfv
6453279858Sjfvstatic void
6454279858Sjfvixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6455279858Sjfv    void *msg, uint16_t msg_size)
6456279858Sjfv{
6457279858Sjfv	struct i40e_virtchnl_queue_select *select;
6458279858Sjfv	int error;
6459279858Sjfv
6460279858Sjfv	if (msg_size != sizeof(*select)) {
6461279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6462279858Sjfv		    I40E_ERR_PARAM);
6463279858Sjfv		return;
6464279858Sjfv	}
6465279858Sjfv
6466279858Sjfv	select = msg;
6467279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
6468279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
6469279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6470279858Sjfv		    I40E_ERR_PARAM);
6471279858Sjfv		return;
6472279858Sjfv	}
6473279858Sjfv
6474279858Sjfv	error = ixl_disable_rings(&vf->vsi);
6475279858Sjfv	if (error) {
6476279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6477279858Sjfv		    I40E_ERR_TIMEOUT);
6478279858Sjfv		return;
6479279858Sjfv	}
6480279858Sjfv
6481279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6482279858Sjfv}
6483279858Sjfv
6484279858Sjfvstatic boolean_t
6485279858Sjfvixl_zero_mac(const uint8_t *addr)
6486279858Sjfv{
6487279858Sjfv	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6488279858Sjfv
6489279858Sjfv	return (cmp_etheraddr(addr, zero));
6490279858Sjfv}
6491279858Sjfv
6492279858Sjfvstatic boolean_t
6493279858Sjfvixl_bcast_mac(const uint8_t *addr)
6494279858Sjfv{
6495279858Sjfv
6496279858Sjfv	return (cmp_etheraddr(addr, ixl_bcast_addr));
6497279858Sjfv}
6498279858Sjfv
6499279858Sjfvstatic int
6500279858Sjfvixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6501279858Sjfv{
6502279858Sjfv
6503279858Sjfv	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6504279858Sjfv		return (EINVAL);
6505279858Sjfv
6506279858Sjfv	/*
6507279858Sjfv	 * If the VF is not allowed to change its MAC address, don't let it
6508279858Sjfv	 * set a MAC filter for an address that is not a multicast address and
6509279858Sjfv	 * is not its assigned MAC.
6510279858Sjfv	 */
6511279858Sjfv	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6512279858Sjfv	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6513279858Sjfv		return (EPERM);
6514279858Sjfv
6515279858Sjfv	return (0);
6516279858Sjfv}
6517279858Sjfv
6518279858Sjfvstatic void
6519279858Sjfvixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6520279858Sjfv    uint16_t msg_size)
6521279858Sjfv{
6522279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6523279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6524279858Sjfv	struct ixl_vsi *vsi;
6525279858Sjfv	int i;
6526279858Sjfv	size_t expected_size;
6527279858Sjfv
6528279858Sjfv	vsi = &vf->vsi;
6529279858Sjfv
6530279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6531279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6532279858Sjfv		    I40E_ERR_PARAM);
6533279858Sjfv		return;
6534279858Sjfv	}
6535279858Sjfv
6536279858Sjfv	addr_list = msg;
6537279858Sjfv	expected_size = sizeof(*addr_list) +
6538279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6539279858Sjfv
6540279858Sjfv	if (addr_list->num_elements == 0 ||
6541279858Sjfv	    addr_list->vsi_id != vsi->vsi_num ||
6542279858Sjfv	    msg_size != expected_size) {
6543279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6544279858Sjfv		    I40E_ERR_PARAM);
6545279858Sjfv		return;
6546279858Sjfv	}
6547279858Sjfv
6548279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6549279858Sjfv		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6550279858Sjfv			i40e_send_vf_nack(pf, vf,
6551279858Sjfv			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6552279858Sjfv			return;
6553279858Sjfv		}
6554279858Sjfv	}
6555279858Sjfv
6556279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6557279858Sjfv		addr = &addr_list->list[i];
6558279858Sjfv		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6559279858Sjfv	}
6560279858Sjfv
6561279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6562279858Sjfv}
6563279858Sjfv
6564279858Sjfvstatic void
6565279858Sjfvixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6566279858Sjfv    uint16_t msg_size)
6567279858Sjfv{
6568279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6569279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6570279858Sjfv	size_t expected_size;
6571279858Sjfv	int i;
6572279858Sjfv
6573279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6574279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6575279858Sjfv		    I40E_ERR_PARAM);
6576279858Sjfv		return;
6577279858Sjfv	}
6578279858Sjfv
6579279858Sjfv	addr_list = msg;
6580279858Sjfv	expected_size = sizeof(*addr_list) +
6581279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6582279858Sjfv
6583279858Sjfv	if (addr_list->num_elements == 0 ||
6584279858Sjfv	    addr_list->vsi_id != vf->vsi.vsi_num ||
6585279858Sjfv	    msg_size != expected_size) {
6586279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6587279858Sjfv		    I40E_ERR_PARAM);
6588279858Sjfv		return;
6589279858Sjfv	}
6590279858Sjfv
6591279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6592279858Sjfv		addr = &addr_list->list[i];
6593279858Sjfv		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6594279858Sjfv			i40e_send_vf_nack(pf, vf,
6595279858Sjfv			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
6596279858Sjfv			return;
6597279858Sjfv		}
6598279858Sjfv	}
6599279858Sjfv
6600279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6601279858Sjfv		addr = &addr_list->list[i];
6602279858Sjfv		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6603279858Sjfv	}
6604279858Sjfv
6605279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6606279858Sjfv}
6607279858Sjfv
6608279858Sjfvstatic enum i40e_status_code
6609279858Sjfvixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6610279858Sjfv{
6611279858Sjfv	struct i40e_vsi_context vsi_ctx;
6612279858Sjfv
6613279858Sjfv	vsi_ctx.seid = vf->vsi.seid;
6614279858Sjfv
6615279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6616279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6617279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6618279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6619279858Sjfv	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6620279858Sjfv}
6621279858Sjfv
6622279858Sjfvstatic void
6623279858Sjfvixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6624279858Sjfv    uint16_t msg_size)
6625279858Sjfv{
6626279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6627279858Sjfv	enum i40e_status_code code;
6628279858Sjfv	size_t expected_size;
6629279858Sjfv	int i;
6630279858Sjfv
6631279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6632279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6633279858Sjfv		    I40E_ERR_PARAM);
6634279858Sjfv		return;
6635279858Sjfv	}
6636279858Sjfv
6637279858Sjfv	filter_list = msg;
6638279858Sjfv	expected_size = sizeof(*filter_list) +
6639279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6640279858Sjfv	if (filter_list->num_elements == 0 ||
6641279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6642279858Sjfv	    msg_size != expected_size) {
6643279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6644279858Sjfv		    I40E_ERR_PARAM);
6645279858Sjfv		return;
6646279858Sjfv	}
6647279858Sjfv
6648279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6649279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6650279858Sjfv		    I40E_ERR_PARAM);
6651279858Sjfv		return;
6652279858Sjfv	}
6653279858Sjfv
6654279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6655279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6656279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6657279858Sjfv			    I40E_ERR_PARAM);
6658279858Sjfv			return;
6659279858Sjfv		}
6660279858Sjfv	}
6661279858Sjfv
6662279858Sjfv	code = ixl_vf_enable_vlan_strip(pf, vf);
6663279858Sjfv	if (code != I40E_SUCCESS) {
6664279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6665279858Sjfv		    I40E_ERR_PARAM);
		return;
6666279858Sjfv	}
6667279858Sjfv
6668279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6669279858Sjfv		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6670279858Sjfv
6671279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6672279858Sjfv}
6673279858Sjfv
6674279858Sjfvstatic void
6675279858Sjfvixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6676279858Sjfv    uint16_t msg_size)
6677279858Sjfv{
6678279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6679279858Sjfv	int i;
6680279858Sjfv	size_t expected_size;
6681279858Sjfv
6682279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6683279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6684279858Sjfv		    I40E_ERR_PARAM);
6685279858Sjfv		return;
6686279858Sjfv	}
6687279858Sjfv
6688279858Sjfv	filter_list = msg;
6689279858Sjfv	expected_size = sizeof(*filter_list) +
6690279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6691279858Sjfv	if (filter_list->num_elements == 0 ||
6692279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6693279858Sjfv	    msg_size != expected_size) {
6694279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6695279858Sjfv		    I40E_ERR_PARAM);
6696279858Sjfv		return;
6697279858Sjfv	}
6698279858Sjfv
6699279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6700279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6701279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6702279858Sjfv			    I40E_ERR_PARAM);
6703279858Sjfv			return;
6704279858Sjfv		}
6705279858Sjfv	}
6706279858Sjfv
6707279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6708279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6709279858Sjfv		    I40E_ERR_PARAM);
6710279858Sjfv		return;
6711279858Sjfv	}
6712279858Sjfv
6713279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6714279858Sjfv		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6715279858Sjfv
6716279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6717279858Sjfv}
6718279858Sjfv
6719279858Sjfvstatic void
6720279858Sjfvixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6721279858Sjfv    void *msg, uint16_t msg_size)
6722279858Sjfv{
6723279858Sjfv	struct i40e_virtchnl_promisc_info *info;
6724279858Sjfv	enum i40e_status_code code;
6725279858Sjfv
6726279858Sjfv	if (msg_size != sizeof(*info)) {
6727279858Sjfv		i40e_send_vf_nack(pf, vf,
6728279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6729279858Sjfv		return;
6730279858Sjfv	}
6731279858Sjfv
6732295787Skevlo	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
6733279858Sjfv		i40e_send_vf_nack(pf, vf,
6734279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6735279858Sjfv		return;
6736279858Sjfv	}
6737279858Sjfv
6738279858Sjfv	info = msg;
6739279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
6740279858Sjfv		i40e_send_vf_nack(pf, vf,
6741279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6742279858Sjfv		return;
6743279858Sjfv	}
6744279858Sjfv
6745279858Sjfv	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6746279858Sjfv	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6747279858Sjfv	if (code != I40E_SUCCESS) {
6748279858Sjfv		i40e_send_vf_nack(pf, vf,
6749279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6750279858Sjfv		return;
6751279858Sjfv	}
6752279858Sjfv
6753279858Sjfv	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6754279858Sjfv	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6755279858Sjfv	if (code != I40E_SUCCESS) {
6756279858Sjfv		i40e_send_vf_nack(pf, vf,
6757279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6758279858Sjfv		return;
6759279858Sjfv	}
6760279858Sjfv
6761279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6762279858Sjfv}
6763279858Sjfv
6764279858Sjfvstatic void
6765279858Sjfvixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6766279858Sjfv    uint16_t msg_size)
6767279858Sjfv{
6768279858Sjfv	struct i40e_virtchnl_queue_select *queue;
6769279858Sjfv
6770279858Sjfv	if (msg_size != sizeof(*queue)) {
6771279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6772279858Sjfv		    I40E_ERR_PARAM);
6773279858Sjfv		return;
6774279858Sjfv	}
6775279858Sjfv
6776279858Sjfv	queue = msg;
6777279858Sjfv	if (queue->vsi_id != vf->vsi.vsi_num) {
6778279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6779279858Sjfv		    I40E_ERR_PARAM);
6780279858Sjfv		return;
6781279858Sjfv	}
6782279858Sjfv
6783279858Sjfv	ixl_update_eth_stats(&vf->vsi);
6784279858Sjfv
6785279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6786279858Sjfv	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6787279858Sjfv}
6788279858Sjfv
6789279858Sjfvstatic void
6790279858Sjfvixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6791279858Sjfv{
6792279858Sjfv	struct ixl_vf *vf;
6793279858Sjfv	void *msg;
6794279858Sjfv	uint16_t vf_num, msg_size;
6795279858Sjfv	uint32_t opcode;
6796279858Sjfv
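	/*
	 * The admin queue event carries the source VF in desc.retval and the
	 * virtchnl opcode in desc.cookie_high.
	 */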
6797279858Sjfv	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6798279858Sjfv	opcode = le32toh(event->desc.cookie_high);
6799279858Sjfv
6800279858Sjfv	if (vf_num >= pf->num_vfs) {
6801279858Sjfv		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6802279858Sjfv		return;
6803279858Sjfv	}
6804279858Sjfv
6805279858Sjfv	vf = &pf->vfs[vf_num];
6806279858Sjfv	msg = event->msg_buf;
6807279858Sjfv	msg_size = event->msg_len;
6808279858Sjfv
6809279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6810279858Sjfv	    "Got msg %s(%d) from VF-%d of size %d\n",
6811279858Sjfv	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6812279858Sjfv
6813279858Sjfv	switch (opcode) {
6814279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
6815279858Sjfv		ixl_vf_version_msg(pf, vf, msg, msg_size);
6816279858Sjfv		break;
6817279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
6818279858Sjfv		ixl_vf_reset_msg(pf, vf, msg, msg_size);
6819279858Sjfv		break;
6820279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6821279858Sjfv		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6822279858Sjfv		break;
6823279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6824279858Sjfv		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6825279858Sjfv		break;
6826279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6827279858Sjfv		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6828279858Sjfv		break;
6829279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6830279858Sjfv		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6831279858Sjfv		break;
6832279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6833279858Sjfv		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6834279858Sjfv		break;
6835279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6836279858Sjfv		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6837279858Sjfv		break;
6838279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6839279858Sjfv		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6840279858Sjfv		break;
6841279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
6842279858Sjfv		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6843279858Sjfv		break;
6844279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
6845279858Sjfv		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6846279858Sjfv		break;
6847279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6848279858Sjfv		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6849279858Sjfv		break;
6850279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
6851279858Sjfv		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6852279858Sjfv		break;
6853279858Sjfv
6854279858Sjfv	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6855279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6856279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6857279858Sjfv	default:
6858279858Sjfv		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6859279858Sjfv		break;
6860279858Sjfv	}
6861279858Sjfv}
6862279858Sjfv
6863279858Sjfv/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
6864279858Sjfvstatic void
6865279858Sjfvixl_handle_vflr(void *arg, int pending)
6866279858Sjfv{
6867279858Sjfv	struct ixl_pf *pf;
6868279858Sjfv	struct i40e_hw *hw;
6869279858Sjfv	uint16_t global_vf_num;
6870279858Sjfv	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6871279858Sjfv	int i;
6872279858Sjfv
6873279858Sjfv	pf = arg;
6874279858Sjfv	hw = &pf->hw;
6875279858Sjfv
6876279858Sjfv	IXL_PF_LOCK(pf);
6877279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
6878279858Sjfv		global_vf_num = hw->func_caps.vf_base_id + i;
6879279858Sjfv
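		/*
		 * Each VF has a bit in the GLGEN_VFLRSTAT registers; if it is
		 * set, the VF went through an FLR, so write the bit back to
		 * acknowledge it and reinitialize that VF.
		 */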
6880279858Sjfv		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6881279858Sjfv		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6882279858Sjfv		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6883279858Sjfv		if (vflrstat & vflrstat_mask) {
6884279858Sjfv			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6885279858Sjfv			    vflrstat_mask);
6886279858Sjfv
6887279858Sjfv			ixl_reinit_vf(pf, &pf->vfs[i]);
6888279858Sjfv		}
6889279858Sjfv	}
6890279858Sjfv
6891279858Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6892279858Sjfv	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6893279858Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6894279858Sjfv	ixl_flush(hw);
6895279858Sjfv
6896279858Sjfv	IXL_PF_UNLOCK(pf);
6897279858Sjfv}
6898279858Sjfv
6899279858Sjfvstatic int
6900279858Sjfvixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6901279858Sjfv{
6902279858Sjfv
6903279858Sjfv	switch (err) {
6904279858Sjfv	case I40E_AQ_RC_EPERM:
6905279858Sjfv		return (EPERM);
6906279858Sjfv	case I40E_AQ_RC_ENOENT:
6907279858Sjfv		return (ENOENT);
6908279858Sjfv	case I40E_AQ_RC_ESRCH:
6909279858Sjfv		return (ESRCH);
6910279858Sjfv	case I40E_AQ_RC_EINTR:
6911279858Sjfv		return (EINTR);
6912279858Sjfv	case I40E_AQ_RC_EIO:
6913279858Sjfv		return (EIO);
6914279858Sjfv	case I40E_AQ_RC_ENXIO:
6915279858Sjfv		return (ENXIO);
6916279858Sjfv	case I40E_AQ_RC_E2BIG:
6917279858Sjfv		return (E2BIG);
6918279858Sjfv	case I40E_AQ_RC_EAGAIN:
6919279858Sjfv		return (EAGAIN);
6920279858Sjfv	case I40E_AQ_RC_ENOMEM:
6921279858Sjfv		return (ENOMEM);
6922279858Sjfv	case I40E_AQ_RC_EACCES:
6923279858Sjfv		return (EACCES);
6924279858Sjfv	case I40E_AQ_RC_EFAULT:
6925279858Sjfv		return (EFAULT);
6926279858Sjfv	case I40E_AQ_RC_EBUSY:
6927279858Sjfv		return (EBUSY);
6928279858Sjfv	case I40E_AQ_RC_EEXIST:
6929279858Sjfv		return (EEXIST);
6930279858Sjfv	case I40E_AQ_RC_EINVAL:
6931279858Sjfv		return (EINVAL);
6932279858Sjfv	case I40E_AQ_RC_ENOTTY:
6933279858Sjfv		return (ENOTTY);
6934279858Sjfv	case I40E_AQ_RC_ENOSPC:
6935279858Sjfv		return (ENOSPC);
6936279858Sjfv	case I40E_AQ_RC_ENOSYS:
6937279858Sjfv		return (ENOSYS);
6938279858Sjfv	case I40E_AQ_RC_ERANGE:
6939279858Sjfv		return (ERANGE);
6940279858Sjfv	case I40E_AQ_RC_EFLUSHED:
6941279858Sjfv		return (EINVAL);	/* No exact equivalent in errno.h */
6942279858Sjfv	case I40E_AQ_RC_BAD_ADDR:
6943279858Sjfv		return (EFAULT);
6944279858Sjfv	case I40E_AQ_RC_EMODE:
6945279858Sjfv		return (EPERM);
6946279858Sjfv	case I40E_AQ_RC_EFBIG:
6947279858Sjfv		return (EFBIG);
6948279858Sjfv	default:
6949279858Sjfv		return (EINVAL);
6950279858Sjfv	}
6951279858Sjfv}
6952279858Sjfv
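/*
 * SR-IOV initialization: allocate the per-VF state array, create the VEB
 * that the VF VSIs will attach to, and leave the admin queue interrupt
 * enabled so VF messages can be serviced.
 */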
6953279858Sjfvstatic int
6954299546Serjixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6955279858Sjfv{
6956279858Sjfv	struct ixl_pf *pf;
6957279858Sjfv	struct i40e_hw *hw;
6958279858Sjfv	struct ixl_vsi *pf_vsi;
6959279858Sjfv	enum i40e_status_code ret;
6960279858Sjfv	int i, error;
6961279858Sjfv
6962279858Sjfv	pf = device_get_softc(dev);
6963279858Sjfv	hw = &pf->hw;
6964279858Sjfv	pf_vsi = &pf->vsi;
6965279858Sjfv
6966279858Sjfv	IXL_PF_LOCK(pf);
6967279858Sjfv	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6968279858Sjfv	    M_ZERO);
6969279858Sjfv
6970279858Sjfv	if (pf->vfs == NULL) {
6971279858Sjfv		error = ENOMEM;
6972279858Sjfv		goto fail;
6973279858Sjfv	}
6974279858Sjfv
6975279858Sjfv	for (i = 0; i < num_vfs; i++)
6976279858Sjfv		sysctl_ctx_init(&pf->vfs[i].ctx);
6977279858Sjfv
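	/* Create the VEB between the PF's uplink and the PF VSI. */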
6978279858Sjfv	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6979279858Sjfv	    1, FALSE, FALSE, &pf->veb_seid, NULL);
6980279858Sjfv	if (ret != I40E_SUCCESS) {
6981279858Sjfv		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6982279858Sjfv		device_printf(dev, "add_veb failed; code=%d error=%d\n", ret,
6983279858Sjfv		    error);
6984279858Sjfv		goto fail;
6985279858Sjfv	}
6986279858Sjfv
6987279858Sjfv	ixl_configure_msix(pf);
6988279858Sjfv	ixl_enable_adminq(hw);
6989279858Sjfv
6990279858Sjfv	pf->num_vfs = num_vfs;
6991279858Sjfv	IXL_PF_UNLOCK(pf);
6992279858Sjfv	return (0);
6993279858Sjfv
6994279858Sjfvfail:
6995279858Sjfv	free(pf->vfs, M_IXL);
6996279858Sjfv	pf->vfs = NULL;
6997279858Sjfv	IXL_PF_UNLOCK(pf);
6998279858Sjfv	return (error);
6999279858Sjfv}
7000279858Sjfv
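/*
 * SR-IOV teardown: remove each VF VSI and the VEB from the hardware switch,
 * then release the per-VF state.
 */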
7001279858Sjfvstatic void
7002299546Serjixl_iov_uninit(device_t dev)
7003279858Sjfv{
7004279858Sjfv	struct ixl_pf *pf;
7005279858Sjfv	struct i40e_hw *hw;
7006279858Sjfv	struct ixl_vsi *vsi;
7007279858Sjfv	struct ifnet *ifp;
7008279858Sjfv	struct ixl_vf *vfs;
7009279858Sjfv	int i, num_vfs;
7010279858Sjfv
7011279858Sjfv	pf = device_get_softc(dev);
7012279858Sjfv	hw = &pf->hw;
7013279858Sjfv	vsi = &pf->vsi;
7014279858Sjfv	ifp = vsi->ifp;
7015279858Sjfv
7016279858Sjfv	IXL_PF_LOCK(pf);
7017279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
7018279858Sjfv		if (pf->vfs[i].vsi.seid != 0)
7019279858Sjfv			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
7020279858Sjfv	}
7021279858Sjfv
7022279858Sjfv	if (pf->veb_seid != 0) {
7023279858Sjfv		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
7024279858Sjfv		pf->veb_seid = 0;
7025279858Sjfv	}
7026279858Sjfv
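	/*
	 * With no VFs left, the PF VSI's interrupts are only needed while
	 * the interface itself is running.
	 */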
7027279858Sjfv	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
7028279858Sjfv		ixl_disable_intr(vsi);
7029279858Sjfv
7030279858Sjfv	vfs = pf->vfs;
7031279858Sjfv	num_vfs = pf->num_vfs;
7032279858Sjfv
7033279858Sjfv	pf->vfs = NULL;
7034279858Sjfv	pf->num_vfs = 0;
7035279858Sjfv	IXL_PF_UNLOCK(pf);
7036279858Sjfv
7037279858Sjfv	/* Do this after the unlock as sysctl_ctx_free might sleep. */
7038279858Sjfv	for (i = 0; i < num_vfs; i++)
7039279858Sjfv		sysctl_ctx_free(&vfs[i].ctx);
7040279858Sjfv	free(vfs, M_IXL);
7041279858Sjfv}
7042279858Sjfv
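/*
 * Create one VF: set up its VSI and translate the per-VF configuration
 * nvlist ("mac-addr", "allow-set-mac", "mac-anti-spoof", "allow-promisc")
 * into VF_FLAG_* capabilities, then reset the VF so it starts from a
 * known state.
 */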
7043279858Sjfvstatic int
7044279858Sjfvixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
7045279858Sjfv{
7046279858Sjfv	char sysctl_name[QUEUE_NAME_LEN];
7047279858Sjfv	struct ixl_pf *pf;
7048279858Sjfv	struct ixl_vf *vf;
7049279858Sjfv	const void *mac;
7050279858Sjfv	size_t size;
7051279858Sjfv	int error;
7052279858Sjfv
7053279858Sjfv	pf = device_get_softc(dev);
7054279858Sjfv	vf = &pf->vfs[vfnum];
7055279858Sjfv
7056279858Sjfv	IXL_PF_LOCK(pf);
7057279858Sjfv	vf->vf_num = vfnum;
7058279858Sjfv
7059279858Sjfv	vf->vsi.back = pf;
7060279858Sjfv	vf->vf_flags = VF_FLAG_ENABLED;
7061279858Sjfv	SLIST_INIT(&vf->vsi.ftl);
7062279858Sjfv
7063279858Sjfv	error = ixl_vf_setup_vsi(pf, vf);
7064279858Sjfv	if (error != 0)
7065279858Sjfv		goto out;
7066279858Sjfv
7067279858Sjfv	if (nvlist_exists_binary(params, "mac-addr")) {
7068279858Sjfv		mac = nvlist_get_binary(params, "mac-addr", &size);
7069279858Sjfv		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
7070279858Sjfv
7071279858Sjfv		if (nvlist_get_bool(params, "allow-set-mac"))
7072279858Sjfv			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
7073279858Sjfv	} else
7074279858Sjfv		/*
7075279858Sjfv		 * If the administrator has not specified a MAC address then
7076279858Sjfv		 * we must allow the VF to choose one.
7077279858Sjfv		 */
7078279858Sjfv		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
7079279858Sjfv
7080279858Sjfv	if (nvlist_get_bool(params, "mac-anti-spoof"))
7081279858Sjfv		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
7082279858Sjfv
7083279858Sjfv	if (nvlist_get_bool(params, "allow-promisc"))
7084279858Sjfv		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
7085279858Sjfv
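	/* Every VF is currently allowed to configure VLANs. */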
7086279858Sjfv	vf->vf_flags |= VF_FLAG_VLAN_CAP;
7087279858Sjfv
7088279858Sjfv	ixl_reset_vf(pf, vf);
7089279858Sjfvout:
7090279858Sjfv	IXL_PF_UNLOCK(pf);
7091279858Sjfv	if (error == 0) {
7092279858Sjfv		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
7093279858Sjfv		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
7094279858Sjfv	}
7095279858Sjfv
7096279858Sjfv	return (error);
7097279858Sjfv}
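
/*
 * For illustration only: a sketch of an iovctl.conf(5) stanza that would
 * exercise the per-VF options handled by ixl_add_vf() above.  The device
 * name, VF count, and MAC address below are hypothetical; only the option
 * names mirror the nvlist keys this driver consumes.
 *
 *	PF {
 *		device : "ixl0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "02:00:00:00:00:01";
 *		allow-set-mac : false;
 *		mac-anti-spoof : true;
 *		allow-promisc : false;
 *	}
 */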
7098279858Sjfv#endif /* PCI_IOV */
7099