if_ixl.c revision 279858
1266423Sjfv/******************************************************************************
2266423Sjfv
3279033Sjfv  Copyright (c) 2013-2015, Intel Corporation
4266423Sjfv  All rights reserved.
5266423Sjfv
6266423Sjfv  Redistribution and use in source and binary forms, with or without
7266423Sjfv  modification, are permitted provided that the following conditions are met:
8266423Sjfv
9266423Sjfv   1. Redistributions of source code must retain the above copyright notice,
10266423Sjfv      this list of conditions and the following disclaimer.
11266423Sjfv
12266423Sjfv   2. Redistributions in binary form must reproduce the above copyright
13266423Sjfv      notice, this list of conditions and the following disclaimer in the
14266423Sjfv      documentation and/or other materials provided with the distribution.
15266423Sjfv
16266423Sjfv   3. Neither the name of the Intel Corporation nor the names of its
17266423Sjfv      contributors may be used to endorse or promote products derived from
18266423Sjfv      this software without specific prior written permission.
19266423Sjfv
20266423Sjfv  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21266423Sjfv  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22266423Sjfv  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23266423Sjfv  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24266423Sjfv  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25266423Sjfv  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26266423Sjfv  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27266423Sjfv  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28266423Sjfv  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29266423Sjfv  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30266423Sjfv  POSSIBILITY OF SUCH DAMAGE.
31266423Sjfv
32266423Sjfv******************************************************************************/
33266423Sjfv/*$FreeBSD: head/sys/dev/ixl/if_ixl.c 279858 2015-03-10 19:17:40Z jfv $*/
34266423Sjfv
35279033Sjfv#ifndef IXL_STANDALONE_BUILD
36266423Sjfv#include "opt_inet.h"
37266423Sjfv#include "opt_inet6.h"
38277084Sjfv#include "opt_rss.h"
39279033Sjfv#endif
40279033Sjfv
41270346Sjfv#include "ixl.h"
42270346Sjfv#include "ixl_pf.h"
43269198Sjfv
44277262Sjfv#ifdef RSS
45277262Sjfv#include <net/rss_config.h>
46277262Sjfv#endif
47277262Sjfv
48266423Sjfv/*********************************************************************
49266423Sjfv *  Driver version
50266423Sjfv *********************************************************************/
51279858Sjfvchar ixl_driver_version[] = "1.4.1";
52266423Sjfv
53266423Sjfv/*********************************************************************
54266423Sjfv *  PCI Device ID Table
55266423Sjfv *
56266423Sjfv *  Used by probe to select devices to load on
57270346Sjfv *  Last field stores an index into ixl_strings
58266423Sjfv *  Last entry must be all 0s
59266423Sjfv *
60266423Sjfv *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61266423Sjfv *********************************************************************/
62266423Sjfv
63270346Sjfvstatic ixl_vendor_info_t ixl_vendor_info_array[] =
64266423Sjfv{
65266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
66266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
67266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
68266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
69266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
70266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
71266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
72270346Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
73279858Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
74266423Sjfv	/* required last entry */
75266423Sjfv	{0, 0, 0, 0, 0}
76266423Sjfv};
77266423Sjfv
78266423Sjfv/*********************************************************************
79266423Sjfv *  Table of branding strings
80266423Sjfv *********************************************************************/
81266423Sjfv
82270346Sjfvstatic char    *ixl_strings[] = {
83266423Sjfv	"Intel(R) Ethernet Connection XL710 Driver"
84266423Sjfv};
85266423Sjfv
86266423Sjfv
87266423Sjfv/*********************************************************************
88266423Sjfv *  Function prototypes
89266423Sjfv *********************************************************************/
90270346Sjfvstatic int      ixl_probe(device_t);
91270346Sjfvstatic int      ixl_attach(device_t);
92270346Sjfvstatic int      ixl_detach(device_t);
93270346Sjfvstatic int      ixl_shutdown(device_t);
94270346Sjfvstatic int	ixl_get_hw_capabilities(struct ixl_pf *);
95270346Sjfvstatic void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
96270346Sjfvstatic int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
97270346Sjfvstatic void	ixl_init(void *);
98270346Sjfvstatic void	ixl_init_locked(struct ixl_pf *);
99270346Sjfvstatic void     ixl_stop(struct ixl_pf *);
100270346Sjfvstatic void     ixl_media_status(struct ifnet *, struct ifmediareq *);
101270346Sjfvstatic int      ixl_media_change(struct ifnet *);
102270346Sjfvstatic void     ixl_update_link_status(struct ixl_pf *);
103270346Sjfvstatic int      ixl_allocate_pci_resources(struct ixl_pf *);
104270346Sjfvstatic u16	ixl_get_bus_info(struct i40e_hw *, device_t);
105270346Sjfvstatic int	ixl_setup_stations(struct ixl_pf *);
106279033Sjfvstatic int	ixl_switch_config(struct ixl_pf *);
107270346Sjfvstatic int	ixl_initialize_vsi(struct ixl_vsi *);
108270346Sjfvstatic int	ixl_assign_vsi_msix(struct ixl_pf *);
109270346Sjfvstatic int	ixl_assign_vsi_legacy(struct ixl_pf *);
110270346Sjfvstatic int	ixl_init_msix(struct ixl_pf *);
111270346Sjfvstatic void	ixl_configure_msix(struct ixl_pf *);
112270346Sjfvstatic void	ixl_configure_itr(struct ixl_pf *);
113270346Sjfvstatic void	ixl_configure_legacy(struct ixl_pf *);
114270346Sjfvstatic void	ixl_free_pci_resources(struct ixl_pf *);
115270346Sjfvstatic void	ixl_local_timer(void *);
116270346Sjfvstatic int	ixl_setup_interface(device_t, struct ixl_vsi *);
117279858Sjfvstatic void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
118270346Sjfvstatic void	ixl_config_rss(struct ixl_vsi *);
119270346Sjfvstatic void	ixl_set_queue_rx_itr(struct ixl_queue *);
120270346Sjfvstatic void	ixl_set_queue_tx_itr(struct ixl_queue *);
121274205Sjfvstatic int	ixl_set_advertised_speeds(struct ixl_pf *, int);
122266423Sjfv
123279858Sjfvstatic int	ixl_enable_rings(struct ixl_vsi *);
124279858Sjfvstatic int	ixl_disable_rings(struct ixl_vsi *);
125279858Sjfvstatic void	ixl_enable_intr(struct ixl_vsi *);
126279858Sjfvstatic void	ixl_disable_intr(struct ixl_vsi *);
127279858Sjfvstatic void	ixl_disable_rings_intr(struct ixl_vsi *);
128266423Sjfv
129270346Sjfvstatic void     ixl_enable_adminq(struct i40e_hw *);
130270346Sjfvstatic void     ixl_disable_adminq(struct i40e_hw *);
131270346Sjfvstatic void     ixl_enable_queue(struct i40e_hw *, int);
132270346Sjfvstatic void     ixl_disable_queue(struct i40e_hw *, int);
133270346Sjfvstatic void     ixl_enable_legacy(struct i40e_hw *);
134270346Sjfvstatic void     ixl_disable_legacy(struct i40e_hw *);
135266423Sjfv
136270346Sjfvstatic void     ixl_set_promisc(struct ixl_vsi *);
137270346Sjfvstatic void     ixl_add_multi(struct ixl_vsi *);
138270346Sjfvstatic void     ixl_del_multi(struct ixl_vsi *);
139270346Sjfvstatic void	ixl_register_vlan(void *, struct ifnet *, u16);
140270346Sjfvstatic void	ixl_unregister_vlan(void *, struct ifnet *, u16);
141270346Sjfvstatic void	ixl_setup_vlan_filters(struct ixl_vsi *);
142266423Sjfv
143270346Sjfvstatic void	ixl_init_filters(struct ixl_vsi *);
144279858Sjfvstatic void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
145270346Sjfvstatic void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
146270346Sjfvstatic void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
147270346Sjfvstatic void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
148270346Sjfvstatic void	ixl_del_hw_filters(struct ixl_vsi *, int);
149270346Sjfvstatic struct ixl_mac_filter *
150270346Sjfv		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
151270346Sjfvstatic void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
152279858Sjfvstatic void	ixl_free_mac_filters(struct ixl_vsi *vsi);
153266423Sjfv
154279858Sjfv
155266423Sjfv/* Sysctl debug interface */
156270346Sjfvstatic int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
157270346Sjfvstatic void	ixl_print_debug_info(struct ixl_pf *);
158266423Sjfv
159266423Sjfv/* The MSI/X Interrupt handlers */
160270346Sjfvstatic void	ixl_intr(void *);
161270346Sjfvstatic void	ixl_msix_que(void *);
162270346Sjfvstatic void	ixl_msix_adminq(void *);
163270346Sjfvstatic void	ixl_handle_mdd_event(struct ixl_pf *);
164266423Sjfv
165266423Sjfv/* Deferred interrupt tasklets */
166270346Sjfvstatic void	ixl_do_adminq(void *, int);
167266423Sjfv
168266423Sjfv/* Sysctl handlers */
169270346Sjfvstatic int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
170270346Sjfvstatic int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
171270346Sjfvstatic int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
172274205Sjfvstatic int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
173266423Sjfv
174266423Sjfv/* Statistics */
175270346Sjfvstatic void     ixl_add_hw_stats(struct ixl_pf *);
176270346Sjfvstatic void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
177266423Sjfv		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
178270346Sjfvstatic void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
179266423Sjfv		    struct sysctl_oid_list *,
180266423Sjfv		    struct i40e_eth_stats *);
181270346Sjfvstatic void	ixl_update_stats_counters(struct ixl_pf *);
182270346Sjfvstatic void	ixl_update_eth_stats(struct ixl_vsi *);
183279858Sjfvstatic void	ixl_update_vsi_stats(struct ixl_vsi *);
184270346Sjfvstatic void	ixl_pf_reset_stats(struct ixl_pf *);
185270346Sjfvstatic void	ixl_vsi_reset_stats(struct ixl_vsi *);
186270346Sjfvstatic void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
187266423Sjfv		    u64 *, u64 *);
188270346Sjfvstatic void	ixl_stat_update32(struct i40e_hw *, u32, bool,
189266423Sjfv		    u64 *, u64 *);
190266423Sjfv
191277084Sjfv#ifdef IXL_DEBUG_SYSCTL
192270346Sjfvstatic int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
193270346Sjfvstatic int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
194270346Sjfvstatic int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
195274205Sjfvstatic int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
196274205Sjfvstatic int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
197266423Sjfv#endif
198266423Sjfv
199279858Sjfv#ifdef PCI_IOV
200279858Sjfvstatic int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
201279858Sjfv
202279858Sjfvstatic int	ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
203279858Sjfvstatic void	ixl_uninit_iov(device_t dev);
204279858Sjfvstatic int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
205279858Sjfv
206279858Sjfvstatic void	ixl_handle_vf_msg(struct ixl_pf *,
207279858Sjfv		    struct i40e_arq_event_info *);
208279858Sjfvstatic void	ixl_handle_vflr(void *arg, int pending);
209279858Sjfv
210279858Sjfvstatic void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
211279858Sjfvstatic void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
212279858Sjfv#endif
213279858Sjfv
214266423Sjfv/*********************************************************************
215266423Sjfv *  FreeBSD Device Interface Entry Points
216266423Sjfv *********************************************************************/
217266423Sjfv
218270346Sjfvstatic device_method_t ixl_methods[] = {
219266423Sjfv	/* Device interface */
220270346Sjfv	DEVMETHOD(device_probe, ixl_probe),
221270346Sjfv	DEVMETHOD(device_attach, ixl_attach),
222270346Sjfv	DEVMETHOD(device_detach, ixl_detach),
223270346Sjfv	DEVMETHOD(device_shutdown, ixl_shutdown),
224279858Sjfv#ifdef PCI_IOV
225279858Sjfv	DEVMETHOD(pci_init_iov, ixl_init_iov),
226279858Sjfv	DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
227279858Sjfv	DEVMETHOD(pci_add_vf, ixl_add_vf),
228279858Sjfv#endif
229266423Sjfv	{0, 0}
230266423Sjfv};
231266423Sjfv
232270346Sjfvstatic driver_t ixl_driver = {
233270346Sjfv	"ixl", ixl_methods, sizeof(struct ixl_pf),
234266423Sjfv};
235266423Sjfv
236270346Sjfvdevclass_t ixl_devclass;
237270346SjfvDRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
238266423Sjfv
239270346SjfvMODULE_DEPEND(ixl, pci, 1, 1, 1);
240270346SjfvMODULE_DEPEND(ixl, ether, 1, 1, 1);
241266423Sjfv
242266423Sjfv/*
243269198Sjfv** Global reset mutex
244269198Sjfv*/
245270346Sjfvstatic struct mtx ixl_reset_mtx;
246269198Sjfv
247269198Sjfv/*
248270346Sjfv** TUNEABLE PARAMETERS:
249270346Sjfv*/
250270346Sjfv
251270346Sjfvstatic SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
252270346Sjfv                   "IXL driver parameters");
253270346Sjfv
254270346Sjfv/*
255266423Sjfv * MSIX should be the default for best performance,
256266423Sjfv * but this allows it to be forced off for testing.
257266423Sjfv */
258270346Sjfvstatic int ixl_enable_msix = 1;
259270346SjfvTUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
260270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
261270346Sjfv    "Enable MSI-X interrupts");
262266423Sjfv
263266423Sjfv/*
264266423Sjfv** Number of descriptors per ring:
265266423Sjfv**   - TX and RX are the same size
266266423Sjfv*/
267270346Sjfvstatic int ixl_ringsz = DEFAULT_RING;
268270346SjfvTUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
269270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
270270346Sjfv    &ixl_ringsz, 0, "Descriptor Ring Size");
271266423Sjfv
272266423Sjfv/*
273266423Sjfv** This can be set manually, if left as 0 the
274266423Sjfv** number of queues will be calculated based
275266423Sjfv** on cpus and msix vectors available.
276266423Sjfv*/
277270346Sjfvint ixl_max_queues = 0;
278270346SjfvTUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
279270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
280270346Sjfv    &ixl_max_queues, 0, "Number of Queues");
281266423Sjfv
282266423Sjfv/*
283266423Sjfv** Controls for Interrupt Throttling
284266423Sjfv**	- true/false for dynamic adjustment
285266423Sjfv** 	- default values for static ITR
286266423Sjfv*/
287270346Sjfvint ixl_dynamic_rx_itr = 0;
288270346SjfvTUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
289270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
290270346Sjfv    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
291266423Sjfv
292270346Sjfvint ixl_dynamic_tx_itr = 0;
293270346SjfvTUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
294270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
295270346Sjfv    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
296266423Sjfv
297270346Sjfvint ixl_rx_itr = IXL_ITR_8K;
298270346SjfvTUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
299270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
300270346Sjfv    &ixl_rx_itr, 0, "RX Interrupt Rate");
301270346Sjfv
302270346Sjfvint ixl_tx_itr = IXL_ITR_4K;
303270346SjfvTUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
304270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
305270346Sjfv    &ixl_tx_itr, 0, "TX Interrupt Rate");
306270346Sjfv
307270346Sjfv#ifdef IXL_FDIR
308270346Sjfvstatic int ixl_enable_fdir = 1;
309270346SjfvTUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
310266423Sjfv/* Rate at which we sample */
311270346Sjfvint ixl_atr_rate = 20;
312270346SjfvTUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
313266423Sjfv#endif
314266423Sjfv
315274205Sjfv
316270346Sjfvstatic char *ixl_fc_string[6] = {
317266423Sjfv	"None",
318266423Sjfv	"Rx",
319266423Sjfv	"Tx",
320266423Sjfv	"Full",
321266423Sjfv	"Priority",
322266423Sjfv	"Default"
323266423Sjfv};
324266423Sjfv
325279858Sjfvstatic MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
326269198Sjfv
327279858Sjfvstatic uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
328279858Sjfv    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
329279858Sjfv
330266423Sjfv/*********************************************************************
331266423Sjfv *  Device identification routine
332266423Sjfv *
333270346Sjfv *  ixl_probe determines if the driver should be loaded on
334266423Sjfv *  the hardware based on PCI vendor/device id of the device.
335266423Sjfv *
336266423Sjfv *  return BUS_PROBE_DEFAULT on success, positive on failure
337266423Sjfv *********************************************************************/
338266423Sjfv
339266423Sjfvstatic int
340270346Sjfvixl_probe(device_t dev)
341266423Sjfv{
342270346Sjfv	ixl_vendor_info_t *ent;
343266423Sjfv
344266423Sjfv	u16	pci_vendor_id, pci_device_id;
345266423Sjfv	u16	pci_subvendor_id, pci_subdevice_id;
346266423Sjfv	char	device_name[256];
347269198Sjfv	static bool lock_init = FALSE;
348266423Sjfv
349270346Sjfv	INIT_DEBUGOUT("ixl_probe: begin");
350266423Sjfv
351266423Sjfv	pci_vendor_id = pci_get_vendor(dev);
352266423Sjfv	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
353266423Sjfv		return (ENXIO);
354266423Sjfv
355266423Sjfv	pci_device_id = pci_get_device(dev);
356266423Sjfv	pci_subvendor_id = pci_get_subvendor(dev);
357266423Sjfv	pci_subdevice_id = pci_get_subdevice(dev);
358266423Sjfv
359270346Sjfv	ent = ixl_vendor_info_array;
360266423Sjfv	while (ent->vendor_id != 0) {
361266423Sjfv		if ((pci_vendor_id == ent->vendor_id) &&
362266423Sjfv		    (pci_device_id == ent->device_id) &&
363266423Sjfv
364266423Sjfv		    ((pci_subvendor_id == ent->subvendor_id) ||
365266423Sjfv		     (ent->subvendor_id == 0)) &&
366266423Sjfv
367266423Sjfv		    ((pci_subdevice_id == ent->subdevice_id) ||
368266423Sjfv		     (ent->subdevice_id == 0))) {
369266423Sjfv			sprintf(device_name, "%s, Version - %s",
370270346Sjfv				ixl_strings[ent->index],
371270346Sjfv				ixl_driver_version);
372266423Sjfv			device_set_desc_copy(dev, device_name);
373269198Sjfv			/* One shot mutex init */
374269198Sjfv			if (lock_init == FALSE) {
375269198Sjfv				lock_init = TRUE;
376270346Sjfv				mtx_init(&ixl_reset_mtx,
377270346Sjfv				    "ixl_reset",
378270346Sjfv				    "IXL RESET Lock", MTX_DEF);
379269198Sjfv			}
380266423Sjfv			return (BUS_PROBE_DEFAULT);
381266423Sjfv		}
382266423Sjfv		ent++;
383266423Sjfv	}
384266423Sjfv	return (ENXIO);
385266423Sjfv}
386266423Sjfv
387266423Sjfv/*********************************************************************
388266423Sjfv *  Device initialization routine
389266423Sjfv *
390266423Sjfv *  The attach entry point is called when the driver is being loaded.
391266423Sjfv *  This routine identifies the type of hardware, allocates all resources
392266423Sjfv *  and initializes the hardware.
393266423Sjfv *
394266423Sjfv *  return 0 on success, positive on failure
395266423Sjfv *********************************************************************/
396266423Sjfv
397266423Sjfvstatic int
398270346Sjfvixl_attach(device_t dev)
399266423Sjfv{
400270346Sjfv	struct ixl_pf	*pf;
401266423Sjfv	struct i40e_hw	*hw;
402270346Sjfv	struct ixl_vsi *vsi;
403266423Sjfv	u16		bus;
404266423Sjfv	int             error = 0;
405279858Sjfv#ifdef PCI_IOV
406279858Sjfv	nvlist_t	*pf_schema, *vf_schema;
407279858Sjfv	int		iov_error;
408279858Sjfv#endif
409266423Sjfv
410270346Sjfv	INIT_DEBUGOUT("ixl_attach: begin");
411266423Sjfv
412266423Sjfv	/* Allocate, clear, and link in our primary soft structure */
413266423Sjfv	pf = device_get_softc(dev);
414266423Sjfv	pf->dev = pf->osdep.dev = dev;
415266423Sjfv	hw = &pf->hw;
416266423Sjfv
417266423Sjfv	/*
418266423Sjfv	** Note this assumes we have a single embedded VSI,
419266423Sjfv	** this could be enhanced later to allocate multiple
420266423Sjfv	*/
421266423Sjfv	vsi = &pf->vsi;
422266423Sjfv	vsi->dev = pf->dev;
423266423Sjfv
424266423Sjfv	/* Core Lock Init*/
425270346Sjfv	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
426266423Sjfv
427266423Sjfv	/* Set up the timer callout */
428266423Sjfv	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
429266423Sjfv
430266423Sjfv	/* Set up sysctls */
431266423Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
432266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
433266423Sjfv	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
434270346Sjfv	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");
435266423Sjfv
436269198Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
437269198Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
438269198Sjfv	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
439270346Sjfv	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");
440269198Sjfv
441270346Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
442270346Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
443270346Sjfv	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
444270346Sjfv	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
445270346Sjfv
446274205Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
447274205Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
448274205Sjfv	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
449274205Sjfv	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
450274205Sjfv
451266423Sjfv	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
452266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
453273377Shselasky	    OID_AUTO, "rx_itr", CTLFLAG_RW,
454270346Sjfv	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
455266423Sjfv
456266423Sjfv	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
457266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
458273377Shselasky	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
459270346Sjfv	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
460266423Sjfv
461266423Sjfv	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
462266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
463273377Shselasky	    OID_AUTO, "tx_itr", CTLFLAG_RW,
464270346Sjfv	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
465266423Sjfv
466266423Sjfv	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
467266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
468273377Shselasky	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
469270346Sjfv	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
470266423Sjfv
471277084Sjfv#ifdef IXL_DEBUG_SYSCTL
472266423Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
473266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
474266423Sjfv	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
475270346Sjfv	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
476266423Sjfv
477266423Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
478266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
479266423Sjfv	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
480270346Sjfv	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
481266423Sjfv
482266423Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
483266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
484266423Sjfv	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
485270346Sjfv	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
486269198Sjfv
487269198Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
488269198Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
489274205Sjfv	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
490274205Sjfv	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
491269198Sjfv
492269198Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
493269198Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
494274205Sjfv	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
495274205Sjfv	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
496266423Sjfv#endif
497266423Sjfv
498274205Sjfv	/* Save off the PCI information */
499266423Sjfv	hw->vendor_id = pci_get_vendor(dev);
500266423Sjfv	hw->device_id = pci_get_device(dev);
501266423Sjfv	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
502266423Sjfv	hw->subsystem_vendor_id =
503266423Sjfv	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
504266423Sjfv	hw->subsystem_device_id =
505266423Sjfv	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
506266423Sjfv
507269198Sjfv	hw->bus.device = pci_get_slot(dev);
508266423Sjfv	hw->bus.func = pci_get_function(dev);
509266423Sjfv
510279858Sjfv	pf->vc_debug_lvl = 1;
511279858Sjfv
512266423Sjfv	/* Do PCI setup - map BAR0, etc */
513270346Sjfv	if (ixl_allocate_pci_resources(pf)) {
514266423Sjfv		device_printf(dev, "Allocation of PCI resources failed\n");
515266423Sjfv		error = ENXIO;
516266423Sjfv		goto err_out;
517266423Sjfv	}
518266423Sjfv
519266423Sjfv	/* Create for initial debugging use */
520266423Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
521266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
522266423Sjfv	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
523270346Sjfv	    ixl_debug_info, "I", "Debug Information");
524266423Sjfv
525266423Sjfv
526266423Sjfv	/* Establish a clean starting point */
527269198Sjfv	i40e_clear_hw(hw);
528266423Sjfv	error = i40e_pf_reset(hw);
529266423Sjfv	if (error) {
530269198Sjfv		device_printf(dev,"PF reset failure %x\n", error);
531269198Sjfv		error = EIO;
532269198Sjfv		goto err_out;
533269198Sjfv	}
534266423Sjfv
535266423Sjfv	/* Set admin queue parameters */
536270346Sjfv	hw->aq.num_arq_entries = IXL_AQ_LEN;
537270346Sjfv	hw->aq.num_asq_entries = IXL_AQ_LEN;
538270346Sjfv	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
539270346Sjfv	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
540266423Sjfv
541266423Sjfv	/* Initialize the shared code */
542266423Sjfv	error = i40e_init_shared_code(hw);
543266423Sjfv	if (error) {
544266423Sjfv		device_printf(dev,"Unable to initialize the shared code\n");
545266423Sjfv		error = EIO;
546266423Sjfv		goto err_out;
547266423Sjfv	}
548266423Sjfv
549266423Sjfv	/* Set up the admin queue */
550266423Sjfv	error = i40e_init_adminq(hw);
551266423Sjfv	if (error) {
552269198Sjfv		device_printf(dev, "The driver for the device stopped "
553269198Sjfv		    "because the NVM image is newer than expected.\n"
554269198Sjfv		    "You must install the most recent version of "
555269198Sjfv		    " the network driver.\n");
556266423Sjfv		goto err_out;
557266423Sjfv	}
558270346Sjfv	device_printf(dev, "%s\n", ixl_fw_version_str(hw));
559266423Sjfv
560269198Sjfv        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
561269198Sjfv	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
562269198Sjfv		device_printf(dev, "The driver for the device detected "
563269198Sjfv		    "a newer version of the NVM image than expected.\n"
564269198Sjfv		    "Please install the most recent version of the network driver.\n");
565269198Sjfv	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
566269198Sjfv	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
567269198Sjfv		device_printf(dev, "The driver for the device detected "
568269198Sjfv		    "an older version of the NVM image than expected.\n"
569269198Sjfv		    "Please update the NVM image.\n");
570266423Sjfv
571266423Sjfv	/* Clear PXE mode */
572266423Sjfv	i40e_clear_pxe_mode(hw);
573266423Sjfv
574266423Sjfv	/* Get capabilities from the device */
575270346Sjfv	error = ixl_get_hw_capabilities(pf);
576266423Sjfv	if (error) {
577266423Sjfv		device_printf(dev, "HW capabilities failure!\n");
578266423Sjfv		goto err_get_cap;
579266423Sjfv	}
580266423Sjfv
581266423Sjfv	/* Set up host memory cache */
582279858Sjfv	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
583279858Sjfv	    hw->func_caps.num_rx_qp, 0, 0);
584266423Sjfv	if (error) {
585266423Sjfv		device_printf(dev, "init_lan_hmc failed: %d\n", error);
586266423Sjfv		goto err_get_cap;
587266423Sjfv	}
588266423Sjfv
589266423Sjfv	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
590266423Sjfv	if (error) {
591266423Sjfv		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
592266423Sjfv		goto err_mac_hmc;
593266423Sjfv	}
594266423Sjfv
595269198Sjfv	/* Disable LLDP from the firmware */
596269198Sjfv	i40e_aq_stop_lldp(hw, TRUE, NULL);
597269198Sjfv
598266423Sjfv	i40e_get_mac_addr(hw, hw->mac.addr);
599266423Sjfv	error = i40e_validate_mac_addr(hw->mac.addr);
600266423Sjfv	if (error) {
601266423Sjfv		device_printf(dev, "validate_mac_addr failed: %d\n", error);
602266423Sjfv		goto err_mac_hmc;
603266423Sjfv	}
604266423Sjfv	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
605266423Sjfv	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
606266423Sjfv
607274205Sjfv	/* Set up VSI and queues */
608270346Sjfv	if (ixl_setup_stations(pf) != 0) {
609266423Sjfv		device_printf(dev, "setup stations failed!\n");
610266423Sjfv		error = ENOMEM;
611266423Sjfv		goto err_mac_hmc;
612266423Sjfv	}
613266423Sjfv
614266423Sjfv	/* Initialize mac filter list for VSI */
615266423Sjfv	SLIST_INIT(&vsi->ftl);
616266423Sjfv
617266423Sjfv	/* Set up interrupt routing here */
618266423Sjfv	if (pf->msix > 1)
619270346Sjfv		error = ixl_assign_vsi_msix(pf);
620266423Sjfv	else
621270346Sjfv		error = ixl_assign_vsi_legacy(pf);
622266423Sjfv	if (error)
623266423Sjfv		goto err_late;
624266423Sjfv
625279033Sjfv	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
626279033Sjfv	    (hw->aq.fw_maj_ver < 4)) {
627279033Sjfv		i40e_msec_delay(75);
628279033Sjfv		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
629279033Sjfv		if (error)
630279033Sjfv			device_printf(dev, "link restart failed, aq_err=%d\n",
631279033Sjfv			    pf->hw.aq.asq_last_status);
632270346Sjfv	}
633279033Sjfv
634266423Sjfv	/* Determine link state */
635279858Sjfv	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
636279858Sjfv	pf->link_up = i40e_get_link_status(hw);
637266423Sjfv
638266423Sjfv	/* Setup OS specific network interface */
639274205Sjfv	if (ixl_setup_interface(dev, vsi) != 0) {
640274205Sjfv		device_printf(dev, "interface setup failed!\n");
641274205Sjfv		error = EIO;
642266423Sjfv		goto err_late;
643274205Sjfv	}
644266423Sjfv
645279033Sjfv	error = ixl_switch_config(pf);
646279033Sjfv	if (error) {
647279033Sjfv		device_printf(dev, "Initial switch config failed: %d\n", error);
648279033Sjfv		goto err_mac_hmc;
649279033Sjfv	}
650279033Sjfv
651279033Sjfv	/* Limit phy interrupts to link and modules failure */
652279033Sjfv	error = i40e_aq_set_phy_int_mask(hw,
653279033Sjfv	    I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
654279033Sjfv        if (error)
655279033Sjfv		device_printf(dev, "set phy mask failed: %d\n", error);
656279033Sjfv
657266423Sjfv	/* Get the bus configuration and set the shared code */
658270346Sjfv	bus = ixl_get_bus_info(hw, dev);
659266423Sjfv	i40e_set_pci_config_data(hw, bus);
660266423Sjfv
661266423Sjfv	/* Initialize statistics */
662270346Sjfv	ixl_pf_reset_stats(pf);
663270346Sjfv	ixl_update_stats_counters(pf);
664270346Sjfv	ixl_add_hw_stats(pf);
665266423Sjfv
666266423Sjfv	/* Register for VLAN events */
667266423Sjfv	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
668270346Sjfv	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
669266423Sjfv	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
670270346Sjfv	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
671266423Sjfv
672279858Sjfv#ifdef PCI_IOV
673279858Sjfv	/* SR-IOV is only supported when MSI-X is in use. */
674279858Sjfv	if (pf->msix > 1) {
675279858Sjfv		pf_schema = pci_iov_schema_alloc_node();
676279858Sjfv		vf_schema = pci_iov_schema_alloc_node();
677279858Sjfv		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
678279858Sjfv		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
679279858Sjfv		    IOV_SCHEMA_HASDEFAULT, TRUE);
680279858Sjfv		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
681279858Sjfv		    IOV_SCHEMA_HASDEFAULT, FALSE);
682279858Sjfv		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
683279858Sjfv		    IOV_SCHEMA_HASDEFAULT, FALSE);
684274205Sjfv
685279858Sjfv		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
686279858Sjfv		if (iov_error != 0)
687279858Sjfv			device_printf(dev,
688279858Sjfv			    "Failed to initialize SR-IOV (error=%d)\n",
689279858Sjfv			    iov_error);
690279858Sjfv	}
691279858Sjfv#endif
692279858Sjfv
693270346Sjfv	INIT_DEBUGOUT("ixl_attach: end");
694266423Sjfv	return (0);
695266423Sjfv
696266423Sjfverr_late:
697274205Sjfv	if (vsi->ifp != NULL)
698274205Sjfv		if_free(vsi->ifp);
699266423Sjfverr_mac_hmc:
700266423Sjfv	i40e_shutdown_lan_hmc(hw);
701266423Sjfverr_get_cap:
702266423Sjfv	i40e_shutdown_adminq(hw);
703266423Sjfverr_out:
704270346Sjfv	ixl_free_pci_resources(pf);
705274205Sjfv	ixl_free_vsi(vsi);
706270346Sjfv	IXL_PF_LOCK_DESTROY(pf);
707266423Sjfv	return (error);
708266423Sjfv}
709266423Sjfv
710266423Sjfv/*********************************************************************
711266423Sjfv *  Device removal routine
712266423Sjfv *
713266423Sjfv *  The detach entry point is called when the driver is being removed.
714266423Sjfv *  This routine stops the adapter and deallocates all the resources
715266423Sjfv *  that were allocated for driver operation.
716266423Sjfv *
717266423Sjfv *  return 0 on success, positive on failure
718266423Sjfv *********************************************************************/
719266423Sjfv
720266423Sjfvstatic int
721270346Sjfvixl_detach(device_t dev)
722266423Sjfv{
723270346Sjfv	struct ixl_pf		*pf = device_get_softc(dev);
724266423Sjfv	struct i40e_hw		*hw = &pf->hw;
725270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
726270346Sjfv	struct ixl_queue	*que = vsi->queues;
727266423Sjfv	i40e_status		status;
728279858Sjfv#ifdef PCI_IOV
729279858Sjfv	int			error;
730279858Sjfv#endif
731266423Sjfv
732270346Sjfv	INIT_DEBUGOUT("ixl_detach: begin");
733266423Sjfv
734266423Sjfv	/* Make sure VLANS are not using driver */
735266423Sjfv	if (vsi->ifp->if_vlantrunk != NULL) {
736266423Sjfv		device_printf(dev,"Vlan in use, detach first\n");
737266423Sjfv		return (EBUSY);
738266423Sjfv	}
739266423Sjfv
740279858Sjfv#ifdef PCI_IOV
741279858Sjfv	error = pci_iov_detach(dev);
742279858Sjfv	if (error != 0) {
743279858Sjfv		device_printf(dev, "SR-IOV in use; detach first.\n");
744279858Sjfv		return (error);
745279858Sjfv	}
746279858Sjfv#endif
747279858Sjfv
748279033Sjfv	ether_ifdetach(vsi->ifp);
749279033Sjfv	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
750279033Sjfv		IXL_PF_LOCK(pf);
751279033Sjfv		ixl_stop(pf);
752279033Sjfv		IXL_PF_UNLOCK(pf);
753279033Sjfv	}
754266423Sjfv
755266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
756266423Sjfv		if (que->tq) {
757266423Sjfv			taskqueue_drain(que->tq, &que->task);
758266423Sjfv			taskqueue_drain(que->tq, &que->tx_task);
759266423Sjfv			taskqueue_free(que->tq);
760266423Sjfv		}
761266423Sjfv	}
762266423Sjfv
763266423Sjfv	/* Shutdown LAN HMC */
764266423Sjfv	status = i40e_shutdown_lan_hmc(hw);
765266423Sjfv	if (status)
766266423Sjfv		device_printf(dev,
767266423Sjfv		    "Shutdown LAN HMC failed with code %d\n", status);
768266423Sjfv
769266423Sjfv	/* Shutdown admin queue */
770266423Sjfv	status = i40e_shutdown_adminq(hw);
771266423Sjfv	if (status)
772266423Sjfv		device_printf(dev,
773266423Sjfv		    "Shutdown Admin queue failed with code %d\n", status);
774266423Sjfv
775266423Sjfv	/* Unregister VLAN events */
776266423Sjfv	if (vsi->vlan_attach != NULL)
777266423Sjfv		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
778266423Sjfv	if (vsi->vlan_detach != NULL)
779266423Sjfv		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
780266423Sjfv
781266423Sjfv	callout_drain(&pf->timer);
782270346Sjfv	ixl_free_pci_resources(pf);
783266423Sjfv	bus_generic_detach(dev);
784266423Sjfv	if_free(vsi->ifp);
785270346Sjfv	ixl_free_vsi(vsi);
786270346Sjfv	IXL_PF_LOCK_DESTROY(pf);
787266423Sjfv	return (0);
788266423Sjfv}
789266423Sjfv
790266423Sjfv/*********************************************************************
791266423Sjfv *
792266423Sjfv *  Shutdown entry point
793266423Sjfv *
794266423Sjfv **********************************************************************/
795266423Sjfv
796266423Sjfvstatic int
797270346Sjfvixl_shutdown(device_t dev)
798266423Sjfv{
799270346Sjfv	struct ixl_pf *pf = device_get_softc(dev);
800270346Sjfv	IXL_PF_LOCK(pf);
801270346Sjfv	ixl_stop(pf);
802270346Sjfv	IXL_PF_UNLOCK(pf);
803266423Sjfv	return (0);
804266423Sjfv}
805266423Sjfv
806266423Sjfv
807266423Sjfv/*********************************************************************
808266423Sjfv *
809266423Sjfv *  Get the hardware capabilities
810266423Sjfv *
811266423Sjfv **********************************************************************/
812266423Sjfv
813266423Sjfvstatic int
814270346Sjfvixl_get_hw_capabilities(struct ixl_pf *pf)
815266423Sjfv{
816266423Sjfv	struct i40e_aqc_list_capabilities_element_resp *buf;
817266423Sjfv	struct i40e_hw	*hw = &pf->hw;
818266423Sjfv	device_t 	dev = pf->dev;
819266423Sjfv	int             error, len;
820266423Sjfv	u16		needed;
821266423Sjfv	bool		again = TRUE;
822266423Sjfv
823266423Sjfv	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
824266423Sjfvretry:
825266423Sjfv	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
826266423Sjfv	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
827266423Sjfv		device_printf(dev, "Unable to allocate cap memory\n");
828266423Sjfv                return (ENOMEM);
829266423Sjfv	}
830266423Sjfv
831266423Sjfv	/* This populates the hw struct */
832266423Sjfv        error = i40e_aq_discover_capabilities(hw, buf, len,
833266423Sjfv	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
834266423Sjfv	free(buf, M_DEVBUF);
835266423Sjfv	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
836266423Sjfv	    (again == TRUE)) {
837266423Sjfv		/* retry once with a larger buffer */
838266423Sjfv		again = FALSE;
839266423Sjfv		len = needed;
840266423Sjfv		goto retry;
841266423Sjfv	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
842266423Sjfv		device_printf(dev, "capability discovery failed: %d\n",
843266423Sjfv		    pf->hw.aq.asq_last_status);
844266423Sjfv		return (ENODEV);
845266423Sjfv	}
846266423Sjfv
847266423Sjfv	/* Capture this PF's starting queue pair */
848266423Sjfv	pf->qbase = hw->func_caps.base_queue;
849266423Sjfv
850270346Sjfv#ifdef IXL_DEBUG
851266423Sjfv	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
852266423Sjfv	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
853266423Sjfv	    hw->pf_id, hw->func_caps.num_vfs,
854266423Sjfv	    hw->func_caps.num_msix_vectors,
855266423Sjfv	    hw->func_caps.num_msix_vectors_vf,
856266423Sjfv	    hw->func_caps.fd_filters_guaranteed,
857266423Sjfv	    hw->func_caps.fd_filters_best_effort,
858266423Sjfv	    hw->func_caps.num_tx_qp,
859266423Sjfv	    hw->func_caps.num_rx_qp,
860266423Sjfv	    hw->func_caps.base_queue);
861266423Sjfv#endif
862266423Sjfv	return (error);
863266423Sjfv}
864266423Sjfv
865266423Sjfvstatic void
866270346Sjfvixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
867266423Sjfv{
868266423Sjfv	device_t 	dev = vsi->dev;
869266423Sjfv
870266423Sjfv	/* Enable/disable TXCSUM/TSO4 */
871266423Sjfv	if (!(ifp->if_capenable & IFCAP_TXCSUM)
872266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO4)) {
873266423Sjfv		if (mask & IFCAP_TXCSUM) {
874266423Sjfv			ifp->if_capenable |= IFCAP_TXCSUM;
875266423Sjfv			/* enable TXCSUM, restore TSO if previously enabled */
876270346Sjfv			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
877270346Sjfv				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
878266423Sjfv				ifp->if_capenable |= IFCAP_TSO4;
879266423Sjfv			}
880266423Sjfv		}
881266423Sjfv		else if (mask & IFCAP_TSO4) {
882266423Sjfv			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
883270346Sjfv			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
884266423Sjfv			device_printf(dev,
885266423Sjfv			    "TSO4 requires txcsum, enabling both...\n");
886266423Sjfv		}
887266423Sjfv	} else if((ifp->if_capenable & IFCAP_TXCSUM)
888266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO4)) {
889266423Sjfv		if (mask & IFCAP_TXCSUM)
890266423Sjfv			ifp->if_capenable &= ~IFCAP_TXCSUM;
891266423Sjfv		else if (mask & IFCAP_TSO4)
892266423Sjfv			ifp->if_capenable |= IFCAP_TSO4;
893266423Sjfv	} else if((ifp->if_capenable & IFCAP_TXCSUM)
894266423Sjfv	    && (ifp->if_capenable & IFCAP_TSO4)) {
895266423Sjfv		if (mask & IFCAP_TXCSUM) {
896270346Sjfv			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
897266423Sjfv			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
898266423Sjfv			device_printf(dev,
899266423Sjfv			    "TSO4 requires txcsum, disabling both...\n");
900266423Sjfv		} else if (mask & IFCAP_TSO4)
901266423Sjfv			ifp->if_capenable &= ~IFCAP_TSO4;
902266423Sjfv	}
903266423Sjfv
904266423Sjfv	/* Enable/disable TXCSUM_IPV6/TSO6 */
905266423Sjfv	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
906266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO6)) {
907266423Sjfv		if (mask & IFCAP_TXCSUM_IPV6) {
908266423Sjfv			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
909270346Sjfv			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
910270346Sjfv				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
911266423Sjfv				ifp->if_capenable |= IFCAP_TSO6;
912266423Sjfv			}
913266423Sjfv		} else if (mask & IFCAP_TSO6) {
914266423Sjfv			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
915270346Sjfv			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
916266423Sjfv			device_printf(dev,
917266423Sjfv			    "TSO6 requires txcsum6, enabling both...\n");
918266423Sjfv		}
919266423Sjfv	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
920266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO6)) {
921266423Sjfv		if (mask & IFCAP_TXCSUM_IPV6)
922266423Sjfv			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
923266423Sjfv		else if (mask & IFCAP_TSO6)
924266423Sjfv			ifp->if_capenable |= IFCAP_TSO6;
925266423Sjfv	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
926266423Sjfv	    && (ifp->if_capenable & IFCAP_TSO6)) {
927266423Sjfv		if (mask & IFCAP_TXCSUM_IPV6) {
928270346Sjfv			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
929266423Sjfv			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
930266423Sjfv			device_printf(dev,
931266423Sjfv			    "TSO6 requires txcsum6, disabling both...\n");
932266423Sjfv		} else if (mask & IFCAP_TSO6)
933266423Sjfv			ifp->if_capenable &= ~IFCAP_TSO6;
934266423Sjfv	}
935266423Sjfv}
936266423Sjfv
937266423Sjfv/*********************************************************************
938266423Sjfv *  Ioctl entry point
939266423Sjfv *
940270346Sjfv *  ixl_ioctl is called when the user wants to configure the
941266423Sjfv *  interface.
942266423Sjfv *
943266423Sjfv *  return 0 on success, positive on failure
944266423Sjfv **********************************************************************/
945266423Sjfv
946266423Sjfvstatic int
947270346Sjfvixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
948266423Sjfv{
949270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
950279858Sjfv	struct ixl_pf	*pf = vsi->back;
951266423Sjfv	struct ifreq	*ifr = (struct ifreq *) data;
952266423Sjfv#if defined(INET) || defined(INET6)
953266423Sjfv	struct ifaddr *ifa = (struct ifaddr *)data;
954266423Sjfv	bool		avoid_reset = FALSE;
955266423Sjfv#endif
956266423Sjfv	int             error = 0;
957266423Sjfv
958266423Sjfv	switch (command) {
959266423Sjfv
960266423Sjfv        case SIOCSIFADDR:
961266423Sjfv#ifdef INET
962266423Sjfv		if (ifa->ifa_addr->sa_family == AF_INET)
963266423Sjfv			avoid_reset = TRUE;
964266423Sjfv#endif
965266423Sjfv#ifdef INET6
966266423Sjfv		if (ifa->ifa_addr->sa_family == AF_INET6)
967266423Sjfv			avoid_reset = TRUE;
968266423Sjfv#endif
969266423Sjfv#if defined(INET) || defined(INET6)
970266423Sjfv		/*
971266423Sjfv		** Calling init results in link renegotiation,
972266423Sjfv		** so we avoid doing it when possible.
973266423Sjfv		*/
974266423Sjfv		if (avoid_reset) {
975266423Sjfv			ifp->if_flags |= IFF_UP;
976266423Sjfv			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
977270346Sjfv				ixl_init(pf);
978271900Sbz#ifdef INET
979266423Sjfv			if (!(ifp->if_flags & IFF_NOARP))
980266423Sjfv				arp_ifinit(ifp, ifa);
981271900Sbz#endif
982266423Sjfv		} else
983266423Sjfv			error = ether_ioctl(ifp, command, data);
984266423Sjfv		break;
985266423Sjfv#endif
986266423Sjfv	case SIOCSIFMTU:
987266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
988270346Sjfv		if (ifr->ifr_mtu > IXL_MAX_FRAME -
989266423Sjfv		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
990266423Sjfv			error = EINVAL;
991266423Sjfv		} else {
992270346Sjfv			IXL_PF_LOCK(pf);
993266423Sjfv			ifp->if_mtu = ifr->ifr_mtu;
994266423Sjfv			vsi->max_frame_size =
995266423Sjfv				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
996266423Sjfv			    + ETHER_VLAN_ENCAP_LEN;
997270346Sjfv			ixl_init_locked(pf);
998270346Sjfv			IXL_PF_UNLOCK(pf);
999266423Sjfv		}
1000266423Sjfv		break;
1001266423Sjfv	case SIOCSIFFLAGS:
1002266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
1003270346Sjfv		IXL_PF_LOCK(pf);
1004266423Sjfv		if (ifp->if_flags & IFF_UP) {
1005266423Sjfv			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1006266423Sjfv				if ((ifp->if_flags ^ pf->if_flags) &
1007266423Sjfv				    (IFF_PROMISC | IFF_ALLMULTI)) {
1008270346Sjfv					ixl_set_promisc(vsi);
1009266423Sjfv				}
1010266423Sjfv			} else
1011270346Sjfv				ixl_init_locked(pf);
1012266423Sjfv		} else
1013266423Sjfv			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1014270346Sjfv				ixl_stop(pf);
1015266423Sjfv		pf->if_flags = ifp->if_flags;
1016270346Sjfv		IXL_PF_UNLOCK(pf);
1017266423Sjfv		break;
1018266423Sjfv	case SIOCADDMULTI:
1019266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
1020266423Sjfv		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1021270346Sjfv			IXL_PF_LOCK(pf);
1022270346Sjfv			ixl_disable_intr(vsi);
1023270346Sjfv			ixl_add_multi(vsi);
1024270346Sjfv			ixl_enable_intr(vsi);
1025270346Sjfv			IXL_PF_UNLOCK(pf);
1026266423Sjfv		}
1027266423Sjfv		break;
1028266423Sjfv	case SIOCDELMULTI:
1029266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
1030266423Sjfv		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1031270346Sjfv			IXL_PF_LOCK(pf);
1032270346Sjfv			ixl_disable_intr(vsi);
1033270346Sjfv			ixl_del_multi(vsi);
1034270346Sjfv			ixl_enable_intr(vsi);
1035270346Sjfv			IXL_PF_UNLOCK(pf);
1036266423Sjfv		}
1037266423Sjfv		break;
1038266423Sjfv	case SIOCSIFMEDIA:
1039266423Sjfv	case SIOCGIFMEDIA:
1040266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1041266423Sjfv		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1042266423Sjfv		break;
1043266423Sjfv	case SIOCSIFCAP:
1044266423Sjfv	{
1045266423Sjfv		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1046266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1047266423Sjfv
1048270346Sjfv		ixl_cap_txcsum_tso(vsi, ifp, mask);
1049266423Sjfv
1050266423Sjfv		if (mask & IFCAP_RXCSUM)
1051266423Sjfv			ifp->if_capenable ^= IFCAP_RXCSUM;
1052266423Sjfv		if (mask & IFCAP_RXCSUM_IPV6)
1053266423Sjfv			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1054266423Sjfv		if (mask & IFCAP_LRO)
1055266423Sjfv			ifp->if_capenable ^= IFCAP_LRO;
1056266423Sjfv		if (mask & IFCAP_VLAN_HWTAGGING)
1057266423Sjfv			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1058266423Sjfv		if (mask & IFCAP_VLAN_HWFILTER)
1059266423Sjfv			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1060266423Sjfv		if (mask & IFCAP_VLAN_HWTSO)
1061266423Sjfv			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1062266423Sjfv		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1063270346Sjfv			IXL_PF_LOCK(pf);
1064270346Sjfv			ixl_init_locked(pf);
1065270346Sjfv			IXL_PF_UNLOCK(pf);
1066266423Sjfv		}
1067266423Sjfv		VLAN_CAPABILITIES(ifp);
1068266423Sjfv
1069266423Sjfv		break;
1070266423Sjfv	}
1071266423Sjfv
1072266423Sjfv	default:
1073270346Sjfv		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1074266423Sjfv		error = ether_ioctl(ifp, command, data);
1075266423Sjfv		break;
1076266423Sjfv	}
1077266423Sjfv
1078266423Sjfv	return (error);
1079266423Sjfv}
1080266423Sjfv
1081266423Sjfv
1082266423Sjfv/*********************************************************************
1083266423Sjfv *  Init entry point
1084266423Sjfv *
1085266423Sjfv *  This routine is used in two ways. It is used by the stack as
1086266423Sjfv *  init entry point in network interface structure. It is also used
1087266423Sjfv *  by the driver as a hw/sw initialization routine to get to a
1088266423Sjfv *  consistent state.
1089266423Sjfv *
1090266423Sjfv *  return 0 on success, positive on failure
1091266423Sjfv **********************************************************************/
1092266423Sjfv
1093266423Sjfvstatic void
1094270346Sjfvixl_init_locked(struct ixl_pf *pf)
1095266423Sjfv{
1096266423Sjfv	struct i40e_hw	*hw = &pf->hw;
1097270346Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
1098266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1099266423Sjfv	device_t 	dev = pf->dev;
1100266423Sjfv	struct i40e_filter_control_settings	filter;
1101266423Sjfv	u8		tmpaddr[ETHER_ADDR_LEN];
1102266423Sjfv	int		ret;
1103266423Sjfv
1104266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1105270346Sjfv	INIT_DEBUGOUT("ixl_init: begin");
1106270346Sjfv	ixl_stop(pf);
1107266423Sjfv
1108266423Sjfv	/* Get the latest mac address... User might use a LAA */
1109266423Sjfv	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1110266423Sjfv	      I40E_ETH_LENGTH_OF_ADDRESS);
1111266423Sjfv	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1112266423Sjfv	    i40e_validate_mac_addr(tmpaddr)) {
1113266423Sjfv		bcopy(tmpaddr, hw->mac.addr,
1114266423Sjfv		    I40E_ETH_LENGTH_OF_ADDRESS);
1115266423Sjfv		ret = i40e_aq_mac_address_write(hw,
1116266423Sjfv		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
1117266423Sjfv		    hw->mac.addr, NULL);
1118266423Sjfv		if (ret) {
1119266423Sjfv			device_printf(dev, "LLA address"
1120266423Sjfv			 "change failed!!\n");
1121266423Sjfv			return;
1122266423Sjfv		}
1123266423Sjfv	}
1124266423Sjfv
1125266423Sjfv	/* Set the various hardware offload abilities */
1126266423Sjfv	ifp->if_hwassist = 0;
1127266423Sjfv	if (ifp->if_capenable & IFCAP_TSO)
1128266423Sjfv		ifp->if_hwassist |= CSUM_TSO;
1129266423Sjfv	if (ifp->if_capenable & IFCAP_TXCSUM)
1130266423Sjfv		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1131266423Sjfv	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1132266423Sjfv		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1133266423Sjfv
1134266423Sjfv	/* Set up the device filtering */
1135266423Sjfv	bzero(&filter, sizeof(filter));
1136266423Sjfv	filter.enable_ethtype = TRUE;
1137266423Sjfv	filter.enable_macvlan = TRUE;
1138270346Sjfv#ifdef IXL_FDIR
1139266423Sjfv	filter.enable_fdir = TRUE;
1140266423Sjfv#endif
1141266423Sjfv	if (i40e_set_filter_control(hw, &filter))
1142266423Sjfv		device_printf(dev, "set_filter_control() failed\n");
1143266423Sjfv
1144266423Sjfv	/* Set up RSS */
1145270346Sjfv	ixl_config_rss(vsi);
1146266423Sjfv
1147266423Sjfv	/*
1148279033Sjfv	** Prepare the VSI: rings, hmc contexts, etc...
1149266423Sjfv	*/
1150270346Sjfv	if (ixl_initialize_vsi(vsi)) {
1151270346Sjfv		device_printf(dev, "initialize vsi failed!!\n");
1152266423Sjfv		return;
1153266423Sjfv	}
1154266423Sjfv
1155266423Sjfv	/* Add protocol filters to list */
1156270346Sjfv	ixl_init_filters(vsi);
1157266423Sjfv
1158266423Sjfv	/* Setup vlan's if needed */
1159270346Sjfv	ixl_setup_vlan_filters(vsi);
1160266423Sjfv
1161266423Sjfv	/* Start the local timer */
1162270346Sjfv	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1163266423Sjfv
1164266423Sjfv	/* Set up MSI/X routing and the ITR settings */
1165270346Sjfv	if (ixl_enable_msix) {
1166270346Sjfv		ixl_configure_msix(pf);
1167270346Sjfv		ixl_configure_itr(pf);
1168266423Sjfv	} else
1169270346Sjfv		ixl_configure_legacy(pf);
1170266423Sjfv
1171270346Sjfv	ixl_enable_rings(vsi);
1172266423Sjfv
1173266423Sjfv	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1174266423Sjfv
1175279858Sjfv	ixl_reconfigure_filters(vsi);
1176279858Sjfv
1177266423Sjfv	/* Set MTU in hardware*/
1178270346Sjfv	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1179270346Sjfv	    TRUE, 0, NULL);
1180270346Sjfv	if (aq_error)
1181270346Sjfv		device_printf(vsi->dev,
1182270346Sjfv			"aq_set_mac_config in init error, code %d\n",
1183270346Sjfv		    aq_error);
1184266423Sjfv
1185266423Sjfv	/* And now turn on interrupts */
1186270346Sjfv	ixl_enable_intr(vsi);
1187266423Sjfv
1188266423Sjfv	/* Now inform the stack we're ready */
1189266423Sjfv	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1190266423Sjfv	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1191266423Sjfv
1192266423Sjfv	return;
1193266423Sjfv}
1194266423Sjfv
1195266423Sjfvstatic void
1196270346Sjfvixl_init(void *arg)
1197266423Sjfv{
1198270346Sjfv	struct ixl_pf *pf = arg;
1199266423Sjfv
1200270346Sjfv	IXL_PF_LOCK(pf);
1201270346Sjfv	ixl_init_locked(pf);
1202270346Sjfv	IXL_PF_UNLOCK(pf);
1203266423Sjfv	return;
1204266423Sjfv}
1205266423Sjfv
1206266423Sjfv/*
1207266423Sjfv**
1208266423Sjfv** MSIX Interrupt Handlers and Tasklets
1209266423Sjfv**
1210266423Sjfv*/
1211266423Sjfvstatic void
1212270346Sjfvixl_handle_que(void *context, int pending)
1213266423Sjfv{
1214270346Sjfv	struct ixl_queue *que = context;
1215270346Sjfv	struct ixl_vsi *vsi = que->vsi;
1216266423Sjfv	struct i40e_hw  *hw = vsi->hw;
1217266423Sjfv	struct tx_ring  *txr = &que->txr;
1218266423Sjfv	struct ifnet    *ifp = vsi->ifp;
1219266423Sjfv	bool		more;
1220266423Sjfv
1221266423Sjfv	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1222270346Sjfv		more = ixl_rxeof(que, IXL_RX_LIMIT);
1223270346Sjfv		IXL_TX_LOCK(txr);
1224270346Sjfv		ixl_txeof(que);
1225266423Sjfv		if (!drbr_empty(ifp, txr->br))
1226270346Sjfv			ixl_mq_start_locked(ifp, txr);
1227270346Sjfv		IXL_TX_UNLOCK(txr);
1228266423Sjfv		if (more) {
1229266423Sjfv			taskqueue_enqueue(que->tq, &que->task);
1230266423Sjfv			return;
1231266423Sjfv		}
1232266423Sjfv	}
1233266423Sjfv
1234266423Sjfv	/* Reenable this interrupt - hmmm */
1235270346Sjfv	ixl_enable_queue(hw, que->me);
1236266423Sjfv	return;
1237266423Sjfv}
1238266423Sjfv
1239266423Sjfv
1240266423Sjfv/*********************************************************************
1241266423Sjfv *
1242266423Sjfv *  Legacy Interrupt Service routine
1243266423Sjfv *
1244266423Sjfv **********************************************************************/
1245266423Sjfvvoid
1246270346Sjfvixl_intr(void *arg)
1247266423Sjfv{
1248270346Sjfv	struct ixl_pf		*pf = arg;
1249266423Sjfv	struct i40e_hw		*hw =  &pf->hw;
1250270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1251270346Sjfv	struct ixl_queue	*que = vsi->queues;
1252266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1253266423Sjfv	struct tx_ring		*txr = &que->txr;
1254266423Sjfv        u32			reg, icr0, mask;
1255266423Sjfv	bool			more_tx, more_rx;
1256266423Sjfv
1257266423Sjfv	++que->irqs;
1258266423Sjfv
1259266423Sjfv	/* Protect against spurious interrupts */
1260266423Sjfv	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1261266423Sjfv		return;
1262266423Sjfv
1263266423Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0);
1264266423Sjfv
1265266423Sjfv	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1266266423Sjfv	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1267266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1268266423Sjfv
1269266423Sjfv        mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1270266423Sjfv
1271279858Sjfv#ifdef PCI_IOV
1272279858Sjfv	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
1273279858Sjfv		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1274279858Sjfv#endif
1275279858Sjfv
1276266423Sjfv	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1277266423Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
1278266423Sjfv		return;
1279266423Sjfv	}
1280266423Sjfv
1281270346Sjfv	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1282266423Sjfv
1283270346Sjfv	IXL_TX_LOCK(txr);
1284270346Sjfv	more_tx = ixl_txeof(que);
1285266423Sjfv	if (!drbr_empty(vsi->ifp, txr->br))
1286266423Sjfv		more_tx = 1;
1287270346Sjfv	IXL_TX_UNLOCK(txr);
1288266423Sjfv
1289266423Sjfv	/* re-enable other interrupt causes */
1290266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1291266423Sjfv
1292266423Sjfv	/* And now the queues */
1293266423Sjfv	reg = rd32(hw, I40E_QINT_RQCTL(0));
1294266423Sjfv	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1295266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
1296266423Sjfv
1297266423Sjfv	reg = rd32(hw, I40E_QINT_TQCTL(0));
1298266423Sjfv	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1299266423Sjfv	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1300266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
1301266423Sjfv
1302270346Sjfv	ixl_enable_legacy(hw);
1303266423Sjfv
1304266423Sjfv	return;
1305266423Sjfv}
1306266423Sjfv
1307266423Sjfv
1308266423Sjfv/*********************************************************************
1309266423Sjfv *
1310266423Sjfv *  MSIX VSI Interrupt Service routine
1311266423Sjfv *
1312266423Sjfv **********************************************************************/
1313266423Sjfvvoid
1314270346Sjfvixl_msix_que(void *arg)
1315266423Sjfv{
1316270346Sjfv	struct ixl_queue	*que = arg;
1317270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
1318266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1319266423Sjfv	struct tx_ring	*txr = &que->txr;
1320266423Sjfv	bool		more_tx, more_rx;
1321266423Sjfv
1322269198Sjfv	/* Protect against spurious interrupts */
1323269198Sjfv	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1324269198Sjfv		return;
1325269198Sjfv
1326266423Sjfv	++que->irqs;
1327266423Sjfv
1328270346Sjfv	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1329266423Sjfv
1330270346Sjfv	IXL_TX_LOCK(txr);
1331270346Sjfv	more_tx = ixl_txeof(que);
1332266423Sjfv	/*
1333266423Sjfv	** Make certain that if the stack
1334266423Sjfv	** has anything queued the task gets
1335266423Sjfv	** scheduled to handle it.
1336266423Sjfv	*/
1337266423Sjfv	if (!drbr_empty(vsi->ifp, txr->br))
1338266423Sjfv		more_tx = 1;
1339270346Sjfv	IXL_TX_UNLOCK(txr);
1340266423Sjfv
1341270346Sjfv	ixl_set_queue_rx_itr(que);
1342270346Sjfv	ixl_set_queue_tx_itr(que);
1343266423Sjfv
1344266423Sjfv	if (more_tx || more_rx)
1345266423Sjfv		taskqueue_enqueue(que->tq, &que->task);
1346266423Sjfv	else
1347270346Sjfv		ixl_enable_queue(hw, que->me);
1348266423Sjfv
1349266423Sjfv	return;
1350266423Sjfv}
1351266423Sjfv
1352266423Sjfv
1353266423Sjfv/*********************************************************************
1354266423Sjfv *
1355266423Sjfv *  MSIX Admin Queue Interrupt Service routine
1356266423Sjfv *
1357266423Sjfv **********************************************************************/
1358266423Sjfvstatic void
1359270346Sjfvixl_msix_adminq(void *arg)
1360266423Sjfv{
1361270346Sjfv	struct ixl_pf	*pf = arg;
1362266423Sjfv	struct i40e_hw	*hw = &pf->hw;
1363266423Sjfv	u32		reg, mask;
1364266423Sjfv
1365266423Sjfv	++pf->admin_irq;
1366266423Sjfv
1367266423Sjfv	reg = rd32(hw, I40E_PFINT_ICR0);
1368266423Sjfv	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1369266423Sjfv
1370266423Sjfv	/* Check on the cause */
1371266423Sjfv	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1372266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1373266423Sjfv
1374269198Sjfv	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1375270346Sjfv		ixl_handle_mdd_event(pf);
1376266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1377269198Sjfv	}
1378266423Sjfv
1379279858Sjfv#ifdef PCI_IOV
1380279858Sjfv	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
1381266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1382279858Sjfv		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1383279858Sjfv	}
1384279858Sjfv#endif
1385266423Sjfv
1386266423Sjfv	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1387266423Sjfv	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1388266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1389266423Sjfv
1390266423Sjfv	taskqueue_enqueue(pf->tq, &pf->adminq);
1391266423Sjfv	return;
1392266423Sjfv}
1393266423Sjfv
1394266423Sjfv/*********************************************************************
1395266423Sjfv *
1396266423Sjfv *  Media Ioctl callback
1397266423Sjfv *
1398266423Sjfv *  This routine is called whenever the user queries the status of
1399266423Sjfv *  the interface using ifconfig.
1400266423Sjfv *
1401266423Sjfv **********************************************************************/
1402266423Sjfvstatic void
1403270346Sjfvixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1404266423Sjfv{
1405270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
1406279858Sjfv	struct ixl_pf	*pf = vsi->back;
1407266423Sjfv	struct i40e_hw  *hw = &pf->hw;
1408266423Sjfv
1409270346Sjfv	INIT_DEBUGOUT("ixl_media_status: begin");
1410270346Sjfv	IXL_PF_LOCK(pf);
1411266423Sjfv
1412279858Sjfv	hw->phy.get_link_info = TRUE;
1413279858Sjfv	pf->link_up = i40e_get_link_status(hw);
1414270346Sjfv	ixl_update_link_status(pf);
1415266423Sjfv
1416266423Sjfv	ifmr->ifm_status = IFM_AVALID;
1417266423Sjfv	ifmr->ifm_active = IFM_ETHER;
1418266423Sjfv
1419279858Sjfv	if (!pf->link_up) {
1420270346Sjfv		IXL_PF_UNLOCK(pf);
1421266423Sjfv		return;
1422266423Sjfv	}
1423266423Sjfv
1424266423Sjfv	ifmr->ifm_status |= IFM_ACTIVE;
1425266423Sjfv	/* Hardware is always full-duplex */
1426266423Sjfv	ifmr->ifm_active |= IFM_FDX;
1427266423Sjfv
1428266423Sjfv	switch (hw->phy.link_info.phy_type) {
1429266423Sjfv		/* 100 M */
1430266423Sjfv		case I40E_PHY_TYPE_100BASE_TX:
1431266423Sjfv			ifmr->ifm_active |= IFM_100_TX;
1432266423Sjfv			break;
1433266423Sjfv		/* 1 G */
1434266423Sjfv		case I40E_PHY_TYPE_1000BASE_T:
1435266423Sjfv			ifmr->ifm_active |= IFM_1000_T;
1436266423Sjfv			break;
1437269198Sjfv		case I40E_PHY_TYPE_1000BASE_SX:
1438269198Sjfv			ifmr->ifm_active |= IFM_1000_SX;
1439269198Sjfv			break;
1440269198Sjfv		case I40E_PHY_TYPE_1000BASE_LX:
1441269198Sjfv			ifmr->ifm_active |= IFM_1000_LX;
1442269198Sjfv			break;
1443266423Sjfv		/* 10 G */
1444279033Sjfv		case I40E_PHY_TYPE_10GBASE_CR1:
1445266423Sjfv		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1446266423Sjfv		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1447279033Sjfv		/* Using this until a real KR media type */
1448279033Sjfv		case I40E_PHY_TYPE_10GBASE_KR:
1449279033Sjfv		case I40E_PHY_TYPE_10GBASE_KX4:
1450266423Sjfv			ifmr->ifm_active |= IFM_10G_TWINAX;
1451266423Sjfv			break;
1452266423Sjfv		case I40E_PHY_TYPE_10GBASE_SR:
1453266423Sjfv			ifmr->ifm_active |= IFM_10G_SR;
1454266423Sjfv			break;
1455266423Sjfv		case I40E_PHY_TYPE_10GBASE_LR:
1456266423Sjfv			ifmr->ifm_active |= IFM_10G_LR;
1457266423Sjfv			break;
1458270346Sjfv		case I40E_PHY_TYPE_10GBASE_T:
1459270346Sjfv			ifmr->ifm_active |= IFM_10G_T;
1460270346Sjfv			break;
1461266423Sjfv		/* 40 G */
1462266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4:
1463266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1464266423Sjfv			ifmr->ifm_active |= IFM_40G_CR4;
1465266423Sjfv			break;
1466266423Sjfv		case I40E_PHY_TYPE_40GBASE_SR4:
1467266423Sjfv			ifmr->ifm_active |= IFM_40G_SR4;
1468266423Sjfv			break;
1469266423Sjfv		case I40E_PHY_TYPE_40GBASE_LR4:
1470266423Sjfv			ifmr->ifm_active |= IFM_40G_LR4;
1471266423Sjfv			break;
1472279033Sjfv		/*
1473279033Sjfv		** Set these to CR4 because OS does not
1474279033Sjfv		** have types available yet.
1475279033Sjfv		*/
1476279033Sjfv		case I40E_PHY_TYPE_40GBASE_KR4:
1477279033Sjfv		case I40E_PHY_TYPE_XLAUI:
1478279033Sjfv		case I40E_PHY_TYPE_XLPPI:
1479279033Sjfv		case I40E_PHY_TYPE_40GBASE_AOC:
1480279033Sjfv			ifmr->ifm_active |= IFM_40G_CR4;
1481279033Sjfv			break;
1482266423Sjfv		default:
1483266423Sjfv			ifmr->ifm_active |= IFM_UNKNOWN;
1484266423Sjfv			break;
1485266423Sjfv	}
1486266423Sjfv	/* Report flow control status as well */
1487266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1488266423Sjfv		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1489266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1490266423Sjfv		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1491266423Sjfv
1492270346Sjfv	IXL_PF_UNLOCK(pf);
1493266423Sjfv
1494266423Sjfv	return;
1495266423Sjfv}
1496266423Sjfv
1497266423Sjfv/*********************************************************************
1498266423Sjfv *
1499266423Sjfv *  Media Ioctl callback
1500266423Sjfv *
1501266423Sjfv *  This routine is called when the user changes speed/duplex using
1502266423Sjfv *  media/mediopt option with ifconfig.
1503266423Sjfv *
1504266423Sjfv **********************************************************************/
1505266423Sjfvstatic int
1506270346Sjfvixl_media_change(struct ifnet * ifp)
1507266423Sjfv{
1508270346Sjfv	struct ixl_vsi *vsi = ifp->if_softc;
1509266423Sjfv	struct ifmedia *ifm = &vsi->media;
1510266423Sjfv
1511270346Sjfv	INIT_DEBUGOUT("ixl_media_change: begin");
1512266423Sjfv
1513266423Sjfv	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1514266423Sjfv		return (EINVAL);
1515266423Sjfv
1516269198Sjfv	if_printf(ifp, "Media change is currently not supported.\n");
1517269198Sjfv
1518269198Sjfv	return (ENODEV);
1519266423Sjfv}
1520266423Sjfv
1521266423Sjfv
1522270346Sjfv#ifdef IXL_FDIR
1523266423Sjfv/*
1524266423Sjfv** ATR: Application Targetted Receive - creates a filter
1525266423Sjfv**	based on TX flow info that will keep the receive
1526266423Sjfv**	portion of the flow on the same queue. Based on the
1527266423Sjfv**	implementation this is only available for TCP connections
1528266423Sjfv*/
1529266423Sjfvvoid
1530270346Sjfvixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1531266423Sjfv{
1532270346Sjfv	struct ixl_vsi			*vsi = que->vsi;
1533266423Sjfv	struct tx_ring			*txr = &que->txr;
1534266423Sjfv	struct i40e_filter_program_desc	*FDIR;
1535266423Sjfv	u32				ptype, dtype;
1536266423Sjfv	int				idx;
1537266423Sjfv
1538266423Sjfv	/* check if ATR is enabled and sample rate */
1539270346Sjfv	if ((!ixl_enable_fdir) || (!txr->atr_rate))
1540266423Sjfv		return;
1541266423Sjfv	/*
1542266423Sjfv	** We sample all TCP SYN/FIN packets,
1543266423Sjfv	** or at the selected sample rate
1544266423Sjfv	*/
1545266423Sjfv	txr->atr_count++;
1546266423Sjfv	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1547266423Sjfv	    (txr->atr_count < txr->atr_rate))
1548266423Sjfv                return;
1549266423Sjfv	txr->atr_count = 0;
1550266423Sjfv
1551266423Sjfv	/* Get a descriptor to use */
1552266423Sjfv	idx = txr->next_avail;
1553266423Sjfv	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1554266423Sjfv	if (++idx == que->num_desc)
1555266423Sjfv		idx = 0;
1556266423Sjfv	txr->avail--;
1557266423Sjfv	txr->next_avail = idx;
1558266423Sjfv
1559266423Sjfv	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1560266423Sjfv	    I40E_TXD_FLTR_QW0_QINDEX_MASK;
1561266423Sjfv
1562266423Sjfv	ptype |= (etype == ETHERTYPE_IP) ?
1563266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1564266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1565266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1566266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1567266423Sjfv
1568266423Sjfv	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1569266423Sjfv
1570266423Sjfv	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1571266423Sjfv
1572266423Sjfv	/*
1573266423Sjfv	** We use the TCP TH_FIN as a trigger to remove
1574266423Sjfv	** the filter, otherwise its an update.
1575266423Sjfv	*/
1576266423Sjfv	dtype |= (th->th_flags & TH_FIN) ?
1577266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1578266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1579266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1580266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1581266423Sjfv
1582266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1583266423Sjfv	    I40E_TXD_FLTR_QW1_DEST_SHIFT;
1584266423Sjfv
1585266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1586266423Sjfv	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1587266423Sjfv
1588266423Sjfv	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1589266423Sjfv	FDIR->dtype_cmd_cntindex = htole32(dtype);
1590266423Sjfv	return;
1591266423Sjfv}
1592266423Sjfv#endif
1593266423Sjfv
1594266423Sjfv
1595266423Sjfvstatic void
1596270346Sjfvixl_set_promisc(struct ixl_vsi *vsi)
1597266423Sjfv{
1598266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1599266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1600266423Sjfv	int		err, mcnt = 0;
1601266423Sjfv	bool		uni = FALSE, multi = FALSE;
1602266423Sjfv
1603266423Sjfv	if (ifp->if_flags & IFF_ALLMULTI)
1604266423Sjfv                multi = TRUE;
1605266423Sjfv	else { /* Need to count the multicast addresses */
1606266423Sjfv		struct  ifmultiaddr *ifma;
1607266423Sjfv		if_maddr_rlock(ifp);
1608266423Sjfv		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1609266423Sjfv                        if (ifma->ifma_addr->sa_family != AF_LINK)
1610266423Sjfv                                continue;
1611266423Sjfv                        if (mcnt == MAX_MULTICAST_ADDR)
1612266423Sjfv                                break;
1613266423Sjfv                        mcnt++;
1614266423Sjfv		}
1615266423Sjfv		if_maddr_runlock(ifp);
1616266423Sjfv	}
1617266423Sjfv
1618266423Sjfv	if (mcnt >= MAX_MULTICAST_ADDR)
1619266423Sjfv                multi = TRUE;
1620266423Sjfv        if (ifp->if_flags & IFF_PROMISC)
1621266423Sjfv		uni = TRUE;
1622266423Sjfv
1623266423Sjfv	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1624266423Sjfv	    vsi->seid, uni, NULL);
1625266423Sjfv	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1626266423Sjfv	    vsi->seid, multi, NULL);
1627266423Sjfv	return;
1628266423Sjfv}
1629266423Sjfv
1630266423Sjfv/*********************************************************************
1631266423Sjfv * 	Filter Routines
1632266423Sjfv *
1633266423Sjfv *	Routines for multicast and vlan filter management.
1634266423Sjfv *
1635266423Sjfv *********************************************************************/
1636266423Sjfvstatic void
1637270346Sjfvixl_add_multi(struct ixl_vsi *vsi)
1638266423Sjfv{
1639266423Sjfv	struct	ifmultiaddr	*ifma;
1640266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1641266423Sjfv	struct i40e_hw		*hw = vsi->hw;
1642266423Sjfv	int			mcnt = 0, flags;
1643266423Sjfv
1644270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: begin");
1645266423Sjfv
1646266423Sjfv	if_maddr_rlock(ifp);
1647266423Sjfv	/*
1648266423Sjfv	** First just get a count, to decide if we
1649266423Sjfv	** we simply use multicast promiscuous.
1650266423Sjfv	*/
1651266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1652266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1653266423Sjfv			continue;
1654266423Sjfv		mcnt++;
1655266423Sjfv	}
1656266423Sjfv	if_maddr_runlock(ifp);
1657266423Sjfv
1658266423Sjfv	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1659266423Sjfv		/* delete existing MC filters */
1660270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1661266423Sjfv		i40e_aq_set_vsi_multicast_promiscuous(hw,
1662266423Sjfv		    vsi->seid, TRUE, NULL);
1663266423Sjfv		return;
1664266423Sjfv	}
1665266423Sjfv
1666266423Sjfv	mcnt = 0;
1667266423Sjfv	if_maddr_rlock(ifp);
1668266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1669266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1670266423Sjfv			continue;
1671270346Sjfv		ixl_add_mc_filter(vsi,
1672266423Sjfv		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1673266423Sjfv		mcnt++;
1674266423Sjfv	}
1675266423Sjfv	if_maddr_runlock(ifp);
1676266423Sjfv	if (mcnt > 0) {
1677270346Sjfv		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1678270346Sjfv		ixl_add_hw_filters(vsi, flags, mcnt);
1679266423Sjfv	}
1680266423Sjfv
1681270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: end");
1682266423Sjfv	return;
1683266423Sjfv}
1684266423Sjfv
1685266423Sjfvstatic void
1686270346Sjfvixl_del_multi(struct ixl_vsi *vsi)
1687266423Sjfv{
1688266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1689266423Sjfv	struct ifmultiaddr	*ifma;
1690270346Sjfv	struct ixl_mac_filter	*f;
1691266423Sjfv	int			mcnt = 0;
1692266423Sjfv	bool		match = FALSE;
1693266423Sjfv
1694270346Sjfv	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1695266423Sjfv
1696266423Sjfv	/* Search for removed multicast addresses */
1697266423Sjfv	if_maddr_rlock(ifp);
1698266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
1699270346Sjfv		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1700266423Sjfv			match = FALSE;
1701266423Sjfv			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1702266423Sjfv				if (ifma->ifma_addr->sa_family != AF_LINK)
1703266423Sjfv					continue;
1704266423Sjfv				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1705266423Sjfv				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1706266423Sjfv					match = TRUE;
1707266423Sjfv					break;
1708266423Sjfv				}
1709266423Sjfv			}
1710266423Sjfv			if (match == FALSE) {
1711270346Sjfv				f->flags |= IXL_FILTER_DEL;
1712266423Sjfv				mcnt++;
1713266423Sjfv			}
1714266423Sjfv		}
1715266423Sjfv	}
1716266423Sjfv	if_maddr_runlock(ifp);
1717266423Sjfv
1718266423Sjfv	if (mcnt > 0)
1719270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1720266423Sjfv}
1721266423Sjfv
1722266423Sjfv
1723266423Sjfv/*********************************************************************
1724266423Sjfv *  Timer routine
1725266423Sjfv *
1726266423Sjfv *  This routine checks for link status,updates statistics,
1727266423Sjfv *  and runs the watchdog check.
1728266423Sjfv *
1729266423Sjfv **********************************************************************/
1730266423Sjfv
1731266423Sjfvstatic void
1732270346Sjfvixl_local_timer(void *arg)
1733266423Sjfv{
1734270346Sjfv	struct ixl_pf		*pf = arg;
1735266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1736270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1737270346Sjfv	struct ixl_queue	*que = vsi->queues;
1738266423Sjfv	device_t		dev = pf->dev;
1739266423Sjfv	int			hung = 0;
1740266423Sjfv	u32			mask;
1741266423Sjfv
1742266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1743266423Sjfv
1744266423Sjfv	/* Fire off the adminq task */
1745266423Sjfv	taskqueue_enqueue(pf->tq, &pf->adminq);
1746266423Sjfv
1747266423Sjfv	/* Update stats */
1748270346Sjfv	ixl_update_stats_counters(pf);
1749266423Sjfv
1750266423Sjfv	/*
1751269198Sjfv	** Check status of the queues
1752266423Sjfv	*/
1753266423Sjfv	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1754266423Sjfv		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1755266423Sjfv
1756266423Sjfv	for (int i = 0; i < vsi->num_queues; i++,que++) {
1757266423Sjfv		/* Any queues with outstanding work get a sw irq */
1758266423Sjfv		if (que->busy)
1759266423Sjfv			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1760266423Sjfv		/*
1761266423Sjfv		** Each time txeof runs without cleaning, but there
1762266423Sjfv		** are uncleaned descriptors it increments busy. If
1763266423Sjfv		** we get to 5 we declare it hung.
1764266423Sjfv		*/
1765270346Sjfv		if (que->busy == IXL_QUEUE_HUNG) {
1766269198Sjfv			++hung;
1767269198Sjfv			/* Mark the queue as inactive */
1768269198Sjfv			vsi->active_queues &= ~((u64)1 << que->me);
1769269198Sjfv			continue;
1770269198Sjfv		} else {
1771269198Sjfv			/* Check if we've come back from hung */
1772269198Sjfv			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1773269198Sjfv				vsi->active_queues |= ((u64)1 << que->me);
1774269198Sjfv		}
1775270346Sjfv		if (que->busy >= IXL_MAX_TX_BUSY) {
1776277084Sjfv#ifdef IXL_DEBUG
1777266423Sjfv			device_printf(dev,"Warning queue %d "
1778269198Sjfv			    "appears to be hung!\n", i);
1779277084Sjfv#endif
1780270346Sjfv			que->busy = IXL_QUEUE_HUNG;
1781266423Sjfv			++hung;
1782266423Sjfv		}
1783266423Sjfv	}
1784266423Sjfv	/* Only reinit if all queues show hung */
1785266423Sjfv	if (hung == vsi->num_queues)
1786266423Sjfv		goto hung;
1787266423Sjfv
1788270346Sjfv	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1789266423Sjfv	return;
1790266423Sjfv
1791266423Sjfvhung:
1792266423Sjfv	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1793270346Sjfv	ixl_init_locked(pf);
1794266423Sjfv}
1795266423Sjfv
1796266423Sjfv/*
1797266423Sjfv** Note: this routine updates the OS on the link state
1798266423Sjfv**	the real check of the hardware only happens with
1799266423Sjfv**	a link interrupt.
1800266423Sjfv*/
1801266423Sjfvstatic void
1802270346Sjfvixl_update_link_status(struct ixl_pf *pf)
1803266423Sjfv{
1804270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1805266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1806266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1807266423Sjfv	device_t		dev = pf->dev;
1808266423Sjfv
1809279858Sjfv	if (pf->link_up){
1810266423Sjfv		if (vsi->link_active == FALSE) {
1811279033Sjfv			pf->fc = hw->fc.current_mode;
1812266423Sjfv			if (bootverbose) {
1813266423Sjfv				device_printf(dev,"Link is up %d Gbps %s,"
1814266423Sjfv				    " Flow Control: %s\n",
1815279858Sjfv				    ((pf->link_speed ==
1816279858Sjfv				    I40E_LINK_SPEED_40GB)? 40:10),
1817279033Sjfv				    "Full Duplex", ixl_fc_string[pf->fc]);
1818266423Sjfv			}
1819266423Sjfv			vsi->link_active = TRUE;
1820277084Sjfv			/*
1821277084Sjfv			** Warn user if link speed on NPAR enabled
1822277084Sjfv			** partition is not at least 10GB
1823277084Sjfv			*/
1824277084Sjfv			if (hw->func_caps.npar_enable &&
1825279858Sjfv			   (hw->phy.link_info.link_speed ==
1826279858Sjfv			   I40E_LINK_SPEED_1GB ||
1827279858Sjfv			   hw->phy.link_info.link_speed ==
1828279858Sjfv			   I40E_LINK_SPEED_100MB))
1829279858Sjfv				device_printf(dev, "The partition detected"
1830279858Sjfv				    "link speed that is less than 10Gbps\n");
1831266423Sjfv			if_link_state_change(ifp, LINK_STATE_UP);
1832266423Sjfv		}
1833266423Sjfv	} else { /* Link down */
1834266423Sjfv		if (vsi->link_active == TRUE) {
1835266423Sjfv			if (bootverbose)
1836266423Sjfv				device_printf(dev,"Link is Down\n");
1837266423Sjfv			if_link_state_change(ifp, LINK_STATE_DOWN);
1838266423Sjfv			vsi->link_active = FALSE;
1839266423Sjfv		}
1840266423Sjfv	}
1841266423Sjfv
1842266423Sjfv	return;
1843266423Sjfv}
1844266423Sjfv
1845266423Sjfv/*********************************************************************
1846266423Sjfv *
1847266423Sjfv *  This routine disables all traffic on the adapter by issuing a
1848266423Sjfv *  global reset on the MAC and deallocates TX/RX buffers.
1849266423Sjfv *
1850266423Sjfv **********************************************************************/
1851266423Sjfv
1852266423Sjfvstatic void
1853270346Sjfvixl_stop(struct ixl_pf *pf)
1854266423Sjfv{
1855270346Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
1856266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1857266423Sjfv
1858266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1859266423Sjfv
1860270346Sjfv	INIT_DEBUGOUT("ixl_stop: begin\n");
1861279858Sjfv	if (pf->num_vfs == 0)
1862279858Sjfv		ixl_disable_intr(vsi);
1863279858Sjfv	else
1864279858Sjfv		ixl_disable_rings_intr(vsi);
1865270346Sjfv	ixl_disable_rings(vsi);
1866266423Sjfv
1867266423Sjfv	/* Tell the stack that the interface is no longer active */
1868266423Sjfv	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1869266423Sjfv
1870266423Sjfv	/* Stop the local timer */
1871266423Sjfv	callout_stop(&pf->timer);
1872266423Sjfv
1873266423Sjfv	return;
1874266423Sjfv}
1875266423Sjfv
1876266423Sjfv
1877266423Sjfv/*********************************************************************
1878266423Sjfv *
1879266423Sjfv *  Setup MSIX Interrupt resources and handlers for the VSI
1880266423Sjfv *
1881266423Sjfv **********************************************************************/
1882266423Sjfvstatic int
1883270346Sjfvixl_assign_vsi_legacy(struct ixl_pf *pf)
1884266423Sjfv{
1885266423Sjfv	device_t        dev = pf->dev;
1886270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
1887270346Sjfv	struct		ixl_queue *que = vsi->queues;
1888266423Sjfv	int 		error, rid = 0;
1889266423Sjfv
1890266423Sjfv	if (pf->msix == 1)
1891266423Sjfv		rid = 1;
1892266423Sjfv	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1893266423Sjfv	    &rid, RF_SHAREABLE | RF_ACTIVE);
1894266423Sjfv	if (pf->res == NULL) {
1895266423Sjfv		device_printf(dev,"Unable to allocate"
1896266423Sjfv		    " bus resource: vsi legacy/msi interrupt\n");
1897266423Sjfv		return (ENXIO);
1898266423Sjfv	}
1899266423Sjfv
1900266423Sjfv	/* Set the handler function */
1901266423Sjfv	error = bus_setup_intr(dev, pf->res,
1902266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1903270346Sjfv	    ixl_intr, pf, &pf->tag);
1904266423Sjfv	if (error) {
1905266423Sjfv		pf->res = NULL;
1906266423Sjfv		device_printf(dev, "Failed to register legacy/msi handler");
1907266423Sjfv		return (error);
1908266423Sjfv	}
1909266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1910270346Sjfv	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1911270346Sjfv	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1912270346Sjfv	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1913266423Sjfv	    taskqueue_thread_enqueue, &que->tq);
1914266423Sjfv	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1915266423Sjfv	    device_get_nameunit(dev));
1916270346Sjfv	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1917279858Sjfv
1918279858Sjfv#ifdef PCI_IOV
1919279858Sjfv	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1920279858Sjfv#endif
1921279858Sjfv
1922270346Sjfv	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1923266423Sjfv	    taskqueue_thread_enqueue, &pf->tq);
1924266423Sjfv	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1925266423Sjfv	    device_get_nameunit(dev));
1926266423Sjfv
1927266423Sjfv	return (0);
1928266423Sjfv}
1929266423Sjfv
1930266423Sjfv
1931266423Sjfv/*********************************************************************
1932266423Sjfv *
1933266423Sjfv *  Setup MSIX Interrupt resources and handlers for the VSI
1934266423Sjfv *
1935266423Sjfv **********************************************************************/
1936266423Sjfvstatic int
1937270346Sjfvixl_assign_vsi_msix(struct ixl_pf *pf)
1938266423Sjfv{
1939266423Sjfv	device_t	dev = pf->dev;
1940270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
1941270346Sjfv	struct 		ixl_queue *que = vsi->queues;
1942266423Sjfv	struct		tx_ring	 *txr;
1943266423Sjfv	int 		error, rid, vector = 0;
1944279255Sadrian#ifdef	RSS
1945279255Sadrian	cpuset_t cpu_mask;
1946279255Sadrian#endif
1947266423Sjfv
1948266423Sjfv	/* Admin Que is vector 0*/
1949266423Sjfv	rid = vector + 1;
1950266423Sjfv	pf->res = bus_alloc_resource_any(dev,
1951266423Sjfv    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1952266423Sjfv	if (!pf->res) {
1953266423Sjfv		device_printf(dev,"Unable to allocate"
1954266423Sjfv    	    " bus resource: Adminq interrupt [%d]\n", rid);
1955266423Sjfv		return (ENXIO);
1956266423Sjfv	}
1957266423Sjfv	/* Set the adminq vector and handler */
1958266423Sjfv	error = bus_setup_intr(dev, pf->res,
1959266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1960270346Sjfv	    ixl_msix_adminq, pf, &pf->tag);
1961266423Sjfv	if (error) {
1962266423Sjfv		pf->res = NULL;
1963266423Sjfv		device_printf(dev, "Failed to register Admin que handler");
1964266423Sjfv		return (error);
1965266423Sjfv	}
1966266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "aq");
1967266423Sjfv	pf->admvec = vector;
1968266423Sjfv	/* Tasklet for Admin Queue */
1969270346Sjfv	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1970279858Sjfv
1971279858Sjfv#ifdef PCI_IOV
1972279858Sjfv	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1973279858Sjfv#endif
1974279858Sjfv
1975270346Sjfv	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1976266423Sjfv	    taskqueue_thread_enqueue, &pf->tq);
1977266423Sjfv	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1978266423Sjfv	    device_get_nameunit(pf->dev));
1979266423Sjfv	++vector;
1980266423Sjfv
1981266423Sjfv	/* Now set up the stations */
1982266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1983277084Sjfv		int cpu_id = i;
1984266423Sjfv		rid = vector + 1;
1985266423Sjfv		txr = &que->txr;
1986266423Sjfv		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1987266423Sjfv		    RF_SHAREABLE | RF_ACTIVE);
1988266423Sjfv		if (que->res == NULL) {
1989266423Sjfv			device_printf(dev,"Unable to allocate"
1990266423Sjfv		    	    " bus resource: que interrupt [%d]\n", vector);
1991266423Sjfv			return (ENXIO);
1992266423Sjfv		}
1993266423Sjfv		/* Set the handler function */
1994266423Sjfv		error = bus_setup_intr(dev, que->res,
1995266423Sjfv		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1996270346Sjfv		    ixl_msix_que, que, &que->tag);
1997266423Sjfv		if (error) {
1998266423Sjfv			que->res = NULL;
1999266423Sjfv			device_printf(dev, "Failed to register que handler");
2000266423Sjfv			return (error);
2001266423Sjfv		}
2002266423Sjfv		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2003266423Sjfv		/* Bind the vector to a CPU */
2004277084Sjfv#ifdef RSS
2005277084Sjfv		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2006277084Sjfv#endif
2007277084Sjfv		bus_bind_intr(dev, que->res, cpu_id);
2008266423Sjfv		que->msix = vector;
2009270346Sjfv		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2010270346Sjfv		TASK_INIT(&que->task, 0, ixl_handle_que, que);
2011270346Sjfv		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2012266423Sjfv		    taskqueue_thread_enqueue, &que->tq);
2013277084Sjfv#ifdef RSS
2014279299Sadrian		CPU_SETOF(cpu_id, &cpu_mask);
2015279255Sadrian		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2016279255Sadrian		    &cpu_mask, "%s (bucket %d)",
2017277084Sjfv		    device_get_nameunit(dev), cpu_id);
2018277084Sjfv#else
2019277084Sjfv		taskqueue_start_threads(&que->tq, 1, PI_NET,
2020277084Sjfv		    "%s que", device_get_nameunit(dev));
2021277084Sjfv#endif
2022266423Sjfv	}
2023266423Sjfv
2024266423Sjfv	return (0);
2025266423Sjfv}
2026266423Sjfv
2027266423Sjfv
2028266423Sjfv/*
2029266423Sjfv * Allocate MSI/X vectors
2030266423Sjfv */
2031266423Sjfvstatic int
2032270346Sjfvixl_init_msix(struct ixl_pf *pf)
2033266423Sjfv{
2034266423Sjfv	device_t dev = pf->dev;
2035266423Sjfv	int rid, want, vectors, queues, available;
2036266423Sjfv
2037266423Sjfv	/* Override by tuneable */
2038270346Sjfv	if (ixl_enable_msix == 0)
2039266423Sjfv		goto msi;
2040266423Sjfv
2041269198Sjfv	/*
2042269198Sjfv	** When used in a virtualized environment
2043269198Sjfv	** PCI BUSMASTER capability may not be set
2044269198Sjfv	** so explicity set it here and rewrite
2045269198Sjfv	** the ENABLE in the MSIX control register
2046269198Sjfv	** at this point to cause the host to
2047269198Sjfv	** successfully initialize us.
2048269198Sjfv	*/
2049269198Sjfv	{
2050269198Sjfv		u16 pci_cmd_word;
2051269198Sjfv		int msix_ctrl;
2052269198Sjfv		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2053269198Sjfv		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2054269198Sjfv		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2055269198Sjfv		pci_find_cap(dev, PCIY_MSIX, &rid);
2056269198Sjfv		rid += PCIR_MSIX_CTRL;
2057269198Sjfv		msix_ctrl = pci_read_config(dev, rid, 2);
2058269198Sjfv		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2059269198Sjfv		pci_write_config(dev, rid, msix_ctrl, 2);
2060269198Sjfv	}
2061269198Sjfv
2062266423Sjfv	/* First try MSI/X */
2063270346Sjfv	rid = PCIR_BAR(IXL_BAR);
2064266423Sjfv	pf->msix_mem = bus_alloc_resource_any(dev,
2065266423Sjfv	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2066266423Sjfv       	if (!pf->msix_mem) {
2067266423Sjfv		/* May not be enabled */
2068266423Sjfv		device_printf(pf->dev,
2069266423Sjfv		    "Unable to map MSIX table \n");
2070266423Sjfv		goto msi;
2071266423Sjfv	}
2072266423Sjfv
2073266423Sjfv	available = pci_msix_count(dev);
2074266423Sjfv	if (available == 0) { /* system has msix disabled */
2075266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2076266423Sjfv		    rid, pf->msix_mem);
2077266423Sjfv		pf->msix_mem = NULL;
2078266423Sjfv		goto msi;
2079266423Sjfv	}
2080266423Sjfv
2081266423Sjfv	/* Figure out a reasonable auto config value */
2082266423Sjfv	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2083266423Sjfv
2084266423Sjfv	/* Override with hardcoded value if sane */
2085270346Sjfv	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2086270346Sjfv		queues = ixl_max_queues;
2087266423Sjfv
2088277084Sjfv#ifdef  RSS
2089277084Sjfv	/* If we're doing RSS, clamp at the number of RSS buckets */
2090277084Sjfv	if (queues > rss_getnumbuckets())
2091277084Sjfv		queues = rss_getnumbuckets();
2092277084Sjfv#endif
2093277084Sjfv
2094266423Sjfv	/*
2095266423Sjfv	** Want one vector (RX/TX pair) per queue
2096266423Sjfv	** plus an additional for the admin queue.
2097266423Sjfv	*/
2098266423Sjfv	want = queues + 1;
2099266423Sjfv	if (want <= available)	/* Have enough */
2100266423Sjfv		vectors = want;
2101266423Sjfv	else {
2102266423Sjfv               	device_printf(pf->dev,
2103266423Sjfv		    "MSIX Configuration Problem, "
2104266423Sjfv		    "%d vectors available but %d wanted!\n",
2105266423Sjfv		    available, want);
2106266423Sjfv		return (0); /* Will go to Legacy setup */
2107266423Sjfv	}
2108266423Sjfv
2109266423Sjfv	if (pci_alloc_msix(dev, &vectors) == 0) {
2110266423Sjfv               	device_printf(pf->dev,
2111266423Sjfv		    "Using MSIX interrupts with %d vectors\n", vectors);
2112266423Sjfv		pf->msix = vectors;
2113266423Sjfv		pf->vsi.num_queues = queues;
2114277084Sjfv#ifdef RSS
2115277084Sjfv		/*
2116277084Sjfv		 * If we're doing RSS, the number of queues needs to
2117277084Sjfv		 * match the number of RSS buckets that are configured.
2118277084Sjfv		 *
2119277084Sjfv		 * + If there's more queues than RSS buckets, we'll end
2120277084Sjfv		 *   up with queues that get no traffic.
2121277084Sjfv		 *
2122277084Sjfv		 * + If there's more RSS buckets than queues, we'll end
2123277084Sjfv		 *   up having multiple RSS buckets map to the same queue,
2124277084Sjfv		 *   so there'll be some contention.
2125277084Sjfv		 */
2126277084Sjfv		if (queues != rss_getnumbuckets()) {
2127277084Sjfv			device_printf(dev,
2128277084Sjfv			    "%s: queues (%d) != RSS buckets (%d)"
2129277084Sjfv			    "; performance will be impacted.\n",
2130277084Sjfv			    __func__, queues, rss_getnumbuckets());
2131277084Sjfv		}
2132277084Sjfv#endif
2133266423Sjfv		return (vectors);
2134266423Sjfv	}
2135266423Sjfvmsi:
2136266423Sjfv       	vectors = pci_msi_count(dev);
2137266423Sjfv	pf->vsi.num_queues = 1;
2138266423Sjfv	pf->msix = 1;
2139270346Sjfv	ixl_max_queues = 1;
2140270346Sjfv	ixl_enable_msix = 0;
2141266423Sjfv       	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2142266423Sjfv               	device_printf(pf->dev,"Using an MSI interrupt\n");
2143266423Sjfv	else {
2144266423Sjfv		pf->msix = 0;
2145266423Sjfv               	device_printf(pf->dev,"Using a Legacy interrupt\n");
2146266423Sjfv	}
2147266423Sjfv	return (vectors);
2148266423Sjfv}
2149266423Sjfv
2150266423Sjfv
2151266423Sjfv/*
2152266423Sjfv * Plumb MSI/X vectors
2153266423Sjfv */
2154266423Sjfvstatic void
2155270346Sjfvixl_configure_msix(struct ixl_pf *pf)
2156266423Sjfv{
2157266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2158270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
2159266423Sjfv	u32		reg;
2160266423Sjfv	u16		vector = 1;
2161266423Sjfv
2162266423Sjfv	/* First set up the adminq - vector 0 */
2163266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2164266423Sjfv	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2165266423Sjfv
2166266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2167266423Sjfv	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2168266423Sjfv	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2169266423Sjfv	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2170266423Sjfv	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2171266423Sjfv	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2172266423Sjfv	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2173266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2174266423Sjfv
2175266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2176270346Sjfv	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2177266423Sjfv
2178266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2179266423Sjfv	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2180266423Sjfv	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2181266423Sjfv
2182266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2183266423Sjfv
2184266423Sjfv	/* Next configure the queues */
2185266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2186266423Sjfv		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2187266423Sjfv		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2188266423Sjfv
2189266423Sjfv		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2190270346Sjfv		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2191266423Sjfv		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2192266423Sjfv		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2193266423Sjfv		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2194266423Sjfv		wr32(hw, I40E_QINT_RQCTL(i), reg);
2195266423Sjfv
2196266423Sjfv		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2197270346Sjfv		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2198266423Sjfv		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2199266423Sjfv		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2200266423Sjfv		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2201266423Sjfv		if (i == (vsi->num_queues - 1))
2202270346Sjfv			reg |= (IXL_QUEUE_EOL
2203266423Sjfv			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2204266423Sjfv		wr32(hw, I40E_QINT_TQCTL(i), reg);
2205266423Sjfv	}
2206266423Sjfv}
2207266423Sjfv
2208266423Sjfv/*
2209266423Sjfv * Configure for MSI single vector operation
2210266423Sjfv */
2211266423Sjfvstatic void
2212270346Sjfvixl_configure_legacy(struct ixl_pf *pf)
2213266423Sjfv{
2214266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2215266423Sjfv	u32		reg;
2216266423Sjfv
2217266423Sjfv
2218266423Sjfv	wr32(hw, I40E_PFINT_ITR0(0), 0);
2219266423Sjfv	wr32(hw, I40E_PFINT_ITR0(1), 0);
2220266423Sjfv
2221266423Sjfv
2222266423Sjfv	/* Setup "other" causes */
2223266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2224266423Sjfv	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2225266423Sjfv	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2226266423Sjfv	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2227266423Sjfv	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2228266423Sjfv	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2229266423Sjfv	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2230266423Sjfv	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2231266423Sjfv	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2232266423Sjfv	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2233266423Sjfv	    ;
2234266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2235266423Sjfv
2236266423Sjfv	/* SW_ITR_IDX = 0, but don't change INTENA */
2237266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2238266423Sjfv	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2239266423Sjfv	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2240266423Sjfv	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2241266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2242266423Sjfv
2243266423Sjfv	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2244266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0);
2245266423Sjfv
2246266423Sjfv	/* Associate the queue pair to the vector and enable the q int */
2247266423Sjfv	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2248270346Sjfv	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2249266423Sjfv	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2250266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
2251266423Sjfv
2252266423Sjfv	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2253270346Sjfv	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2254270346Sjfv	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2255266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
2256266423Sjfv
2257266423Sjfv	/* Next enable the queue pair */
2258266423Sjfv	reg = rd32(hw, I40E_QTX_ENA(0));
2259266423Sjfv	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2260266423Sjfv	wr32(hw, I40E_QTX_ENA(0), reg);
2261266423Sjfv
2262266423Sjfv	reg = rd32(hw, I40E_QRX_ENA(0));
2263266423Sjfv	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2264266423Sjfv	wr32(hw, I40E_QRX_ENA(0), reg);
2265266423Sjfv}
2266266423Sjfv
2267266423Sjfv
2268266423Sjfv/*
2269266423Sjfv * Set the Initial ITR state
2270266423Sjfv */
2271266423Sjfvstatic void
2272270346Sjfvixl_configure_itr(struct ixl_pf *pf)
2273266423Sjfv{
2274266423Sjfv	struct i40e_hw		*hw = &pf->hw;
2275270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2276270346Sjfv	struct ixl_queue	*que = vsi->queues;
2277266423Sjfv
2278270346Sjfv	vsi->rx_itr_setting = ixl_rx_itr;
2279270346Sjfv	if (ixl_dynamic_rx_itr)
2280270346Sjfv		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2281270346Sjfv	vsi->tx_itr_setting = ixl_tx_itr;
2282270346Sjfv	if (ixl_dynamic_tx_itr)
2283270346Sjfv		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2284266423Sjfv
2285266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2286266423Sjfv		struct tx_ring	*txr = &que->txr;
2287266423Sjfv		struct rx_ring 	*rxr = &que->rxr;
2288266423Sjfv
2289270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2290266423Sjfv		    vsi->rx_itr_setting);
2291266423Sjfv		rxr->itr = vsi->rx_itr_setting;
2292270346Sjfv		rxr->latency = IXL_AVE_LATENCY;
2293270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2294266423Sjfv		    vsi->tx_itr_setting);
2295266423Sjfv		txr->itr = vsi->tx_itr_setting;
2296270346Sjfv		txr->latency = IXL_AVE_LATENCY;
2297266423Sjfv	}
2298266423Sjfv}
2299266423Sjfv
2300266423Sjfv
2301266423Sjfvstatic int
2302270346Sjfvixl_allocate_pci_resources(struct ixl_pf *pf)
2303266423Sjfv{
2304266423Sjfv	int             rid;
2305266423Sjfv	device_t        dev = pf->dev;
2306266423Sjfv
2307266423Sjfv	rid = PCIR_BAR(0);
2308266423Sjfv	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2309266423Sjfv	    &rid, RF_ACTIVE);
2310266423Sjfv
2311266423Sjfv	if (!(pf->pci_mem)) {
2312266423Sjfv		device_printf(dev,"Unable to allocate bus resource: memory\n");
2313266423Sjfv		return (ENXIO);
2314266423Sjfv	}
2315266423Sjfv
2316266423Sjfv	pf->osdep.mem_bus_space_tag =
2317266423Sjfv		rman_get_bustag(pf->pci_mem);
2318266423Sjfv	pf->osdep.mem_bus_space_handle =
2319266423Sjfv		rman_get_bushandle(pf->pci_mem);
2320270346Sjfv	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2321272285Srstone	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2322266423Sjfv	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2323266423Sjfv
2324266423Sjfv	pf->hw.back = &pf->osdep;
2325266423Sjfv
2326266423Sjfv	/*
2327266423Sjfv	** Now setup MSI or MSI/X, should
2328266423Sjfv	** return us the number of supported
2329266423Sjfv	** vectors. (Will be 1 for MSI)
2330266423Sjfv	*/
2331270346Sjfv	pf->msix = ixl_init_msix(pf);
2332266423Sjfv	return (0);
2333266423Sjfv}
2334266423Sjfv
2335266423Sjfvstatic void
2336270346Sjfvixl_free_pci_resources(struct ixl_pf * pf)
2337266423Sjfv{
2338270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2339270346Sjfv	struct ixl_queue	*que = vsi->queues;
2340266423Sjfv	device_t		dev = pf->dev;
2341266423Sjfv	int			rid, memrid;
2342266423Sjfv
2343270346Sjfv	memrid = PCIR_BAR(IXL_BAR);
2344266423Sjfv
2345266423Sjfv	/* We may get here before stations are setup */
2346270346Sjfv	if ((!ixl_enable_msix) || (que == NULL))
2347266423Sjfv		goto early;
2348266423Sjfv
2349266423Sjfv	/*
2350266423Sjfv	**  Release all msix VSI resources:
2351266423Sjfv	*/
2352266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2353266423Sjfv		rid = que->msix + 1;
2354266423Sjfv		if (que->tag != NULL) {
2355266423Sjfv			bus_teardown_intr(dev, que->res, que->tag);
2356266423Sjfv			que->tag = NULL;
2357266423Sjfv		}
2358266423Sjfv		if (que->res != NULL)
2359266423Sjfv			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2360266423Sjfv	}
2361266423Sjfv
2362266423Sjfvearly:
2363266423Sjfv	/* Clean the AdminQ interrupt last */
2364266423Sjfv	if (pf->admvec) /* we are doing MSIX */
2365266423Sjfv		rid = pf->admvec + 1;
2366266423Sjfv	else
2367266423Sjfv		(pf->msix != 0) ? (rid = 1):(rid = 0);
2368266423Sjfv
2369266423Sjfv	if (pf->tag != NULL) {
2370266423Sjfv		bus_teardown_intr(dev, pf->res, pf->tag);
2371266423Sjfv		pf->tag = NULL;
2372266423Sjfv	}
2373266423Sjfv	if (pf->res != NULL)
2374266423Sjfv		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2375266423Sjfv
2376266423Sjfv	if (pf->msix)
2377266423Sjfv		pci_release_msi(dev);
2378266423Sjfv
2379266423Sjfv	if (pf->msix_mem != NULL)
2380266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2381266423Sjfv		    memrid, pf->msix_mem);
2382266423Sjfv
2383266423Sjfv	if (pf->pci_mem != NULL)
2384266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2385266423Sjfv		    PCIR_BAR(0), pf->pci_mem);
2386266423Sjfv
2387266423Sjfv	return;
2388266423Sjfv}
2389266423Sjfv
2390274205Sjfvstatic void
2391274205Sjfvixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2392274205Sjfv{
2393274205Sjfv	/* Display supported media types */
2394274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2395274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2396266423Sjfv
2397274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2398274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2399279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2400279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2401279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2402279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2403274205Sjfv
2404274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2405279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4) ||
2406279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR) ||
2407279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2408279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2409279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2410279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_SFI) ||
2411274205Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2412274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2413279033Sjfv
2414274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2415274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2416274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2417274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2418274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2419274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2420274205Sjfv
2421279033Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2422279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2423279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2424279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2425279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XLPPI) ||
2426279033Sjfv	    /* KR4 uses CR4 until the OS has the real media type */
2427279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2428274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2429279033Sjfv
2430274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2431274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2432274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2433274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2434274205Sjfv}
2435274205Sjfv
2436266423Sjfv/*********************************************************************
2437266423Sjfv *
2438266423Sjfv *  Setup networking device structure and register an interface.
2439266423Sjfv *
2440266423Sjfv **********************************************************************/
2441266423Sjfvstatic int
2442270346Sjfvixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2443266423Sjfv{
2444266423Sjfv	struct ifnet		*ifp;
2445266423Sjfv	struct i40e_hw		*hw = vsi->hw;
2446270346Sjfv	struct ixl_queue	*que = vsi->queues;
2447279033Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
2448266423Sjfv	enum i40e_status_code aq_error = 0;
2449266423Sjfv
2450270346Sjfv	INIT_DEBUGOUT("ixl_setup_interface: begin");
2451266423Sjfv
2452266423Sjfv	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2453266423Sjfv	if (ifp == NULL) {
2454266423Sjfv		device_printf(dev, "can not allocate ifnet structure\n");
2455266423Sjfv		return (-1);
2456266423Sjfv	}
2457266423Sjfv	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2458266423Sjfv	ifp->if_mtu = ETHERMTU;
2459266423Sjfv	ifp->if_baudrate = 4000000000;  // ??
2460270346Sjfv	ifp->if_init = ixl_init;
2461266423Sjfv	ifp->if_softc = vsi;
2462266423Sjfv	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2463270346Sjfv	ifp->if_ioctl = ixl_ioctl;
2464266423Sjfv
2465274205Sjfv#if __FreeBSD_version >= 1100036
2466272227Sglebius	if_setgetcounterfn(ifp, ixl_get_counter);
2467272227Sglebius#endif
2468272227Sglebius
2469270346Sjfv	ifp->if_transmit = ixl_mq_start;
2470266423Sjfv
2471270346Sjfv	ifp->if_qflush = ixl_qflush;
2472266423Sjfv
2473266423Sjfv	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2474266423Sjfv
2475266423Sjfv	vsi->max_frame_size =
2476266423Sjfv	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2477266423Sjfv	    + ETHER_VLAN_ENCAP_LEN;
2478266423Sjfv
2479266423Sjfv	/*
2480266423Sjfv	 * Tell the upper layer(s) we support long frames.
2481266423Sjfv	 */
2482270856Sglebius	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2483266423Sjfv
2484266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM;
2485266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2486266423Sjfv	ifp->if_capabilities |= IFCAP_TSO;
2487266423Sjfv	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2488266423Sjfv	ifp->if_capabilities |= IFCAP_LRO;
2489266423Sjfv
2490266423Sjfv	/* VLAN capabilties */
2491266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2492266423Sjfv			     |  IFCAP_VLAN_HWTSO
2493266423Sjfv			     |  IFCAP_VLAN_MTU
2494266423Sjfv			     |  IFCAP_VLAN_HWCSUM;
2495266423Sjfv	ifp->if_capenable = ifp->if_capabilities;
2496266423Sjfv
2497266423Sjfv	/*
2498266423Sjfv	** Don't turn this on by default, if vlans are
2499266423Sjfv	** created on another pseudo device (eg. lagg)
2500266423Sjfv	** then vlan events are not passed thru, breaking
2501266423Sjfv	** operation, but with HW FILTER off it works. If
2502270346Sjfv	** using vlans directly on the ixl driver you can
2503266423Sjfv	** enable this and get full hardware tag filtering.
2504266423Sjfv	*/
2505266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2506266423Sjfv
2507266423Sjfv	/*
2508266423Sjfv	 * Specify the media types supported by this adapter and register
2509266423Sjfv	 * callbacks to update media and link information
2510266423Sjfv	 */
2511270346Sjfv	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2512270346Sjfv		     ixl_media_status);
2513266423Sjfv
2514279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
2515279033Sjfv	    FALSE, TRUE, &abilities, NULL);
2516279033Sjfv	/* May need delay to detect fiber correctly */
2517274205Sjfv	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2518274205Sjfv		i40e_msec_delay(200);
2519277084Sjfv		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2520279033Sjfv		    TRUE, &abilities, NULL);
2521279033Sjfv	}
2522279033Sjfv	if (aq_error) {
2523274205Sjfv		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2524274205Sjfv			device_printf(dev, "Unknown PHY type detected!\n");
2525274205Sjfv		else
2526279033Sjfv			device_printf(dev,
2527279033Sjfv			    "Error getting supported media types, err %d,"
2528279033Sjfv			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2529279033Sjfv		return (0);
2530279033Sjfv	}
2531266423Sjfv
2532279033Sjfv	ixl_add_ifmedia(vsi, abilities.phy_type);
2533279033Sjfv
2534266423Sjfv	/* Use autoselect media by default */
2535266423Sjfv	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2536266423Sjfv	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2537266423Sjfv
2538274205Sjfv	ether_ifattach(ifp, hw->mac.addr);
2539274205Sjfv
2540266423Sjfv	return (0);
2541266423Sjfv}
2542266423Sjfv
2543279858Sjfv/*
2544279858Sjfv** Run when the Admin Queue gets a
2545279858Sjfv** link transition interrupt.
2546279858Sjfv*/
2547279858Sjfvstatic void
2548279858Sjfvixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2549266423Sjfv{
2550279858Sjfv	struct i40e_hw	*hw = &pf->hw;
2551279858Sjfv	struct i40e_aqc_get_link_status *status =
2552279858Sjfv	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2553266423Sjfv	bool check;
2554266423Sjfv
2555279858Sjfv	hw->phy.get_link_info = TRUE;
2556266423Sjfv	check = i40e_get_link_status(hw);
2557279858Sjfv	pf->link_up = check;
2558270346Sjfv#ifdef IXL_DEBUG
2559266423Sjfv	printf("Link is %s\n", check ? "up":"down");
2560266423Sjfv#endif
2561279858Sjfv	/* Report if Unqualified modules are found */
2562279858Sjfv	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2563279858Sjfv	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2564279858Sjfv	    (!(status->link_info & I40E_AQ_LINK_UP)))
2565279858Sjfv		device_printf(pf->dev, "Link failed because "
2566279858Sjfv		    "an unqualified module was detected\n");
2567279858Sjfv
2568279858Sjfv	return;
2569266423Sjfv}
2570266423Sjfv
2571266423Sjfv/*********************************************************************
2572266423Sjfv *
2573279033Sjfv *  Get Firmware Switch configuration
2574279033Sjfv *	- this will need to be more robust when more complex
2575279033Sjfv *	  switch configurations are enabled.
2576266423Sjfv *
2577266423Sjfv **********************************************************************/
2578266423Sjfvstatic int
2579279033Sjfvixl_switch_config(struct ixl_pf *pf)
2580266423Sjfv{
2581279033Sjfv	struct i40e_hw	*hw = &pf->hw;
2582279033Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
2583266423Sjfv	device_t 	dev = vsi->dev;
2584266423Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
2585266423Sjfv	u8	aq_buf[I40E_AQ_LARGE_BUF];
2586279858Sjfv	int	ret;
2587266423Sjfv	u16	next = 0;
2588266423Sjfv
2589279033Sjfv	memset(&aq_buf, 0, sizeof(aq_buf));
2590266423Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2591266423Sjfv	ret = i40e_aq_get_switch_config(hw, sw_config,
2592266423Sjfv	    sizeof(aq_buf), &next, NULL);
2593266423Sjfv	if (ret) {
2594279858Sjfv		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2595279858Sjfv		    ret);
2596266423Sjfv		return (ret);
2597266423Sjfv	}
2598270346Sjfv#ifdef IXL_DEBUG
2599279858Sjfv	device_printf(dev,
2600279858Sjfv	    "Switch config: header reported: %d in structure, %d total\n",
2601266423Sjfv    	    sw_config->header.num_reported, sw_config->header.num_total);
2602279858Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
2603279858Sjfv		device_printf(dev,
2604279858Sjfv		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2605279858Sjfv		    sw_config->element[i].element_type,
2606279858Sjfv		    sw_config->element[i].seid,
2607279858Sjfv		    sw_config->element[i].uplink_seid,
2608279858Sjfv		    sw_config->element[i].downlink_seid);
2609279858Sjfv	}
2610266423Sjfv#endif
2611279033Sjfv	/* Simplified due to a single VSI at the moment */
2612279858Sjfv	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2613279858Sjfv	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2614266423Sjfv	vsi->seid = sw_config->element[0].seid;
2615279033Sjfv	return (ret);
2616279033Sjfv}
2617266423Sjfv
2618279033Sjfv/*********************************************************************
2619279033Sjfv *
2620279033Sjfv *  Initialize the VSI:  this handles contexts, which means things
2621279033Sjfv *  			 like the number of descriptors, buffer size,
2622279033Sjfv *			 plus we init the rings thru this function.
2623279033Sjfv *
2624279033Sjfv **********************************************************************/
2625279033Sjfvstatic int
2626279033Sjfvixl_initialize_vsi(struct ixl_vsi *vsi)
2627279033Sjfv{
2628279858Sjfv	struct ixl_pf		*pf = vsi->back;
2629279033Sjfv	struct ixl_queue	*que = vsi->queues;
2630279033Sjfv	device_t		dev = vsi->dev;
2631279033Sjfv	struct i40e_hw		*hw = vsi->hw;
2632279033Sjfv	struct i40e_vsi_context	ctxt;
2633279033Sjfv	int			err = 0;
2634279033Sjfv
2635266423Sjfv	memset(&ctxt, 0, sizeof(ctxt));
2636266423Sjfv	ctxt.seid = vsi->seid;
2637279858Sjfv	if (pf->veb_seid != 0)
2638279858Sjfv		ctxt.uplink_seid = pf->veb_seid;
2639266423Sjfv	ctxt.pf_num = hw->pf_id;
2640279033Sjfv	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2641279033Sjfv	if (err) {
2642279033Sjfv		device_printf(dev,"get vsi params failed %x!!\n", err);
2643279033Sjfv		return (err);
2644266423Sjfv	}
2645270346Sjfv#ifdef IXL_DEBUG
2646266423Sjfv	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2647266423Sjfv	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2648266423Sjfv	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2649266423Sjfv	    ctxt.uplink_seid, ctxt.vsi_number,
2650266423Sjfv	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2651266423Sjfv	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2652266423Sjfv	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2653266423Sjfv#endif
2654266423Sjfv	/*
2655266423Sjfv	** Set the queue and traffic class bits
2656266423Sjfv	**  - when multiple traffic classes are supported
2657266423Sjfv	**    this will need to be more robust.
2658266423Sjfv	*/
2659266423Sjfv	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2660266423Sjfv	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2661266423Sjfv	ctxt.info.queue_mapping[0] = 0;
2662266423Sjfv	ctxt.info.tc_mapping[0] = 0x0800;
2663266423Sjfv
2664266423Sjfv	/* Set VLAN receive stripping mode */
2665266423Sjfv	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2666266423Sjfv	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2667266423Sjfv	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2668266423Sjfv	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2669266423Sjfv	else
2670266423Sjfv	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2671266423Sjfv
2672266423Sjfv	/* Keep copy of VSI info in VSI for statistic counters */
2673266423Sjfv	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2674266423Sjfv
2675266423Sjfv	/* Reset VSI statistics */
2676270346Sjfv	ixl_vsi_reset_stats(vsi);
2677266423Sjfv	vsi->hw_filters_add = 0;
2678266423Sjfv	vsi->hw_filters_del = 0;
2679266423Sjfv
2680279858Sjfv	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2681279858Sjfv
2682279033Sjfv	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2683279033Sjfv	if (err) {
2684266423Sjfv		device_printf(dev,"update vsi params failed %x!!\n",
2685266423Sjfv		   hw->aq.asq_last_status);
2686279033Sjfv		return (err);
2687279033Sjfv	}
2688266423Sjfv
2689266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2690266423Sjfv		struct tx_ring		*txr = &que->txr;
2691266423Sjfv		struct rx_ring 		*rxr = &que->rxr;
2692266423Sjfv		struct i40e_hmc_obj_txq tctx;
2693266423Sjfv		struct i40e_hmc_obj_rxq rctx;
2694266423Sjfv		u32			txctl;
2695266423Sjfv		u16			size;
2696266423Sjfv
2697266423Sjfv
2698266423Sjfv		/* Setup the HMC TX Context  */
2699266423Sjfv		size = que->num_desc * sizeof(struct i40e_tx_desc);
2700266423Sjfv		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2701266423Sjfv		tctx.new_context = 1;
2702279858Sjfv		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2703266423Sjfv		tctx.qlen = que->num_desc;
2704266423Sjfv		tctx.fc_ena = 0;
2705269198Sjfv		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2706269198Sjfv		/* Enable HEAD writeback */
2707269198Sjfv		tctx.head_wb_ena = 1;
2708269198Sjfv		tctx.head_wb_addr = txr->dma.pa +
2709269198Sjfv		    (que->num_desc * sizeof(struct i40e_tx_desc));
2710266423Sjfv		tctx.rdylist_act = 0;
2711266423Sjfv		err = i40e_clear_lan_tx_queue_context(hw, i);
2712266423Sjfv		if (err) {
2713266423Sjfv			device_printf(dev, "Unable to clear TX context\n");
2714266423Sjfv			break;
2715266423Sjfv		}
2716266423Sjfv		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2717266423Sjfv		if (err) {
2718266423Sjfv			device_printf(dev, "Unable to set TX context\n");
2719266423Sjfv			break;
2720266423Sjfv		}
2721266423Sjfv		/* Associate the ring with this PF */
2722266423Sjfv		txctl = I40E_QTX_CTL_PF_QUEUE;
2723266423Sjfv		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2724266423Sjfv		    I40E_QTX_CTL_PF_INDX_MASK);
2725266423Sjfv		wr32(hw, I40E_QTX_CTL(i), txctl);
2726270346Sjfv		ixl_flush(hw);
2727266423Sjfv
2728266423Sjfv		/* Do ring (re)init */
2729270346Sjfv		ixl_init_tx_ring(que);
2730266423Sjfv
2731266423Sjfv		/* Next setup the HMC RX Context  */
2732279858Sjfv		if (vsi->max_frame_size <= MCLBYTES)
2733266423Sjfv			rxr->mbuf_sz = MCLBYTES;
2734266423Sjfv		else
2735266423Sjfv			rxr->mbuf_sz = MJUMPAGESIZE;
2736266423Sjfv
2737266423Sjfv		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2738266423Sjfv
2739266423Sjfv		/* Set up an RX context for the HMC */
2740266423Sjfv		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2741266423Sjfv		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2742266423Sjfv		/* ignore header split for now */
2743266423Sjfv		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2744266423Sjfv		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2745266423Sjfv		    vsi->max_frame_size : max_rxmax;
2746266423Sjfv		rctx.dtype = 0;
2747266423Sjfv		rctx.dsize = 1;	/* do 32byte descriptors */
2748266423Sjfv		rctx.hsplit_0 = 0;  /* no HDR split initially */
2749279858Sjfv		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2750266423Sjfv		rctx.qlen = que->num_desc;
2751266423Sjfv		rctx.tphrdesc_ena = 1;
2752266423Sjfv		rctx.tphwdesc_ena = 1;
2753266423Sjfv		rctx.tphdata_ena = 0;
2754266423Sjfv		rctx.tphhead_ena = 0;
2755266423Sjfv		rctx.lrxqthresh = 2;
2756266423Sjfv		rctx.crcstrip = 1;
2757266423Sjfv		rctx.l2tsel = 1;
2758266423Sjfv		rctx.showiv = 1;
2759266423Sjfv		rctx.fc_ena = 0;
2760266423Sjfv		rctx.prefena = 1;
2761266423Sjfv
2762266423Sjfv		err = i40e_clear_lan_rx_queue_context(hw, i);
2763266423Sjfv		if (err) {
2764266423Sjfv			device_printf(dev,
2765266423Sjfv			    "Unable to clear RX context %d\n", i);
2766266423Sjfv			break;
2767266423Sjfv		}
2768266423Sjfv		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2769266423Sjfv		if (err) {
2770266423Sjfv			device_printf(dev, "Unable to set RX context %d\n", i);
2771266423Sjfv			break;
2772266423Sjfv		}
2773270346Sjfv		err = ixl_init_rx_ring(que);
2774266423Sjfv		if (err) {
2775266423Sjfv			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2776266423Sjfv			break;
2777266423Sjfv		}
2778266423Sjfv		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2779266423Sjfv		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2780266423Sjfv	}
2781266423Sjfv	return (err);
2782266423Sjfv}
2783266423Sjfv
2784266423Sjfv
2785266423Sjfv/*********************************************************************
2786266423Sjfv *
2787266423Sjfv *  Free all VSI structs.
2788266423Sjfv *
2789266423Sjfv **********************************************************************/
2790266423Sjfvvoid
2791270346Sjfvixl_free_vsi(struct ixl_vsi *vsi)
2792266423Sjfv{
2793270346Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2794270346Sjfv	struct ixl_queue	*que = vsi->queues;
2795266423Sjfv
2796266423Sjfv	/* Free station queues */
2797266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2798266423Sjfv		struct tx_ring *txr = &que->txr;
2799266423Sjfv		struct rx_ring *rxr = &que->rxr;
2800266423Sjfv
2801266423Sjfv		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2802266423Sjfv			continue;
2803270346Sjfv		IXL_TX_LOCK(txr);
2804270346Sjfv		ixl_free_que_tx(que);
2805266423Sjfv		if (txr->base)
2806271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
2807270346Sjfv		IXL_TX_UNLOCK(txr);
2808270346Sjfv		IXL_TX_LOCK_DESTROY(txr);
2809266423Sjfv
2810266423Sjfv		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2811266423Sjfv			continue;
2812270346Sjfv		IXL_RX_LOCK(rxr);
2813270346Sjfv		ixl_free_que_rx(que);
2814266423Sjfv		if (rxr->base)
2815271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2816270346Sjfv		IXL_RX_UNLOCK(rxr);
2817270346Sjfv		IXL_RX_LOCK_DESTROY(rxr);
2818266423Sjfv
2819266423Sjfv	}
2820266423Sjfv	free(vsi->queues, M_DEVBUF);
2821266423Sjfv
2822266423Sjfv	/* Free VSI filter list */
2823279858Sjfv	ixl_free_mac_filters(vsi);
2824279858Sjfv}
2825279858Sjfv
2826279858Sjfvstatic void
2827279858Sjfvixl_free_mac_filters(struct ixl_vsi *vsi)
2828279858Sjfv{
2829279858Sjfv	struct ixl_mac_filter *f;
2830279858Sjfv
2831266423Sjfv	while (!SLIST_EMPTY(&vsi->ftl)) {
2832266423Sjfv		f = SLIST_FIRST(&vsi->ftl);
2833266423Sjfv		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2834266423Sjfv		free(f, M_DEVBUF);
2835266423Sjfv	}
2836266423Sjfv}
2837266423Sjfv
2838266423Sjfv
2839266423Sjfv/*********************************************************************
2840266423Sjfv *
2841266423Sjfv *  Allocate memory for the VSI (virtual station interface) and their
2842266423Sjfv *  associated queues, rings and the descriptors associated with each,
2843266423Sjfv *  called only once at attach.
2844266423Sjfv *
2845266423Sjfv **********************************************************************/
2846266423Sjfvstatic int
2847270346Sjfvixl_setup_stations(struct ixl_pf *pf)
2848266423Sjfv{
2849266423Sjfv	device_t		dev = pf->dev;
2850270346Sjfv	struct ixl_vsi		*vsi;
2851270346Sjfv	struct ixl_queue	*que;
2852266423Sjfv	struct tx_ring		*txr;
2853266423Sjfv	struct rx_ring		*rxr;
2854266423Sjfv	int 			rsize, tsize;
2855266423Sjfv	int			error = I40E_SUCCESS;
2856266423Sjfv
2857266423Sjfv	vsi = &pf->vsi;
2858266423Sjfv	vsi->back = (void *)pf;
2859266423Sjfv	vsi->hw = &pf->hw;
2860266423Sjfv	vsi->id = 0;
2861266423Sjfv	vsi->num_vlans = 0;
2862279858Sjfv	vsi->back = pf;
2863266423Sjfv
2864266423Sjfv	/* Get memory for the station queues */
2865266423Sjfv        if (!(vsi->queues =
2866270346Sjfv            (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2867266423Sjfv            vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2868266423Sjfv                device_printf(dev, "Unable to allocate queue memory\n");
2869266423Sjfv                error = ENOMEM;
2870266423Sjfv                goto early;
2871266423Sjfv        }
2872266423Sjfv
2873266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
2874266423Sjfv		que = &vsi->queues[i];
2875270346Sjfv		que->num_desc = ixl_ringsz;
2876266423Sjfv		que->me = i;
2877266423Sjfv		que->vsi = vsi;
2878269198Sjfv		/* mark the queue as active */
2879269198Sjfv		vsi->active_queues |= (u64)1 << que->me;
2880266423Sjfv		txr = &que->txr;
2881266423Sjfv		txr->que = que;
2882269198Sjfv		txr->tail = I40E_QTX_TAIL(que->me);
2883266423Sjfv
2884266423Sjfv		/* Initialize the TX lock */
2885266423Sjfv		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2886266423Sjfv		    device_get_nameunit(dev), que->me);
2887266423Sjfv		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2888266423Sjfv		/* Create the TX descriptor ring */
2889269198Sjfv		tsize = roundup2((que->num_desc *
2890269198Sjfv		    sizeof(struct i40e_tx_desc)) +
2891269198Sjfv		    sizeof(u32), DBA_ALIGN);
2892271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
2893271834Sbz		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2894266423Sjfv			device_printf(dev,
2895266423Sjfv			    "Unable to allocate TX Descriptor memory\n");
2896266423Sjfv			error = ENOMEM;
2897266423Sjfv			goto fail;
2898266423Sjfv		}
2899266423Sjfv		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2900266423Sjfv		bzero((void *)txr->base, tsize);
2901266423Sjfv       		/* Now allocate transmit soft structs for the ring */
2902270346Sjfv       		if (ixl_allocate_tx_data(que)) {
2903266423Sjfv			device_printf(dev,
2904266423Sjfv			    "Critical Failure setting up TX structures\n");
2905266423Sjfv			error = ENOMEM;
2906266423Sjfv			goto fail;
2907266423Sjfv       		}
2908266423Sjfv		/* Allocate a buf ring */
2909266423Sjfv		txr->br = buf_ring_alloc(4096, M_DEVBUF,
2910266423Sjfv		    M_WAITOK, &txr->mtx);
2911266423Sjfv		if (txr->br == NULL) {
2912266423Sjfv			device_printf(dev,
2913266423Sjfv			    "Critical Failure setting up TX buf ring\n");
2914266423Sjfv			error = ENOMEM;
2915266423Sjfv			goto fail;
2916266423Sjfv       		}
2917266423Sjfv
2918266423Sjfv		/*
2919266423Sjfv		 * Next the RX queues...
2920266423Sjfv		 */
2921266423Sjfv		rsize = roundup2(que->num_desc *
2922266423Sjfv		    sizeof(union i40e_rx_desc), DBA_ALIGN);
2923266423Sjfv		rxr = &que->rxr;
2924266423Sjfv		rxr->que = que;
2925269198Sjfv		rxr->tail = I40E_QRX_TAIL(que->me);
2926266423Sjfv
2927266423Sjfv		/* Initialize the RX side lock */
2928266423Sjfv		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2929266423Sjfv		    device_get_nameunit(dev), que->me);
2930266423Sjfv		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2931266423Sjfv
2932271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
2933271834Sbz		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2934266423Sjfv			device_printf(dev,
2935266423Sjfv			    "Unable to allocate RX Descriptor memory\n");
2936266423Sjfv			error = ENOMEM;
2937266423Sjfv			goto fail;
2938266423Sjfv		}
2939266423Sjfv		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2940266423Sjfv		bzero((void *)rxr->base, rsize);
2941266423Sjfv
2942266423Sjfv        	/* Allocate receive soft structs for the ring*/
2943270346Sjfv		if (ixl_allocate_rx_data(que)) {
2944266423Sjfv			device_printf(dev,
2945266423Sjfv			    "Critical Failure setting up receive structs\n");
2946266423Sjfv			error = ENOMEM;
2947266423Sjfv			goto fail;
2948266423Sjfv		}
2949266423Sjfv	}
2950266423Sjfv
2951266423Sjfv	return (0);
2952266423Sjfv
2953266423Sjfvfail:
2954266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
2955266423Sjfv		que = &vsi->queues[i];
2956266423Sjfv		rxr = &que->rxr;
2957266423Sjfv		txr = &que->txr;
2958266423Sjfv		if (rxr->base)
2959271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2960266423Sjfv		if (txr->base)
2961271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
2962266423Sjfv	}
2963266423Sjfv
2964266423Sjfvearly:
2965266423Sjfv	return (error);
2966266423Sjfv}
2967266423Sjfv
2968266423Sjfv/*
2969266423Sjfv** Provide a update to the queue RX
2970266423Sjfv** interrupt moderation value.
2971266423Sjfv*/
2972266423Sjfvstatic void
2973270346Sjfvixl_set_queue_rx_itr(struct ixl_queue *que)
2974266423Sjfv{
2975270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
2976266423Sjfv	struct i40e_hw	*hw = vsi->hw;
2977266423Sjfv	struct rx_ring	*rxr = &que->rxr;
2978266423Sjfv	u16		rx_itr;
2979266423Sjfv	u16		rx_latency = 0;
2980266423Sjfv	int		rx_bytes;
2981266423Sjfv
2982266423Sjfv
2983266423Sjfv	/* Idle, do nothing */
2984266423Sjfv	if (rxr->bytes == 0)
2985266423Sjfv		return;
2986266423Sjfv
2987270346Sjfv	if (ixl_dynamic_rx_itr) {
2988266423Sjfv		rx_bytes = rxr->bytes/rxr->itr;
2989266423Sjfv		rx_itr = rxr->itr;
2990266423Sjfv
2991266423Sjfv		/* Adjust latency range */
2992266423Sjfv		switch (rxr->latency) {
2993270346Sjfv		case IXL_LOW_LATENCY:
2994266423Sjfv			if (rx_bytes > 10) {
2995270346Sjfv				rx_latency = IXL_AVE_LATENCY;
2996270346Sjfv				rx_itr = IXL_ITR_20K;
2997266423Sjfv			}
2998266423Sjfv			break;
2999270346Sjfv		case IXL_AVE_LATENCY:
3000266423Sjfv			if (rx_bytes > 20) {
3001270346Sjfv				rx_latency = IXL_BULK_LATENCY;
3002270346Sjfv				rx_itr = IXL_ITR_8K;
3003266423Sjfv			} else if (rx_bytes <= 10) {
3004270346Sjfv				rx_latency = IXL_LOW_LATENCY;
3005270346Sjfv				rx_itr = IXL_ITR_100K;
3006266423Sjfv			}
3007266423Sjfv			break;
3008270346Sjfv		case IXL_BULK_LATENCY:
3009266423Sjfv			if (rx_bytes <= 20) {
3010270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3011270346Sjfv				rx_itr = IXL_ITR_20K;
3012266423Sjfv			}
3013266423Sjfv			break;
3014266423Sjfv       		 }
3015266423Sjfv
3016266423Sjfv		rxr->latency = rx_latency;
3017266423Sjfv
3018266423Sjfv		if (rx_itr != rxr->itr) {
3019266423Sjfv			/* do an exponential smoothing */
3020266423Sjfv			rx_itr = (10 * rx_itr * rxr->itr) /
3021266423Sjfv			    ((9 * rx_itr) + rxr->itr);
3022270346Sjfv			rxr->itr = rx_itr & IXL_MAX_ITR;
3023270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3024266423Sjfv			    que->me), rxr->itr);
3025266423Sjfv		}
3026266423Sjfv	} else { /* We may have have toggled to non-dynamic */
3027270346Sjfv		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3028270346Sjfv			vsi->rx_itr_setting = ixl_rx_itr;
3029266423Sjfv		/* Update the hardware if needed */
3030266423Sjfv		if (rxr->itr != vsi->rx_itr_setting) {
3031266423Sjfv			rxr->itr = vsi->rx_itr_setting;
3032270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3033266423Sjfv			    que->me), rxr->itr);
3034266423Sjfv		}
3035266423Sjfv	}
3036266423Sjfv	rxr->bytes = 0;
3037266423Sjfv	rxr->packets = 0;
3038266423Sjfv	return;
3039266423Sjfv}
3040266423Sjfv
3041266423Sjfv
3042266423Sjfv/*
3043266423Sjfv** Provide a update to the queue TX
3044266423Sjfv** interrupt moderation value.
3045266423Sjfv*/
3046266423Sjfvstatic void
3047270346Sjfvixl_set_queue_tx_itr(struct ixl_queue *que)
3048266423Sjfv{
3049270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3050266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3051266423Sjfv	struct tx_ring	*txr = &que->txr;
3052266423Sjfv	u16		tx_itr;
3053266423Sjfv	u16		tx_latency = 0;
3054266423Sjfv	int		tx_bytes;
3055266423Sjfv
3056266423Sjfv
3057266423Sjfv	/* Idle, do nothing */
3058266423Sjfv	if (txr->bytes == 0)
3059266423Sjfv		return;
3060266423Sjfv
3061270346Sjfv	if (ixl_dynamic_tx_itr) {
3062266423Sjfv		tx_bytes = txr->bytes/txr->itr;
3063266423Sjfv		tx_itr = txr->itr;
3064266423Sjfv
3065266423Sjfv		switch (txr->latency) {
3066270346Sjfv		case IXL_LOW_LATENCY:
3067266423Sjfv			if (tx_bytes > 10) {
3068270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3069270346Sjfv				tx_itr = IXL_ITR_20K;
3070266423Sjfv			}
3071266423Sjfv			break;
3072270346Sjfv		case IXL_AVE_LATENCY:
3073266423Sjfv			if (tx_bytes > 20) {
3074270346Sjfv				tx_latency = IXL_BULK_LATENCY;
3075270346Sjfv				tx_itr = IXL_ITR_8K;
3076266423Sjfv			} else if (tx_bytes <= 10) {
3077270346Sjfv				tx_latency = IXL_LOW_LATENCY;
3078270346Sjfv				tx_itr = IXL_ITR_100K;
3079266423Sjfv			}
3080266423Sjfv			break;
3081270346Sjfv		case IXL_BULK_LATENCY:
3082266423Sjfv			if (tx_bytes <= 20) {
3083270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3084270346Sjfv				tx_itr = IXL_ITR_20K;
3085266423Sjfv			}
3086266423Sjfv			break;
3087266423Sjfv		}
3088266423Sjfv
3089266423Sjfv		txr->latency = tx_latency;
3090266423Sjfv
3091266423Sjfv		if (tx_itr != txr->itr) {
3092266423Sjfv       	         /* do an exponential smoothing */
3093266423Sjfv			tx_itr = (10 * tx_itr * txr->itr) /
3094266423Sjfv			    ((9 * tx_itr) + txr->itr);
3095270346Sjfv			txr->itr = tx_itr & IXL_MAX_ITR;
3096270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3097266423Sjfv			    que->me), txr->itr);
3098266423Sjfv		}
3099266423Sjfv
3100266423Sjfv	} else { /* We may have have toggled to non-dynamic */
3101270346Sjfv		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3102270346Sjfv			vsi->tx_itr_setting = ixl_tx_itr;
3103266423Sjfv		/* Update the hardware if needed */
3104266423Sjfv		if (txr->itr != vsi->tx_itr_setting) {
3105266423Sjfv			txr->itr = vsi->tx_itr_setting;
3106270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3107266423Sjfv			    que->me), txr->itr);
3108266423Sjfv		}
3109266423Sjfv	}
3110266423Sjfv	txr->bytes = 0;
3111266423Sjfv	txr->packets = 0;
3112266423Sjfv	return;
3113266423Sjfv}
3114266423Sjfv
3115279858Sjfv#define QUEUE_NAME_LEN 32
3116266423Sjfv
3117266423Sjfvstatic void
3118279858Sjfvixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3119279858Sjfv    struct sysctl_ctx_list *ctx, const char *sysctl_name)
3120279858Sjfv{
3121279858Sjfv	struct sysctl_oid *tree;
3122279858Sjfv	struct sysctl_oid_list *child;
3123279858Sjfv	struct sysctl_oid_list *vsi_list;
3124279858Sjfv
3125279858Sjfv	tree = device_get_sysctl_tree(pf->dev);
3126279858Sjfv	child = SYSCTL_CHILDREN(tree);
3127279858Sjfv	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3128279858Sjfv				   CTLFLAG_RD, NULL, "VSI Number");
3129279858Sjfv	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3130279858Sjfv
3131279858Sjfv	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3132279858Sjfv}
3133279858Sjfv
3134279858Sjfvstatic void
3135270346Sjfvixl_add_hw_stats(struct ixl_pf *pf)
3136266423Sjfv{
3137266423Sjfv	device_t dev = pf->dev;
3138270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
3139270346Sjfv	struct ixl_queue *queues = vsi->queues;
3140269198Sjfv	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3141266423Sjfv
3142266423Sjfv	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3143266423Sjfv	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3144266423Sjfv	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3145279858Sjfv	struct sysctl_oid_list *vsi_list;
3146266423Sjfv
3147279858Sjfv	struct sysctl_oid *queue_node;
3148279858Sjfv	struct sysctl_oid_list *queue_list;
3149266423Sjfv
3150269198Sjfv	struct tx_ring *txr;
3151269198Sjfv	struct rx_ring *rxr;
3152279858Sjfv	char queue_namebuf[QUEUE_NAME_LEN];
3153266423Sjfv
3154266423Sjfv	/* Driver statistics */
3155266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3156266423Sjfv			CTLFLAG_RD, &pf->watchdog_events,
3157266423Sjfv			"Watchdog timeouts");
3158266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3159266423Sjfv			CTLFLAG_RD, &pf->admin_irq,
3160266423Sjfv			"Admin Queue IRQ Handled");
3161266423Sjfv
3162279858Sjfv	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "vc_debug_level",
3163279858Sjfv	    CTLFLAG_RW, &pf->vc_debug_lvl, 0,
3164279858Sjfv	    "PF/VF Virtual Channel debug logging level");
3165266423Sjfv
3166279858Sjfv	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3167279858Sjfv	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3168266423Sjfv
3169266423Sjfv	/* Queue statistics */
3170266423Sjfv	for (int q = 0; q < vsi->num_queues; q++) {
3171269198Sjfv		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3172279858Sjfv		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3173279858Sjfv		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3174266423Sjfv		queue_list = SYSCTL_CHILDREN(queue_node);
3175266423Sjfv
3176269198Sjfv		txr = &(queues[q].txr);
3177269198Sjfv		rxr = &(queues[q].rxr);
3178269198Sjfv
3179269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3180266423Sjfv				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3181266423Sjfv				"m_defrag() failed");
3182269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3183266423Sjfv				CTLFLAG_RD, &(queues[q].dropped_pkts),
3184266423Sjfv				"Driver dropped packets");
3185266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3186266423Sjfv				CTLFLAG_RD, &(queues[q].irqs),
3187266423Sjfv				"irqs on this queue");
3188269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3189266423Sjfv				CTLFLAG_RD, &(queues[q].tso),
3190266423Sjfv				"TSO");
3191269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3192266423Sjfv				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3193266423Sjfv				"Driver tx dma failure in xmit");
3194266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3195266423Sjfv				CTLFLAG_RD, &(txr->no_desc),
3196266423Sjfv				"Queue No Descriptor Available");
3197266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3198266423Sjfv				CTLFLAG_RD, &(txr->total_packets),
3199266423Sjfv				"Queue Packets Transmitted");
3200266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3201270346Sjfv				CTLFLAG_RD, &(txr->tx_bytes),
3202266423Sjfv				"Queue Bytes Transmitted");
3203266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3204266423Sjfv				CTLFLAG_RD, &(rxr->rx_packets),
3205266423Sjfv				"Queue Packets Received");
3206266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3207266423Sjfv				CTLFLAG_RD, &(rxr->rx_bytes),
3208266423Sjfv				"Queue Bytes Received");
3209266423Sjfv	}
3210266423Sjfv
3211266423Sjfv	/* MAC stats */
3212270346Sjfv	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3213266423Sjfv}
3214266423Sjfv
3215266423Sjfvstatic void
3216270346Sjfvixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3217266423Sjfv	struct sysctl_oid_list *child,
3218266423Sjfv	struct i40e_eth_stats *eth_stats)
3219266423Sjfv{
3220270346Sjfv	struct ixl_sysctl_info ctls[] =
3221266423Sjfv	{
3222266423Sjfv		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3223266423Sjfv		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3224266423Sjfv			"Unicast Packets Received"},
3225266423Sjfv		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3226266423Sjfv			"Multicast Packets Received"},
3227266423Sjfv		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3228266423Sjfv			"Broadcast Packets Received"},
3229269198Sjfv		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3230266423Sjfv		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3231266423Sjfv		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3232266423Sjfv		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3233266423Sjfv			"Multicast Packets Transmitted"},
3234266423Sjfv		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3235266423Sjfv			"Broadcast Packets Transmitted"},
3236266423Sjfv		// end
3237266423Sjfv		{0,0,0}
3238266423Sjfv	};
3239266423Sjfv
3240270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3241266423Sjfv	while (entry->stat != 0)
3242266423Sjfv	{
3243266423Sjfv		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3244266423Sjfv				CTLFLAG_RD, entry->stat,
3245266423Sjfv				entry->description);
3246266423Sjfv		entry++;
3247266423Sjfv	}
3248266423Sjfv}
3249266423Sjfv
3250266423Sjfvstatic void
3251270346Sjfvixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3252266423Sjfv	struct sysctl_oid_list *child,
3253266423Sjfv	struct i40e_hw_port_stats *stats)
3254266423Sjfv{
3255269198Sjfv	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3256266423Sjfv				    CTLFLAG_RD, NULL, "Mac Statistics");
3257266423Sjfv	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3258266423Sjfv
3259266423Sjfv	struct i40e_eth_stats *eth_stats = &stats->eth;
3260270346Sjfv	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3261266423Sjfv
3262270346Sjfv	struct ixl_sysctl_info ctls[] =
3263266423Sjfv	{
3264266423Sjfv		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3265266423Sjfv		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3266266423Sjfv		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3267266423Sjfv		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3268266423Sjfv		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3269266423Sjfv		/* Packet Reception Stats */
3270266423Sjfv		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3271266423Sjfv		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3272266423Sjfv		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3273266423Sjfv		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3274266423Sjfv		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3275266423Sjfv		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3276266423Sjfv		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3277266423Sjfv		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3278266423Sjfv		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3279266423Sjfv		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3280266423Sjfv		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3281266423Sjfv		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3282266423Sjfv		/* Packet Transmission Stats */
3283266423Sjfv		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3284266423Sjfv		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3285266423Sjfv		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3286266423Sjfv		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3287266423Sjfv		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3288266423Sjfv		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3289266423Sjfv		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3290266423Sjfv		/* Flow control */
3291266423Sjfv		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3292266423Sjfv		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3293266423Sjfv		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3294266423Sjfv		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3295266423Sjfv		/* End */
3296266423Sjfv		{0,0,0}
3297266423Sjfv	};
3298266423Sjfv
3299270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3300266423Sjfv	while (entry->stat != 0)
3301266423Sjfv	{
3302266423Sjfv		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3303266423Sjfv				CTLFLAG_RD, entry->stat,
3304266423Sjfv				entry->description);
3305266423Sjfv		entry++;
3306266423Sjfv	}
3307266423Sjfv}
3308266423Sjfv
3309266423Sjfv/*
3310270346Sjfv** ixl_config_rss - setup RSS
3311266423Sjfv**  - note this is done for the single vsi
3312266423Sjfv*/
3313270346Sjfvstatic void ixl_config_rss(struct ixl_vsi *vsi)
3314266423Sjfv{
3315270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3316266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3317266423Sjfv	u32		lut = 0;
3318277084Sjfv	u64		set_hena = 0, hena;
3319277084Sjfv	int		i, j, que_id;
3320277084Sjfv#ifdef RSS
3321277084Sjfv	u32		rss_hash_config;
3322277084Sjfv	u32		rss_seed[IXL_KEYSZ];
3323277084Sjfv#else
3324277084Sjfv	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3325277084Sjfv			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3326277084Sjfv			    0x35897377, 0x328b25e1, 0x4fa98922,
3327277084Sjfv			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3328277084Sjfv#endif
3329266423Sjfv
3330277084Sjfv#ifdef RSS
3331277084Sjfv        /* Fetch the configured RSS key */
3332277084Sjfv        rss_getkey((uint8_t *) &rss_seed);
3333277084Sjfv#endif
3334266423Sjfv
3335266423Sjfv	/* Fill out hash function seed */
3336277084Sjfv	for (i = 0; i < IXL_KEYSZ; i++)
3337277084Sjfv                wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3338266423Sjfv
3339266423Sjfv	/* Enable PCTYPES for RSS: */
3340277084Sjfv#ifdef RSS
3341277084Sjfv	rss_hash_config = rss_gethashconfig();
3342277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3343277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3344277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3345277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3346277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3347277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3348277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3349277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3350279033Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3351277151Sjfv		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3352277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3353277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3354277084Sjfv        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3355277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3356277084Sjfv#else
3357266423Sjfv	set_hena =
3358266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3359266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3360266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3361266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3362266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3363266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3364266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3365266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3366266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3367266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3368266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3369277084Sjfv#endif
3370266423Sjfv	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3371266423Sjfv	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3372266423Sjfv	hena |= set_hena;
3373266423Sjfv	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3374266423Sjfv	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3375266423Sjfv
3376266423Sjfv	/* Populate the LUT with max no. of queues in round robin fashion */
3377266423Sjfv	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3378266423Sjfv		if (j == vsi->num_queues)
3379266423Sjfv			j = 0;
3380277084Sjfv#ifdef RSS
3381277084Sjfv		/*
3382277084Sjfv		 * Fetch the RSS bucket id for the given indirection entry.
3383277084Sjfv		 * Cap it at the number of configured buckets (which is
3384277084Sjfv		 * num_queues.)
3385277084Sjfv		 */
3386277084Sjfv		que_id = rss_get_indirection_to_bucket(i);
3387277262Sjfv		que_id = que_id % vsi->num_queues;
3388277084Sjfv#else
3389277084Sjfv		que_id = j;
3390277084Sjfv#endif
3391266423Sjfv		/* lut = 4-byte sliding window of 4 lut entries */
3392277084Sjfv		lut = (lut << 8) | (que_id &
3393266423Sjfv		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3394266423Sjfv		/* On i = 3, we have 4 entries in lut; write to the register */
3395266423Sjfv		if ((i & 3) == 3)
3396266423Sjfv			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3397266423Sjfv	}
3398270346Sjfv	ixl_flush(hw);
3399266423Sjfv}
3400266423Sjfv
3401266423Sjfv
3402266423Sjfv/*
3403266423Sjfv** This routine is run via an vlan config EVENT,
3404266423Sjfv** it enables us to use the HW Filter table since
3405266423Sjfv** we can get the vlan id. This just creates the
3406266423Sjfv** entry in the soft version of the VFTA, init will
3407266423Sjfv** repopulate the real table.
3408266423Sjfv*/
3409266423Sjfvstatic void
3410270346Sjfvixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3411266423Sjfv{
3412270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3413266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3414270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3415266423Sjfv
3416266423Sjfv	if (ifp->if_softc !=  arg)   /* Not our event */
3417266423Sjfv		return;
3418266423Sjfv
3419266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3420266423Sjfv		return;
3421266423Sjfv
3422270346Sjfv	IXL_PF_LOCK(pf);
3423266423Sjfv	++vsi->num_vlans;
3424270346Sjfv	ixl_add_filter(vsi, hw->mac.addr, vtag);
3425270346Sjfv	IXL_PF_UNLOCK(pf);
3426266423Sjfv}
3427266423Sjfv
3428266423Sjfv/*
3429266423Sjfv** This routine is run via an vlan
3430266423Sjfv** unconfig EVENT, remove our entry
3431266423Sjfv** in the soft vfta.
3432266423Sjfv*/
3433266423Sjfvstatic void
3434270346Sjfvixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3435266423Sjfv{
3436270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3437266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3438270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3439266423Sjfv
3440266423Sjfv	if (ifp->if_softc !=  arg)
3441266423Sjfv		return;
3442266423Sjfv
3443266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3444266423Sjfv		return;
3445266423Sjfv
3446270346Sjfv	IXL_PF_LOCK(pf);
3447266423Sjfv	--vsi->num_vlans;
3448270346Sjfv	ixl_del_filter(vsi, hw->mac.addr, vtag);
3449270346Sjfv	IXL_PF_UNLOCK(pf);
3450266423Sjfv}
3451266423Sjfv
3452266423Sjfv/*
3453266423Sjfv** This routine updates vlan filters, called by init
3454266423Sjfv** it scans the filter table and then updates the hw
3455266423Sjfv** after a soft reset.
3456266423Sjfv*/
3457266423Sjfvstatic void
3458270346Sjfvixl_setup_vlan_filters(struct ixl_vsi *vsi)
3459266423Sjfv{
3460270346Sjfv	struct ixl_mac_filter	*f;
3461266423Sjfv	int			cnt = 0, flags;
3462266423Sjfv
3463266423Sjfv	if (vsi->num_vlans == 0)
3464266423Sjfv		return;
3465266423Sjfv	/*
3466266423Sjfv	** Scan the filter list for vlan entries,
3467266423Sjfv	** mark them for addition and then call
3468266423Sjfv	** for the AQ update.
3469266423Sjfv	*/
3470266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3471270346Sjfv		if (f->flags & IXL_FILTER_VLAN) {
3472266423Sjfv			f->flags |=
3473270346Sjfv			    (IXL_FILTER_ADD |
3474270346Sjfv			    IXL_FILTER_USED);
3475266423Sjfv			cnt++;
3476266423Sjfv		}
3477266423Sjfv	}
3478266423Sjfv	if (cnt == 0) {
3479266423Sjfv		printf("setup vlan: no filters found!\n");
3480266423Sjfv		return;
3481266423Sjfv	}
3482270346Sjfv	flags = IXL_FILTER_VLAN;
3483270346Sjfv	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3484270346Sjfv	ixl_add_hw_filters(vsi, flags, cnt);
3485266423Sjfv	return;
3486266423Sjfv}
3487266423Sjfv
3488266423Sjfv/*
3489266423Sjfv** Initialize filter list and add filters that the hardware
3490266423Sjfv** needs to know about.
3491266423Sjfv*/
3492266423Sjfvstatic void
3493270346Sjfvixl_init_filters(struct ixl_vsi *vsi)
3494266423Sjfv{
3495269198Sjfv	/* Add broadcast address */
3496279858Sjfv	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3497266423Sjfv}
3498266423Sjfv
3499266423Sjfv/*
3500266423Sjfv** This routine adds mulicast filters
3501266423Sjfv*/
3502266423Sjfvstatic void
3503270346Sjfvixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3504266423Sjfv{
3505270346Sjfv	struct ixl_mac_filter *f;
3506266423Sjfv
3507266423Sjfv	/* Does one already exist */
3508270346Sjfv	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3509266423Sjfv	if (f != NULL)
3510266423Sjfv		return;
3511266423Sjfv
3512270346Sjfv	f = ixl_get_filter(vsi);
3513266423Sjfv	if (f == NULL) {
3514266423Sjfv		printf("WARNING: no filter available!!\n");
3515266423Sjfv		return;
3516266423Sjfv	}
3517266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3518270346Sjfv	f->vlan = IXL_VLAN_ANY;
3519270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3520270346Sjfv	    | IXL_FILTER_MC);
3521266423Sjfv
3522266423Sjfv	return;
3523266423Sjfv}
3524266423Sjfv
3525279858Sjfvstatic void
3526279858Sjfvixl_reconfigure_filters(struct ixl_vsi *vsi)
3527279858Sjfv{
3528279858Sjfv
3529279858Sjfv	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3530279858Sjfv}
3531279858Sjfv
3532266423Sjfv/*
3533266423Sjfv** This routine adds macvlan filters
3534266423Sjfv*/
3535266423Sjfvstatic void
3536270346Sjfvixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3537266423Sjfv{
3538270346Sjfv	struct ixl_mac_filter	*f, *tmp;
3539279858Sjfv	struct ixl_pf		*pf;
3540279858Sjfv	device_t		dev;
3541266423Sjfv
3542270346Sjfv	DEBUGOUT("ixl_add_filter: begin");
3543266423Sjfv
3544279858Sjfv	pf = vsi->back;
3545279858Sjfv	dev = pf->dev;
3546279858Sjfv
3547266423Sjfv	/* Does one already exist */
3548270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3549266423Sjfv	if (f != NULL)
3550266423Sjfv		return;
3551266423Sjfv	/*
3552266423Sjfv	** Is this the first vlan being registered, if so we
3553266423Sjfv	** need to remove the ANY filter that indicates we are
3554266423Sjfv	** not in a vlan, and replace that with a 0 filter.
3555266423Sjfv	*/
3556270346Sjfv	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3557270346Sjfv		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3558266423Sjfv		if (tmp != NULL) {
3559270346Sjfv			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3560270346Sjfv			ixl_add_filter(vsi, macaddr, 0);
3561266423Sjfv		}
3562266423Sjfv	}
3563266423Sjfv
3564270346Sjfv	f = ixl_get_filter(vsi);
3565266423Sjfv	if (f == NULL) {
3566266423Sjfv		device_printf(dev, "WARNING: no filter available!!\n");
3567266423Sjfv		return;
3568266423Sjfv	}
3569266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3570266423Sjfv	f->vlan = vlan;
3571270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3572270346Sjfv	if (f->vlan != IXL_VLAN_ANY)
3573270346Sjfv		f->flags |= IXL_FILTER_VLAN;
3574279858Sjfv	else
3575279858Sjfv		vsi->num_macs++;
3576266423Sjfv
3577270346Sjfv	ixl_add_hw_filters(vsi, f->flags, 1);
3578266423Sjfv	return;
3579266423Sjfv}
3580266423Sjfv
3581266423Sjfvstatic void
3582270346Sjfvixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3583266423Sjfv{
3584270346Sjfv	struct ixl_mac_filter *f;
3585266423Sjfv
3586270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3587266423Sjfv	if (f == NULL)
3588266423Sjfv		return;
3589266423Sjfv
3590270346Sjfv	f->flags |= IXL_FILTER_DEL;
3591270346Sjfv	ixl_del_hw_filters(vsi, 1);
3592279858Sjfv	vsi->num_macs--;
3593266423Sjfv
3594266423Sjfv	/* Check if this is the last vlan removal */
3595270346Sjfv	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3596266423Sjfv		/* Switch back to a non-vlan filter */
3597270346Sjfv		ixl_del_filter(vsi, macaddr, 0);
3598270346Sjfv		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3599266423Sjfv	}
3600266423Sjfv	return;
3601266423Sjfv}
3602266423Sjfv
3603266423Sjfv/*
3604266423Sjfv** Find the filter with both matching mac addr and vlan id
3605266423Sjfv*/
3606270346Sjfvstatic struct ixl_mac_filter *
3607270346Sjfvixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3608266423Sjfv{
3609270346Sjfv	struct ixl_mac_filter	*f;
3610266423Sjfv	bool			match = FALSE;
3611266423Sjfv
3612266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3613266423Sjfv		if (!cmp_etheraddr(f->macaddr, macaddr))
3614266423Sjfv			continue;
3615266423Sjfv		if (f->vlan == vlan) {
3616266423Sjfv			match = TRUE;
3617266423Sjfv			break;
3618266423Sjfv		}
3619266423Sjfv	}
3620266423Sjfv
3621266423Sjfv	if (!match)
3622266423Sjfv		f = NULL;
3623266423Sjfv	return (f);
3624266423Sjfv}
3625266423Sjfv
3626266423Sjfv/*
3627266423Sjfv** This routine takes additions to the vsi filter
3628266423Sjfv** table and creates an Admin Queue call to create
3629266423Sjfv** the filters in the hardware.
3630266423Sjfv*/
3631266423Sjfvstatic void
3632270346Sjfvixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3633266423Sjfv{
3634266423Sjfv	struct i40e_aqc_add_macvlan_element_data *a, *b;
3635270346Sjfv	struct ixl_mac_filter	*f;
3636279858Sjfv	struct ixl_pf		*pf;
3637279858Sjfv	struct i40e_hw		*hw;
3638279858Sjfv	device_t		dev;
3639279858Sjfv	int			err, j = 0;
3640266423Sjfv
3641279858Sjfv	pf = vsi->back;
3642279858Sjfv	dev = pf->dev;
3643279858Sjfv	hw = &pf->hw;
3644279858Sjfv	IXL_PF_LOCK_ASSERT(pf);
3645279858Sjfv
3646266423Sjfv	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3647266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
3648266423Sjfv	if (a == NULL) {
3649277084Sjfv		device_printf(dev, "add_hw_filters failed to get memory\n");
3650266423Sjfv		return;
3651266423Sjfv	}
3652266423Sjfv
3653266423Sjfv	/*
3654266423Sjfv	** Scan the filter list, each time we find one
3655266423Sjfv	** we add it to the admin queue array and turn off
3656266423Sjfv	** the add bit.
3657266423Sjfv	*/
3658266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3659266423Sjfv		if (f->flags == flags) {
3660266423Sjfv			b = &a[j]; // a pox on fvl long names :)
3661266423Sjfv			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3662279858Sjfv			if (f->vlan == IXL_VLAN_ANY) {
3663279858Sjfv				b->vlan_tag = 0;
3664279858Sjfv				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3665279858Sjfv			} else {
3666279858Sjfv				b->vlan_tag = f->vlan;
3667279858Sjfv				b->flags = 0;
3668279858Sjfv			}
3669279858Sjfv			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3670270346Sjfv			f->flags &= ~IXL_FILTER_ADD;
3671266423Sjfv			j++;
3672266423Sjfv		}
3673266423Sjfv		if (j == cnt)
3674266423Sjfv			break;
3675266423Sjfv	}
3676266423Sjfv	if (j > 0) {
3677266423Sjfv		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3678266423Sjfv		if (err)
3679279033Sjfv			device_printf(dev, "aq_add_macvlan err %d, "
3680279033Sjfv			    "aq_error %d\n", err, hw->aq.asq_last_status);
3681266423Sjfv		else
3682266423Sjfv			vsi->hw_filters_add += j;
3683266423Sjfv	}
3684266423Sjfv	free(a, M_DEVBUF);
3685266423Sjfv	return;
3686266423Sjfv}
3687266423Sjfv
3688266423Sjfv/*
3689266423Sjfv** This routine takes removals in the vsi filter
3690266423Sjfv** table and creates an Admin Queue call to delete
3691266423Sjfv** the filters in the hardware.
3692266423Sjfv*/
3693266423Sjfvstatic void
3694270346Sjfvixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3695266423Sjfv{
3696266423Sjfv	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3697279858Sjfv	struct ixl_pf		*pf;
3698279858Sjfv	struct i40e_hw		*hw;
3699279858Sjfv	device_t		dev;
3700270346Sjfv	struct ixl_mac_filter	*f, *f_temp;
3701266423Sjfv	int			err, j = 0;
3702266423Sjfv
3703270346Sjfv	DEBUGOUT("ixl_del_hw_filters: begin\n");
3704266423Sjfv
3705279858Sjfv	pf = vsi->back;
3706279858Sjfv	hw = &pf->hw;
3707279858Sjfv	dev = pf->dev;
3708279858Sjfv
3709266423Sjfv	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3710266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
3711266423Sjfv	if (d == NULL) {
3712266423Sjfv		printf("del hw filter failed to get memory\n");
3713266423Sjfv		return;
3714266423Sjfv	}
3715266423Sjfv
3716266423Sjfv	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3717270346Sjfv		if (f->flags & IXL_FILTER_DEL) {
3718266423Sjfv			e = &d[j]; // a pox on fvl long names :)
3719266423Sjfv			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3720270346Sjfv			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3721266423Sjfv			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3722266423Sjfv			/* delete entry from vsi list */
3723270346Sjfv			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3724266423Sjfv			free(f, M_DEVBUF);
3725266423Sjfv			j++;
3726266423Sjfv		}
3727266423Sjfv		if (j == cnt)
3728266423Sjfv			break;
3729266423Sjfv	}
3730266423Sjfv	if (j > 0) {
3731266423Sjfv		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3732266423Sjfv		/* NOTE: returns ENOENT every time but seems to work fine,
3733266423Sjfv		   so we'll ignore that specific error. */
3734277084Sjfv		// TODO: Does this still occur on current firmwares?
3735266423Sjfv		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3736266423Sjfv			int sc = 0;
3737266423Sjfv			for (int i = 0; i < j; i++)
3738266423Sjfv				sc += (!d[i].error_code);
3739266423Sjfv			vsi->hw_filters_del += sc;
3740266423Sjfv			device_printf(dev,
3741266423Sjfv			    "Failed to remove %d/%d filters, aq error %d\n",
3742266423Sjfv			    j - sc, j, hw->aq.asq_last_status);
3743266423Sjfv		} else
3744266423Sjfv			vsi->hw_filters_del += j;
3745266423Sjfv	}
3746266423Sjfv	free(d, M_DEVBUF);
3747266423Sjfv
3748270346Sjfv	DEBUGOUT("ixl_del_hw_filters: end\n");
3749266423Sjfv	return;
3750266423Sjfv}
3751266423Sjfv
3752279858Sjfvstatic int
3753270346Sjfvixl_enable_rings(struct ixl_vsi *vsi)
3754266423Sjfv{
3755279858Sjfv	struct ixl_pf	*pf = vsi->back;
3756279858Sjfv	struct i40e_hw	*hw = &pf->hw;
3757279858Sjfv	int		index, error;
3758266423Sjfv	u32		reg;
3759266423Sjfv
3760279858Sjfv	error = 0;
3761266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3762279858Sjfv		index = vsi->first_queue + i;
3763279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, TRUE);
3764266423Sjfv
3765279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
3766266423Sjfv		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3767266423Sjfv		    I40E_QTX_ENA_QENA_STAT_MASK;
3768279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
3769266423Sjfv		/* Verify the enable took */
3770266423Sjfv		for (int j = 0; j < 10; j++) {
3771279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
3772266423Sjfv			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3773266423Sjfv				break;
3774266423Sjfv			i40e_msec_delay(10);
3775266423Sjfv		}
3776279858Sjfv		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3777279858Sjfv			device_printf(pf->dev, "TX queue %d disabled!\n",
3778279858Sjfv			    index);
3779279858Sjfv			error = ETIMEDOUT;
3780279858Sjfv		}
3781266423Sjfv
3782279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
3783266423Sjfv		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3784266423Sjfv		    I40E_QRX_ENA_QENA_STAT_MASK;
3785279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
3786266423Sjfv		/* Verify the enable took */
3787266423Sjfv		for (int j = 0; j < 10; j++) {
3788279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
3789266423Sjfv			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3790266423Sjfv				break;
3791266423Sjfv			i40e_msec_delay(10);
3792266423Sjfv		}
3793279858Sjfv		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3794279858Sjfv			device_printf(pf->dev, "RX queue %d disabled!\n",
3795279858Sjfv			    index);
3796279858Sjfv			error = ETIMEDOUT;
3797279858Sjfv		}
3798266423Sjfv	}
3799279858Sjfv
3800279858Sjfv	return (error);
3801266423Sjfv}
3802266423Sjfv
3803279858Sjfvstatic int
3804270346Sjfvixl_disable_rings(struct ixl_vsi *vsi)
3805266423Sjfv{
3806279858Sjfv	struct ixl_pf	*pf = vsi->back;
3807279858Sjfv	struct i40e_hw	*hw = &pf->hw;
3808279858Sjfv	int		index, error;
3809266423Sjfv	u32		reg;
3810266423Sjfv
3811279858Sjfv	error = 0;
3812266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3813279858Sjfv		index = vsi->first_queue + i;
3814279858Sjfv
3815279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, FALSE);
3816266423Sjfv		i40e_usec_delay(500);
3817266423Sjfv
3818279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
3819266423Sjfv		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3820279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
3821266423Sjfv		/* Verify the disable took */
3822266423Sjfv		for (int j = 0; j < 10; j++) {
3823279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
3824266423Sjfv			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3825266423Sjfv				break;
3826266423Sjfv			i40e_msec_delay(10);
3827266423Sjfv		}
3828279858Sjfv		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3829279858Sjfv			device_printf(pf->dev, "TX queue %d still enabled!\n",
3830279858Sjfv			    index);
3831279858Sjfv			error = ETIMEDOUT;
3832279858Sjfv		}
3833266423Sjfv
3834279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
3835266423Sjfv		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3836279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
3837266423Sjfv		/* Verify the disable took */
3838266423Sjfv		for (int j = 0; j < 10; j++) {
3839279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
3840266423Sjfv			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3841266423Sjfv				break;
3842266423Sjfv			i40e_msec_delay(10);
3843266423Sjfv		}
3844279858Sjfv		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3845279858Sjfv			device_printf(pf->dev, "RX queue %d still enabled!\n",
3846279858Sjfv			    index);
3847279858Sjfv			error = ETIMEDOUT;
3848279858Sjfv		}
3849266423Sjfv	}
3850279858Sjfv
3851279858Sjfv	return (error);
3852266423Sjfv}
3853266423Sjfv
3854269198Sjfv/**
3855270346Sjfv * ixl_handle_mdd_event
3856269198Sjfv *
3857269198Sjfv * Called from interrupt handler to identify possibly malicious vfs
3858269198Sjfv * (But also detects events from the PF, as well)
3859269198Sjfv **/
3860270346Sjfvstatic void ixl_handle_mdd_event(struct ixl_pf *pf)
3861269198Sjfv{
3862269198Sjfv	struct i40e_hw *hw = &pf->hw;
3863269198Sjfv	device_t dev = pf->dev;
3864269198Sjfv	bool mdd_detected = false;
3865269198Sjfv	bool pf_mdd_detected = false;
3866269198Sjfv	u32 reg;
3867269198Sjfv
3868269198Sjfv	/* find what triggered the MDD event */
3869269198Sjfv	reg = rd32(hw, I40E_GL_MDET_TX);
3870269198Sjfv	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3871269198Sjfv		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3872269198Sjfv				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3873269198Sjfv		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3874269198Sjfv				I40E_GL_MDET_TX_EVENT_SHIFT;
3875269198Sjfv		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3876269198Sjfv				I40E_GL_MDET_TX_QUEUE_SHIFT;
3877269198Sjfv		device_printf(dev,
3878269198Sjfv			 "Malicious Driver Detection event 0x%02x"
3879269198Sjfv			 " on TX queue %d pf number 0x%02x\n",
3880269198Sjfv			 event, queue, pf_num);
3881269198Sjfv		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3882269198Sjfv		mdd_detected = true;
3883269198Sjfv	}
3884269198Sjfv	reg = rd32(hw, I40E_GL_MDET_RX);
3885269198Sjfv	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3886269198Sjfv		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3887269198Sjfv				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3888269198Sjfv		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3889269198Sjfv				I40E_GL_MDET_RX_EVENT_SHIFT;
3890269198Sjfv		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3891269198Sjfv				I40E_GL_MDET_RX_QUEUE_SHIFT;
3892269198Sjfv		device_printf(dev,
3893269198Sjfv			 "Malicious Driver Detection event 0x%02x"
3894269198Sjfv			 " on RX queue %d of function 0x%02x\n",
3895269198Sjfv			 event, queue, func);
3896269198Sjfv		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3897269198Sjfv		mdd_detected = true;
3898269198Sjfv	}
3899269198Sjfv
3900269198Sjfv	if (mdd_detected) {
3901269198Sjfv		reg = rd32(hw, I40E_PF_MDET_TX);
3902269198Sjfv		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3903269198Sjfv			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3904269198Sjfv			device_printf(dev,
3905269198Sjfv				 "MDD TX event is for this function 0x%08x",
3906269198Sjfv				 reg);
3907269198Sjfv			pf_mdd_detected = true;
3908269198Sjfv		}
3909269198Sjfv		reg = rd32(hw, I40E_PF_MDET_RX);
3910269198Sjfv		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3911269198Sjfv			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3912269198Sjfv			device_printf(dev,
3913269198Sjfv				 "MDD RX event is for this function 0x%08x",
3914269198Sjfv				 reg);
3915269198Sjfv			pf_mdd_detected = true;
3916269198Sjfv		}
3917269198Sjfv	}
3918269198Sjfv
3919269198Sjfv	/* re-enable mdd interrupt cause */
3920269198Sjfv	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3921269198Sjfv	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3922269198Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3923270346Sjfv	ixl_flush(hw);
3924269198Sjfv}
3925269198Sjfv
3926266423Sjfvstatic void
3927270346Sjfvixl_enable_intr(struct ixl_vsi *vsi)
3928266423Sjfv{
3929266423Sjfv	struct i40e_hw		*hw = vsi->hw;
3930270346Sjfv	struct ixl_queue	*que = vsi->queues;
3931266423Sjfv
3932270346Sjfv	if (ixl_enable_msix) {
3933270346Sjfv		ixl_enable_adminq(hw);
3934266423Sjfv		for (int i = 0; i < vsi->num_queues; i++, que++)
3935270346Sjfv			ixl_enable_queue(hw, que->me);
3936266423Sjfv	} else
3937270346Sjfv		ixl_enable_legacy(hw);
3938266423Sjfv}
3939266423Sjfv
3940266423Sjfvstatic void
3941279858Sjfvixl_disable_rings_intr(struct ixl_vsi *vsi)
3942266423Sjfv{
3943266423Sjfv	struct i40e_hw		*hw = vsi->hw;
3944270346Sjfv	struct ixl_queue	*que = vsi->queues;
3945266423Sjfv
3946279858Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++)
3947279858Sjfv		ixl_disable_queue(hw, que->me);
3948279858Sjfv}
3949279858Sjfv
3950279858Sjfvstatic void
3951279858Sjfvixl_disable_intr(struct ixl_vsi *vsi)
3952279858Sjfv{
3953279858Sjfv	struct i40e_hw		*hw = vsi->hw;
3954279858Sjfv
3955279858Sjfv	if (ixl_enable_msix)
3956270346Sjfv		ixl_disable_adminq(hw);
3957279858Sjfv	else
3958270346Sjfv		ixl_disable_legacy(hw);
3959266423Sjfv}
3960266423Sjfv
3961266423Sjfvstatic void
3962270346Sjfvixl_enable_adminq(struct i40e_hw *hw)
3963266423Sjfv{
3964266423Sjfv	u32		reg;
3965266423Sjfv
3966266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3967266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3968270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3969266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3970270346Sjfv	ixl_flush(hw);
3971266423Sjfv	return;
3972266423Sjfv}
3973266423Sjfv
3974266423Sjfvstatic void
3975270346Sjfvixl_disable_adminq(struct i40e_hw *hw)
3976266423Sjfv{
3977266423Sjfv	u32		reg;
3978266423Sjfv
3979270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3980266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3981266423Sjfv
3982266423Sjfv	return;
3983266423Sjfv}
3984266423Sjfv
3985266423Sjfvstatic void
3986270346Sjfvixl_enable_queue(struct i40e_hw *hw, int id)
3987266423Sjfv{
3988266423Sjfv	u32		reg;
3989266423Sjfv
3990266423Sjfv	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3991266423Sjfv	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3992270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3993266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3994266423Sjfv}
3995266423Sjfv
3996266423Sjfvstatic void
3997270346Sjfvixl_disable_queue(struct i40e_hw *hw, int id)
3998266423Sjfv{
3999266423Sjfv	u32		reg;
4000266423Sjfv
4001270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4002266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4003266423Sjfv
4004266423Sjfv	return;
4005266423Sjfv}
4006266423Sjfv
4007266423Sjfvstatic void
4008270346Sjfvixl_enable_legacy(struct i40e_hw *hw)
4009266423Sjfv{
4010266423Sjfv	u32		reg;
4011266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4012266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4013270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4014266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4015266423Sjfv}
4016266423Sjfv
4017266423Sjfvstatic void
4018270346Sjfvixl_disable_legacy(struct i40e_hw *hw)
4019266423Sjfv{
4020266423Sjfv	u32		reg;
4021266423Sjfv
4022270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4023266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4024266423Sjfv
4025266423Sjfv	return;
4026266423Sjfv}
4027266423Sjfv
4028266423Sjfvstatic void
4029270346Sjfvixl_update_stats_counters(struct ixl_pf *pf)
4030266423Sjfv{
4031266423Sjfv	struct i40e_hw	*hw = &pf->hw;
4032279858Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
4033279858Sjfv	struct ixl_vf	*vf;
4034269198Sjfv
4035266423Sjfv	struct i40e_hw_port_stats *nsd = &pf->stats;
4036266423Sjfv	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4037266423Sjfv
4038266423Sjfv	/* Update hw stats */
4039270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4040266423Sjfv			   pf->stat_offsets_loaded,
4041266423Sjfv			   &osd->crc_errors, &nsd->crc_errors);
4042270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4043266423Sjfv			   pf->stat_offsets_loaded,
4044266423Sjfv			   &osd->illegal_bytes, &nsd->illegal_bytes);
4045270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4046266423Sjfv			   I40E_GLPRT_GORCL(hw->port),
4047266423Sjfv			   pf->stat_offsets_loaded,
4048266423Sjfv			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4049270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4050266423Sjfv			   I40E_GLPRT_GOTCL(hw->port),
4051266423Sjfv			   pf->stat_offsets_loaded,
4052266423Sjfv			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4053270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4054266423Sjfv			   pf->stat_offsets_loaded,
4055266423Sjfv			   &osd->eth.rx_discards,
4056266423Sjfv			   &nsd->eth.rx_discards);
4057270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4058266423Sjfv			   I40E_GLPRT_UPRCL(hw->port),
4059266423Sjfv			   pf->stat_offsets_loaded,
4060266423Sjfv			   &osd->eth.rx_unicast,
4061266423Sjfv			   &nsd->eth.rx_unicast);
4062270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4063266423Sjfv			   I40E_GLPRT_UPTCL(hw->port),
4064266423Sjfv			   pf->stat_offsets_loaded,
4065266423Sjfv			   &osd->eth.tx_unicast,
4066266423Sjfv			   &nsd->eth.tx_unicast);
4067270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4068266423Sjfv			   I40E_GLPRT_MPRCL(hw->port),
4069266423Sjfv			   pf->stat_offsets_loaded,
4070266423Sjfv			   &osd->eth.rx_multicast,
4071266423Sjfv			   &nsd->eth.rx_multicast);
4072270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4073266423Sjfv			   I40E_GLPRT_MPTCL(hw->port),
4074266423Sjfv			   pf->stat_offsets_loaded,
4075266423Sjfv			   &osd->eth.tx_multicast,
4076266423Sjfv			   &nsd->eth.tx_multicast);
4077270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4078266423Sjfv			   I40E_GLPRT_BPRCL(hw->port),
4079266423Sjfv			   pf->stat_offsets_loaded,
4080266423Sjfv			   &osd->eth.rx_broadcast,
4081266423Sjfv			   &nsd->eth.rx_broadcast);
4082270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4083266423Sjfv			   I40E_GLPRT_BPTCL(hw->port),
4084266423Sjfv			   pf->stat_offsets_loaded,
4085266423Sjfv			   &osd->eth.tx_broadcast,
4086266423Sjfv			   &nsd->eth.tx_broadcast);
4087266423Sjfv
4088270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4089266423Sjfv			   pf->stat_offsets_loaded,
4090266423Sjfv			   &osd->tx_dropped_link_down,
4091266423Sjfv			   &nsd->tx_dropped_link_down);
4092270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4093266423Sjfv			   pf->stat_offsets_loaded,
4094266423Sjfv			   &osd->mac_local_faults,
4095266423Sjfv			   &nsd->mac_local_faults);
4096270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4097266423Sjfv			   pf->stat_offsets_loaded,
4098266423Sjfv			   &osd->mac_remote_faults,
4099266423Sjfv			   &nsd->mac_remote_faults);
4100270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4101266423Sjfv			   pf->stat_offsets_loaded,
4102266423Sjfv			   &osd->rx_length_errors,
4103266423Sjfv			   &nsd->rx_length_errors);
4104266423Sjfv
4105269198Sjfv	/* Flow control (LFC) stats */
4106270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4107266423Sjfv			   pf->stat_offsets_loaded,
4108266423Sjfv			   &osd->link_xon_rx, &nsd->link_xon_rx);
4109270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4110266423Sjfv			   pf->stat_offsets_loaded,
4111266423Sjfv			   &osd->link_xon_tx, &nsd->link_xon_tx);
4112270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4113266423Sjfv			   pf->stat_offsets_loaded,
4114266423Sjfv			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
4115270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4116266423Sjfv			   pf->stat_offsets_loaded,
4117266423Sjfv			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
4118266423Sjfv
4119269198Sjfv	/* Packet size stats rx */
4120270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4121266423Sjfv			   I40E_GLPRT_PRC64L(hw->port),
4122266423Sjfv			   pf->stat_offsets_loaded,
4123266423Sjfv			   &osd->rx_size_64, &nsd->rx_size_64);
4124270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4125266423Sjfv			   I40E_GLPRT_PRC127L(hw->port),
4126266423Sjfv			   pf->stat_offsets_loaded,
4127266423Sjfv			   &osd->rx_size_127, &nsd->rx_size_127);
4128270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4129266423Sjfv			   I40E_GLPRT_PRC255L(hw->port),
4130266423Sjfv			   pf->stat_offsets_loaded,
4131266423Sjfv			   &osd->rx_size_255, &nsd->rx_size_255);
4132270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4133266423Sjfv			   I40E_GLPRT_PRC511L(hw->port),
4134266423Sjfv			   pf->stat_offsets_loaded,
4135266423Sjfv			   &osd->rx_size_511, &nsd->rx_size_511);
4136270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4137266423Sjfv			   I40E_GLPRT_PRC1023L(hw->port),
4138266423Sjfv			   pf->stat_offsets_loaded,
4139266423Sjfv			   &osd->rx_size_1023, &nsd->rx_size_1023);
4140270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4141266423Sjfv			   I40E_GLPRT_PRC1522L(hw->port),
4142266423Sjfv			   pf->stat_offsets_loaded,
4143266423Sjfv			   &osd->rx_size_1522, &nsd->rx_size_1522);
4144270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4145266423Sjfv			   I40E_GLPRT_PRC9522L(hw->port),
4146266423Sjfv			   pf->stat_offsets_loaded,
4147266423Sjfv			   &osd->rx_size_big, &nsd->rx_size_big);
4148266423Sjfv
4149269198Sjfv	/* Packet size stats tx */
4150270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4151266423Sjfv			   I40E_GLPRT_PTC64L(hw->port),
4152266423Sjfv			   pf->stat_offsets_loaded,
4153266423Sjfv			   &osd->tx_size_64, &nsd->tx_size_64);
4154270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4155266423Sjfv			   I40E_GLPRT_PTC127L(hw->port),
4156266423Sjfv			   pf->stat_offsets_loaded,
4157266423Sjfv			   &osd->tx_size_127, &nsd->tx_size_127);
4158270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4159266423Sjfv			   I40E_GLPRT_PTC255L(hw->port),
4160266423Sjfv			   pf->stat_offsets_loaded,
4161266423Sjfv			   &osd->tx_size_255, &nsd->tx_size_255);
4162270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4163266423Sjfv			   I40E_GLPRT_PTC511L(hw->port),
4164266423Sjfv			   pf->stat_offsets_loaded,
4165266423Sjfv			   &osd->tx_size_511, &nsd->tx_size_511);
4166270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4167266423Sjfv			   I40E_GLPRT_PTC1023L(hw->port),
4168266423Sjfv			   pf->stat_offsets_loaded,
4169266423Sjfv			   &osd->tx_size_1023, &nsd->tx_size_1023);
4170270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4171266423Sjfv			   I40E_GLPRT_PTC1522L(hw->port),
4172266423Sjfv			   pf->stat_offsets_loaded,
4173266423Sjfv			   &osd->tx_size_1522, &nsd->tx_size_1522);
4174270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4175266423Sjfv			   I40E_GLPRT_PTC9522L(hw->port),
4176266423Sjfv			   pf->stat_offsets_loaded,
4177266423Sjfv			   &osd->tx_size_big, &nsd->tx_size_big);
4178266423Sjfv
4179270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4180266423Sjfv			   pf->stat_offsets_loaded,
4181266423Sjfv			   &osd->rx_undersize, &nsd->rx_undersize);
4182270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4183266423Sjfv			   pf->stat_offsets_loaded,
4184266423Sjfv			   &osd->rx_fragments, &nsd->rx_fragments);
4185270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4186266423Sjfv			   pf->stat_offsets_loaded,
4187266423Sjfv			   &osd->rx_oversize, &nsd->rx_oversize);
4188270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4189266423Sjfv			   pf->stat_offsets_loaded,
4190266423Sjfv			   &osd->rx_jabber, &nsd->rx_jabber);
4191266423Sjfv	pf->stat_offsets_loaded = true;
4192269198Sjfv	/* End hw stats */
4193266423Sjfv
4194266423Sjfv	/* Update vsi stats */
4195279858Sjfv	ixl_update_vsi_stats(vsi);
4196266423Sjfv
4197279858Sjfv	for (int i = 0; i < pf->num_vfs; i++) {
4198279858Sjfv		vf = &pf->vfs[i];
4199279858Sjfv		if (vf->vf_flags & VF_FLAG_ENABLED)
4200279858Sjfv			ixl_update_eth_stats(&pf->vfs[i].vsi);
4201279858Sjfv	}
4202266423Sjfv}
4203266423Sjfv
4204266423Sjfv/*
4205266423Sjfv** Tasklet handler for MSIX Adminq interrupts
4206266423Sjfv**  - do outside interrupt since it might sleep
4207266423Sjfv*/
4208266423Sjfvstatic void
4209270346Sjfvixl_do_adminq(void *context, int pending)
4210266423Sjfv{
4211270346Sjfv	struct ixl_pf			*pf = context;
4212266423Sjfv	struct i40e_hw			*hw = &pf->hw;
4213270346Sjfv	struct ixl_vsi			*vsi = &pf->vsi;
4214266423Sjfv	struct i40e_arq_event_info	event;
4215266423Sjfv	i40e_status			ret;
4216266423Sjfv	u32				reg, loop = 0;
4217266423Sjfv	u16				opcode, result;
4218266423Sjfv
4219274205Sjfv	event.buf_len = IXL_AQ_BUF_SZ;
4220274205Sjfv	event.msg_buf = malloc(event.buf_len,
4221266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
4222266423Sjfv	if (!event.msg_buf) {
4223266423Sjfv		printf("Unable to allocate adminq memory\n");
4224266423Sjfv		return;
4225266423Sjfv	}
4226266423Sjfv
4227279858Sjfv	IXL_PF_LOCK(pf);
4228266423Sjfv	/* clean and process any events */
4229266423Sjfv	do {
4230266423Sjfv		ret = i40e_clean_arq_element(hw, &event, &result);
4231266423Sjfv		if (ret)
4232266423Sjfv			break;
4233266423Sjfv		opcode = LE16_TO_CPU(event.desc.opcode);
4234266423Sjfv		switch (opcode) {
4235266423Sjfv		case i40e_aqc_opc_get_link_status:
4236279858Sjfv			ixl_link_event(pf, &event);
4237270346Sjfv			ixl_update_link_status(pf);
4238266423Sjfv			break;
4239266423Sjfv		case i40e_aqc_opc_send_msg_to_pf:
4240279858Sjfv#ifdef PCI_IOV
4241279858Sjfv			ixl_handle_vf_msg(pf, &event);
4242279858Sjfv#endif
4243266423Sjfv			break;
4244266423Sjfv		case i40e_aqc_opc_event_lan_overflow:
4245266423Sjfv			break;
4246266423Sjfv		default:
4247270346Sjfv#ifdef IXL_DEBUG
4248266423Sjfv			printf("AdminQ unknown event %x\n", opcode);
4249266423Sjfv#endif
4250266423Sjfv			break;
4251266423Sjfv		}
4252266423Sjfv
4253270346Sjfv	} while (result && (loop++ < IXL_ADM_LIMIT));
4254266423Sjfv
4255266423Sjfv	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4256269198Sjfv	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4257266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4258266423Sjfv	free(event.msg_buf, M_DEVBUF);
4259266423Sjfv
4260279858Sjfv	/*
4261279858Sjfv	 * If there are still messages to process, reschedule ourselves.
4262279858Sjfv	 * Otherwise, re-enable our interrupt and go to sleep.
4263279858Sjfv	 */
4264279858Sjfv	if (result > 0)
4265279858Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
4266266423Sjfv	else
4267270346Sjfv		ixl_enable_intr(vsi);
4268279858Sjfv
4269279858Sjfv	IXL_PF_UNLOCK(pf);
4270266423Sjfv}
4271266423Sjfv
4272266423Sjfvstatic int
4273270346Sjfvixl_debug_info(SYSCTL_HANDLER_ARGS)
4274266423Sjfv{
4275270346Sjfv	struct ixl_pf	*pf;
4276266423Sjfv	int		error, input = 0;
4277266423Sjfv
4278266423Sjfv	error = sysctl_handle_int(oidp, &input, 0, req);
4279266423Sjfv
4280266423Sjfv	if (error || !req->newptr)
4281266423Sjfv		return (error);
4282266423Sjfv
4283266423Sjfv	if (input == 1) {
4284270346Sjfv		pf = (struct ixl_pf *)arg1;
4285270346Sjfv		ixl_print_debug_info(pf);
4286266423Sjfv	}
4287266423Sjfv
4288266423Sjfv	return (error);
4289266423Sjfv}
4290266423Sjfv
4291266423Sjfvstatic void
4292270346Sjfvixl_print_debug_info(struct ixl_pf *pf)
4293266423Sjfv{
4294266423Sjfv	struct i40e_hw		*hw = &pf->hw;
4295270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
4296270346Sjfv	struct ixl_queue	*que = vsi->queues;
4297266423Sjfv	struct rx_ring		*rxr = &que->rxr;
4298266423Sjfv	struct tx_ring		*txr = &que->txr;
4299266423Sjfv	u32			reg;
4300266423Sjfv
4301266423Sjfv
4302270799Sbz	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4303270799Sbz	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4304266423Sjfv	printf("RX next check = %x\n", rxr->next_check);
4305270799Sbz	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4306270799Sbz	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4307266423Sjfv	printf("TX desc avail = %x\n", txr->avail);
4308266423Sjfv
4309266423Sjfv	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4310266423Sjfv	 printf("RX Bytes = %x\n", reg);
4311266423Sjfv	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4312266423Sjfv	 printf("Port RX Bytes = %x\n", reg);
4313266423Sjfv	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4314266423Sjfv	 printf("RX discard = %x\n", reg);
4315266423Sjfv	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4316266423Sjfv	 printf("Port RX discard = %x\n", reg);
4317266423Sjfv
4318266423Sjfv	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4319266423Sjfv	 printf("TX errors = %x\n", reg);
4320266423Sjfv	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4321266423Sjfv	 printf("TX Bytes = %x\n", reg);
4322266423Sjfv
4323266423Sjfv	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4324266423Sjfv	 printf("RX undersize = %x\n", reg);
4325266423Sjfv	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4326266423Sjfv	 printf("RX fragments = %x\n", reg);
4327266423Sjfv	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4328266423Sjfv	 printf("RX oversize = %x\n", reg);
4329266423Sjfv	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4330266423Sjfv	 printf("RX length error = %x\n", reg);
4331266423Sjfv	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4332266423Sjfv	 printf("mac remote fault = %x\n", reg);
4333266423Sjfv	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4334266423Sjfv	 printf("mac local fault = %x\n", reg);
4335266423Sjfv}
4336266423Sjfv
4337266423Sjfv/**
4338266423Sjfv * Update VSI-specific ethernet statistics counters.
4339266423Sjfv **/
4340270346Sjfvvoid ixl_update_eth_stats(struct ixl_vsi *vsi)
4341266423Sjfv{
4342270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4343266423Sjfv	struct i40e_hw *hw = &pf->hw;
4344266423Sjfv	struct i40e_eth_stats *es;
4345266423Sjfv	struct i40e_eth_stats *oes;
4346272227Sglebius	struct i40e_hw_port_stats *nsd;
4347266423Sjfv	u16 stat_idx = vsi->info.stat_counter_idx;
4348266423Sjfv
4349266423Sjfv	es = &vsi->eth_stats;
4350266423Sjfv	oes = &vsi->eth_stats_offsets;
4351272227Sglebius	nsd = &pf->stats;
4352266423Sjfv
4353266423Sjfv	/* Gather up the stats that the hw collects */
4354270346Sjfv	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4355266423Sjfv			   vsi->stat_offsets_loaded,
4356266423Sjfv			   &oes->tx_errors, &es->tx_errors);
4357270346Sjfv	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4358266423Sjfv			   vsi->stat_offsets_loaded,
4359266423Sjfv			   &oes->rx_discards, &es->rx_discards);
4360266423Sjfv
4361270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4362266423Sjfv			   I40E_GLV_GORCL(stat_idx),
4363266423Sjfv			   vsi->stat_offsets_loaded,
4364266423Sjfv			   &oes->rx_bytes, &es->rx_bytes);
4365270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4366266423Sjfv			   I40E_GLV_UPRCL(stat_idx),
4367266423Sjfv			   vsi->stat_offsets_loaded,
4368266423Sjfv			   &oes->rx_unicast, &es->rx_unicast);
4369270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4370266423Sjfv			   I40E_GLV_MPRCL(stat_idx),
4371266423Sjfv			   vsi->stat_offsets_loaded,
4372266423Sjfv			   &oes->rx_multicast, &es->rx_multicast);
4373270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4374266423Sjfv			   I40E_GLV_BPRCL(stat_idx),
4375266423Sjfv			   vsi->stat_offsets_loaded,
4376266423Sjfv			   &oes->rx_broadcast, &es->rx_broadcast);
4377266423Sjfv
4378270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4379266423Sjfv			   I40E_GLV_GOTCL(stat_idx),
4380266423Sjfv			   vsi->stat_offsets_loaded,
4381266423Sjfv			   &oes->tx_bytes, &es->tx_bytes);
4382270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4383266423Sjfv			   I40E_GLV_UPTCL(stat_idx),
4384266423Sjfv			   vsi->stat_offsets_loaded,
4385266423Sjfv			   &oes->tx_unicast, &es->tx_unicast);
4386270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4387266423Sjfv			   I40E_GLV_MPTCL(stat_idx),
4388266423Sjfv			   vsi->stat_offsets_loaded,
4389266423Sjfv			   &oes->tx_multicast, &es->tx_multicast);
4390270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4391266423Sjfv			   I40E_GLV_BPTCL(stat_idx),
4392266423Sjfv			   vsi->stat_offsets_loaded,
4393266423Sjfv			   &oes->tx_broadcast, &es->tx_broadcast);
4394266423Sjfv	vsi->stat_offsets_loaded = true;
4395279858Sjfv}
4396269198Sjfv
4397279858Sjfvstatic void
4398279858Sjfvixl_update_vsi_stats(struct ixl_vsi *vsi)
4399279858Sjfv{
4400279858Sjfv	struct ixl_pf		*pf;
4401279858Sjfv	struct ifnet		*ifp;
4402279858Sjfv	struct i40e_eth_stats	*es;
4403279858Sjfv	u64			tx_discards;
4404279858Sjfv
4405279858Sjfv	struct i40e_hw_port_stats *nsd;
4406279858Sjfv
4407279858Sjfv	pf = vsi->back;
4408279858Sjfv	ifp = vsi->ifp;
4409279858Sjfv	es = &vsi->eth_stats;
4410279858Sjfv	nsd = &pf->stats;
4411279858Sjfv
4412279858Sjfv	ixl_update_eth_stats(vsi);
4413279858Sjfv
4414272227Sglebius	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4415279858Sjfv	for (int i = 0; i < vsi->num_queues; i++)
4416272227Sglebius		tx_discards += vsi->queues[i].txr.br->br_drops;
4417272227Sglebius
4418269198Sjfv	/* Update ifnet stats */
4419272227Sglebius	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4420269198Sjfv	                   es->rx_multicast +
4421272227Sglebius			   es->rx_broadcast);
4422272227Sglebius	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4423269198Sjfv	                   es->tx_multicast +
4424272227Sglebius			   es->tx_broadcast);
4425272227Sglebius	IXL_SET_IBYTES(vsi, es->rx_bytes);
4426272227Sglebius	IXL_SET_OBYTES(vsi, es->tx_bytes);
4427272227Sglebius	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4428272227Sglebius	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4429269198Sjfv
4430279858Sjfv	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4431279858Sjfv	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4432279858Sjfv	    nsd->rx_jabber);
4433272227Sglebius	IXL_SET_OERRORS(vsi, es->tx_errors);
4434272227Sglebius	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4435272227Sglebius	IXL_SET_OQDROPS(vsi, tx_discards);
4436272227Sglebius	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4437272227Sglebius	IXL_SET_COLLISIONS(vsi, 0);
4438266423Sjfv}
4439266423Sjfv
4440266423Sjfv/**
4441266423Sjfv * Reset all of the stats for the given pf
4442266423Sjfv **/
4443270346Sjfvvoid ixl_pf_reset_stats(struct ixl_pf *pf)
4444266423Sjfv{
4445266423Sjfv	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4446266423Sjfv	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4447266423Sjfv	pf->stat_offsets_loaded = false;
4448266423Sjfv}
4449266423Sjfv
4450266423Sjfv/**
4451266423Sjfv * Resets all stats of the given vsi
4452266423Sjfv **/
4453270346Sjfvvoid ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4454266423Sjfv{
4455266423Sjfv	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4456266423Sjfv	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4457266423Sjfv	vsi->stat_offsets_loaded = false;
4458266423Sjfv}
4459266423Sjfv
4460266423Sjfv/**
4461266423Sjfv * Read and update a 48 bit stat from the hw
4462266423Sjfv *
4463266423Sjfv * Since the device stats are not reset at PFReset, they likely will not
4464266423Sjfv * be zeroed when the driver starts.  We'll save the first values read
4465266423Sjfv * and use them as offsets to be subtracted from the raw values in order
4466266423Sjfv * to report stats that count from zero.
4467266423Sjfv **/
4468266423Sjfvstatic void
4469270346Sjfvixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4470266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4471266423Sjfv{
4472266423Sjfv	u64 new_data;
4473266423Sjfv
4474270799Sbz#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4475266423Sjfv	new_data = rd64(hw, loreg);
4476266423Sjfv#else
4477266423Sjfv	/*
4478269198Sjfv	 * Use two rd32's instead of one rd64; FreeBSD versions before
4479266423Sjfv	 * 10 don't support 8 byte bus reads/writes.
4480266423Sjfv	 */
4481266423Sjfv	new_data = rd32(hw, loreg);
4482266423Sjfv	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4483266423Sjfv#endif
4484266423Sjfv
4485266423Sjfv	if (!offset_loaded)
4486266423Sjfv		*offset = new_data;
4487266423Sjfv	if (new_data >= *offset)
4488266423Sjfv		*stat = new_data - *offset;
4489266423Sjfv	else
4490266423Sjfv		*stat = (new_data + ((u64)1 << 48)) - *offset;
4491266423Sjfv	*stat &= 0xFFFFFFFFFFFFULL;
4492266423Sjfv}
4493266423Sjfv
4494266423Sjfv/**
4495266423Sjfv * Read and update a 32 bit stat from the hw
4496266423Sjfv **/
4497266423Sjfvstatic void
4498270346Sjfvixl_stat_update32(struct i40e_hw *hw, u32 reg,
4499266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4500266423Sjfv{
4501266423Sjfv	u32 new_data;
4502266423Sjfv
4503266423Sjfv	new_data = rd32(hw, reg);
4504266423Sjfv	if (!offset_loaded)
4505266423Sjfv		*offset = new_data;
4506266423Sjfv	if (new_data >= *offset)
4507266423Sjfv		*stat = (u32)(new_data - *offset);
4508266423Sjfv	else
4509266423Sjfv		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4510266423Sjfv}
4511266423Sjfv
4512266423Sjfv/*
4513266423Sjfv** Set flow control using sysctl:
4514266423Sjfv** 	0 - off
4515266423Sjfv**	1 - rx pause
4516266423Sjfv**	2 - tx pause
4517266423Sjfv**	3 - full
4518266423Sjfv*/
4519266423Sjfvstatic int
4520270346Sjfvixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4521266423Sjfv{
4522266423Sjfv	/*
4523266423Sjfv	 * TODO: ensure flow control is disabled if
4524266423Sjfv	 * priority flow control is enabled
4525266423Sjfv	 *
4526266423Sjfv	 * TODO: ensure tx CRC by hardware should be enabled
4527266423Sjfv	 * if tx flow control is enabled.
4528266423Sjfv	 */
4529270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4530266423Sjfv	struct i40e_hw *hw = &pf->hw;
4531266423Sjfv	device_t dev = pf->dev;
4532279033Sjfv	int error = 0;
4533266423Sjfv	enum i40e_status_code aq_error = 0;
4534266423Sjfv	u8 fc_aq_err = 0;
4535266423Sjfv
4536279033Sjfv	/* Get request */
4537279033Sjfv	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4538266423Sjfv	if ((error) || (req->newptr == NULL))
4539269198Sjfv		return (error);
4540279033Sjfv	if (pf->fc < 0 || pf->fc > 3) {
4541266423Sjfv		device_printf(dev,
4542266423Sjfv		    "Invalid fc mode; valid modes are 0 through 3\n");
4543266423Sjfv		return (EINVAL);
4544266423Sjfv	}
4545266423Sjfv
4546269198Sjfv	/*
4547269198Sjfv	** Changing flow control mode currently does not work on
4548269198Sjfv	** 40GBASE-CR4 PHYs
4549269198Sjfv	*/
4550269198Sjfv	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4551269198Sjfv	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4552269198Sjfv		device_printf(dev, "Changing flow control mode unsupported"
4553269198Sjfv		    " on 40GBase-CR4 media.\n");
4554269198Sjfv		return (ENODEV);
4555269198Sjfv	}
4556269198Sjfv
4557266423Sjfv	/* Set fc ability for port */
4558279033Sjfv	hw->fc.requested_mode = pf->fc;
4559269198Sjfv	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4560269198Sjfv	if (aq_error) {
4561269198Sjfv		device_printf(dev,
4562269198Sjfv		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4563269198Sjfv		    __func__, aq_error, fc_aq_err);
4564269198Sjfv		return (EAGAIN);
4565269198Sjfv	}
4566266423Sjfv
4567269198Sjfv	return (0);
4568269198Sjfv}
4569266423Sjfv
4570270346Sjfvstatic int
4571270346Sjfvixl_current_speed(SYSCTL_HANDLER_ARGS)
4572270346Sjfv{
4573270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4574270346Sjfv	struct i40e_hw *hw = &pf->hw;
4575270346Sjfv	int error = 0, index = 0;
4576270346Sjfv
4577270346Sjfv	char *speeds[] = {
4578270346Sjfv		"Unknown",
4579270346Sjfv		"100M",
4580270346Sjfv		"1G",
4581270346Sjfv		"10G",
4582270346Sjfv		"40G",
4583270346Sjfv		"20G"
4584270346Sjfv	};
4585270346Sjfv
4586270346Sjfv	ixl_update_link_status(pf);
4587270346Sjfv
4588270346Sjfv	switch (hw->phy.link_info.link_speed) {
4589270346Sjfv	case I40E_LINK_SPEED_100MB:
4590270346Sjfv		index = 1;
4591270346Sjfv		break;
4592270346Sjfv	case I40E_LINK_SPEED_1GB:
4593270346Sjfv		index = 2;
4594270346Sjfv		break;
4595270346Sjfv	case I40E_LINK_SPEED_10GB:
4596270346Sjfv		index = 3;
4597270346Sjfv		break;
4598270346Sjfv	case I40E_LINK_SPEED_40GB:
4599270346Sjfv		index = 4;
4600270346Sjfv		break;
4601270346Sjfv	case I40E_LINK_SPEED_20GB:
4602270346Sjfv		index = 5;
4603270346Sjfv		break;
4604270346Sjfv	case I40E_LINK_SPEED_UNKNOWN:
4605270346Sjfv	default:
4606270346Sjfv		index = 0;
4607270346Sjfv		break;
4608270346Sjfv	}
4609270346Sjfv
4610270346Sjfv	error = sysctl_handle_string(oidp, speeds[index],
4611270346Sjfv	    strlen(speeds[index]), req);
4612270346Sjfv	return (error);
4613270346Sjfv}
4614270346Sjfv
4615274205Sjfvstatic int
4616274205Sjfvixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4617274205Sjfv{
4618274205Sjfv	struct i40e_hw *hw = &pf->hw;
4619274205Sjfv	device_t dev = pf->dev;
4620274205Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
4621274205Sjfv	struct i40e_aq_set_phy_config config;
4622274205Sjfv	enum i40e_status_code aq_error = 0;
4623274205Sjfv
4624274205Sjfv	/* Get current capability information */
4625279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
4626279033Sjfv	    FALSE, FALSE, &abilities, NULL);
4627274205Sjfv	if (aq_error) {
4628279033Sjfv		device_printf(dev,
4629279033Sjfv		    "%s: Error getting phy capabilities %d,"
4630274205Sjfv		    " aq error: %d\n", __func__, aq_error,
4631274205Sjfv		    hw->aq.asq_last_status);
4632274205Sjfv		return (EAGAIN);
4633274205Sjfv	}
4634274205Sjfv
4635274205Sjfv	/* Prepare new config */
4636274205Sjfv	bzero(&config, sizeof(config));
4637274205Sjfv	config.phy_type = abilities.phy_type;
4638274205Sjfv	config.abilities = abilities.abilities
4639274205Sjfv	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4640274205Sjfv	config.eee_capability = abilities.eee_capability;
4641274205Sjfv	config.eeer = abilities.eeer_val;
4642274205Sjfv	config.low_power_ctrl = abilities.d3_lpan;
4643274205Sjfv	/* Translate into aq cmd link_speed */
4644279858Sjfv	if (speeds & 0x8)
4645279858Sjfv		config.link_speed |= I40E_LINK_SPEED_20GB;
4646274205Sjfv	if (speeds & 0x4)
4647274205Sjfv		config.link_speed |= I40E_LINK_SPEED_10GB;
4648274205Sjfv	if (speeds & 0x2)
4649274205Sjfv		config.link_speed |= I40E_LINK_SPEED_1GB;
4650274205Sjfv	if (speeds & 0x1)
4651274205Sjfv		config.link_speed |= I40E_LINK_SPEED_100MB;
4652274205Sjfv
4653274205Sjfv	/* Do aq command & restart link */
4654274205Sjfv	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4655274205Sjfv	if (aq_error) {
4656279033Sjfv		device_printf(dev,
4657279033Sjfv		    "%s: Error setting new phy config %d,"
4658274205Sjfv		    " aq error: %d\n", __func__, aq_error,
4659274205Sjfv		    hw->aq.asq_last_status);
4660274205Sjfv		return (EAGAIN);
4661274205Sjfv	}
4662274205Sjfv
4663277084Sjfv	/*
4664277084Sjfv	** This seems a bit heavy handed, but we
4665277084Sjfv	** need to get a reinit on some devices
4666277084Sjfv	*/
4667277084Sjfv	IXL_PF_LOCK(pf);
4668277084Sjfv	ixl_stop(pf);
4669277084Sjfv	ixl_init_locked(pf);
4670277084Sjfv	IXL_PF_UNLOCK(pf);
4671277084Sjfv
4672274205Sjfv	return (0);
4673274205Sjfv}
4674274205Sjfv
4675269198Sjfv/*
4676269198Sjfv** Control link advertise speed:
4677270346Sjfv**	Flags:
4678270346Sjfv**	0x1 - advertise 100 Mb
4679270346Sjfv**	0x2 - advertise 1G
4680270346Sjfv**	0x4 - advertise 10G
4681279858Sjfv**	0x8 - advertise 20G
4682269198Sjfv**
4683269198Sjfv** Does not work on 40G devices.
4684269198Sjfv*/
4685269198Sjfvstatic int
4686270346Sjfvixl_set_advertise(SYSCTL_HANDLER_ARGS)
4687269198Sjfv{
4688270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4689269198Sjfv	struct i40e_hw *hw = &pf->hw;
4690269198Sjfv	device_t dev = pf->dev;
4691270346Sjfv	int requested_ls = 0;
4692269198Sjfv	int error = 0;
4693266423Sjfv
4694269198Sjfv	/*
4695269198Sjfv	** FW doesn't support changing advertised speed
4696269198Sjfv	** for 40G devices; speed is always 40G.
4697269198Sjfv	*/
4698269198Sjfv	if (i40e_is_40G_device(hw->device_id))
4699269198Sjfv		return (ENODEV);
4700266423Sjfv
4701269198Sjfv	/* Read in new mode */
4702270346Sjfv	requested_ls = pf->advertised_speed;
4703269198Sjfv	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4704269198Sjfv	if ((error) || (req->newptr == NULL))
4705269198Sjfv		return (error);
4706279858Sjfv	/* Check for sane value */
4707279858Sjfv	if (requested_ls < 0x1 || requested_ls > 0xE) {
4708279858Sjfv		device_printf(dev, "Invalid advertised speed; "
4709279858Sjfv		    "valid modes are 0x1 through 0xE\n");
4710269198Sjfv		return (EINVAL);
4711266423Sjfv	}
4712279858Sjfv	/* Then check for validity based on adapter type */
4713279858Sjfv	switch (hw->device_id) {
4714279858Sjfv	case I40E_DEV_ID_10G_BASE_T:
4715279858Sjfv		if (requested_ls & 0x8) {
4716279858Sjfv			device_printf(dev,
4717279858Sjfv			    "20Gbs speed not supported on this device.\n");
4718279858Sjfv			return (EINVAL);
4719279858Sjfv		}
4720279858Sjfv		break;
4721279858Sjfv	case I40E_DEV_ID_20G_KR2:
4722279858Sjfv		if (requested_ls & 0x1) {
4723279858Sjfv			device_printf(dev,
4724279858Sjfv			    "100Mbs speed not supported on this device.\n");
4725279858Sjfv			return (EINVAL);
4726279858Sjfv		}
4727279858Sjfv		break;
4728279858Sjfv	default:
4729279858Sjfv		if (requested_ls & ~0x6) {
4730279858Sjfv			device_printf(dev,
4731279858Sjfv			    "Only 1/10Gbs speeds are supported on this device.\n");
4732279858Sjfv			return (EINVAL);
4733279858Sjfv		}
4734279858Sjfv		break;
4735279858Sjfv	}
4736269198Sjfv
4737269198Sjfv	/* Exit if no change */
4738270346Sjfv	if (pf->advertised_speed == requested_ls)
4739269198Sjfv		return (0);
4740269198Sjfv
4741274205Sjfv	error = ixl_set_advertised_speeds(pf, requested_ls);
4742274205Sjfv	if (error)
4743274205Sjfv		return (error);
4744270346Sjfv
4745270346Sjfv	pf->advertised_speed = requested_ls;
4746270346Sjfv	ixl_update_link_status(pf);
4747269198Sjfv	return (0);
4748266423Sjfv}
4749266423Sjfv
4750266423Sjfv/*
4751266423Sjfv** Get the width and transaction speed of
4752266423Sjfv** the bus this adapter is plugged into.
4753266423Sjfv*/
4754266423Sjfvstatic u16
4755270346Sjfvixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4756266423Sjfv{
4757266423Sjfv        u16                     link;
4758266423Sjfv        u32                     offset;
4759266423Sjfv
4760266423Sjfv
4761266423Sjfv        /* Get the PCI Express Capabilities offset */
4762266423Sjfv        pci_find_cap(dev, PCIY_EXPRESS, &offset);
4763266423Sjfv
4764266423Sjfv        /* ...and read the Link Status Register */
4765266423Sjfv        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4766266423Sjfv
4767266423Sjfv        switch (link & I40E_PCI_LINK_WIDTH) {
4768266423Sjfv        case I40E_PCI_LINK_WIDTH_1:
4769266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x1;
4770266423Sjfv                break;
4771266423Sjfv        case I40E_PCI_LINK_WIDTH_2:
4772266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x2;
4773266423Sjfv                break;
4774266423Sjfv        case I40E_PCI_LINK_WIDTH_4:
4775266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x4;
4776266423Sjfv                break;
4777266423Sjfv        case I40E_PCI_LINK_WIDTH_8:
4778266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x8;
4779266423Sjfv                break;
4780266423Sjfv        default:
4781266423Sjfv                hw->bus.width = i40e_bus_width_unknown;
4782266423Sjfv                break;
4783266423Sjfv        }
4784266423Sjfv
4785266423Sjfv        switch (link & I40E_PCI_LINK_SPEED) {
4786266423Sjfv        case I40E_PCI_LINK_SPEED_2500:
4787266423Sjfv                hw->bus.speed = i40e_bus_speed_2500;
4788266423Sjfv                break;
4789266423Sjfv        case I40E_PCI_LINK_SPEED_5000:
4790266423Sjfv                hw->bus.speed = i40e_bus_speed_5000;
4791266423Sjfv                break;
4792266423Sjfv        case I40E_PCI_LINK_SPEED_8000:
4793266423Sjfv                hw->bus.speed = i40e_bus_speed_8000;
4794266423Sjfv                break;
4795266423Sjfv        default:
4796266423Sjfv                hw->bus.speed = i40e_bus_speed_unknown;
4797266423Sjfv                break;
4798266423Sjfv        }
4799266423Sjfv
4800266423Sjfv
4801266423Sjfv        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4802266423Sjfv            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4803266423Sjfv            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4804266423Sjfv            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4805266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4806266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4807266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4808266423Sjfv            ("Unknown"));
4809266423Sjfv
4810266423Sjfv        if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4811266423Sjfv            (hw->bus.speed < i40e_bus_speed_8000)) {
4812266423Sjfv                device_printf(dev, "PCI-Express bandwidth available"
4813279858Sjfv                    " for this device\n     may be insufficient for"
4814279858Sjfv                    " optimal performance.\n");
4815266423Sjfv                device_printf(dev, "For expected performance a x8 "
4816266423Sjfv                    "PCIE Gen3 slot is required.\n");
4817266423Sjfv        }
4818266423Sjfv
4819266423Sjfv        return (link);
4820266423Sjfv}
4821266423Sjfv
4822274205Sjfvstatic int
4823274205Sjfvixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4824274205Sjfv{
4825274205Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4826274205Sjfv	struct i40e_hw	*hw = &pf->hw;
4827274205Sjfv	char		buf[32];
4828274205Sjfv
4829274205Sjfv	snprintf(buf, sizeof(buf),
4830274205Sjfv	    "f%d.%d a%d.%d n%02x.%02x e%08x",
4831274205Sjfv	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4832274205Sjfv	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
4833274205Sjfv	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4834274205Sjfv	    IXL_NVM_VERSION_HI_SHIFT,
4835274205Sjfv	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4836274205Sjfv	    IXL_NVM_VERSION_LO_SHIFT,
4837274205Sjfv	    hw->nvm.eetrack);
4838274205Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4839274205Sjfv}
4840274205Sjfv
4841274205Sjfv
4842277084Sjfv#ifdef IXL_DEBUG_SYSCTL
4843266423Sjfvstatic int
4844270346Sjfvixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4845266423Sjfv{
4846270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4847266423Sjfv	struct i40e_hw *hw = &pf->hw;
4848266423Sjfv	struct i40e_link_status link_status;
4849266423Sjfv	char buf[512];
4850266423Sjfv
4851266423Sjfv	enum i40e_status_code aq_error = 0;
4852266423Sjfv
4853266423Sjfv	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4854266423Sjfv	if (aq_error) {
4855266423Sjfv		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4856266423Sjfv		return (EPERM);
4857266423Sjfv	}
4858266423Sjfv
4859266423Sjfv	sprintf(buf, "\n"
4860266423Sjfv	    "PHY Type : %#04x\n"
4861266423Sjfv	    "Speed    : %#04x\n"
4862266423Sjfv	    "Link info: %#04x\n"
4863266423Sjfv	    "AN info  : %#04x\n"
4864266423Sjfv	    "Ext info : %#04x",
4865266423Sjfv	    link_status.phy_type, link_status.link_speed,
4866266423Sjfv	    link_status.link_info, link_status.an_info,
4867266423Sjfv	    link_status.ext_info);
4868266423Sjfv
4869266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4870266423Sjfv}
4871266423Sjfv
4872266423Sjfvstatic int
4873270346Sjfvixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4874266423Sjfv{
4875279858Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
4876279858Sjfv	struct i40e_hw		*hw = &pf->hw;
4877279858Sjfv	char			buf[512];
4878279858Sjfv	enum i40e_status_code	aq_error = 0;
4879266423Sjfv
4880279858Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
4881266423Sjfv
4882279858Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
4883279858Sjfv	    TRUE, FALSE, &abilities, NULL);
4884266423Sjfv	if (aq_error) {
4885266423Sjfv		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4886266423Sjfv		return (EPERM);
4887266423Sjfv	}
4888266423Sjfv
4889266423Sjfv	sprintf(buf, "\n"
4890266423Sjfv	    "PHY Type : %#010x\n"
4891266423Sjfv	    "Speed    : %#04x\n"
4892266423Sjfv	    "Abilities: %#04x\n"
4893266423Sjfv	    "EEE cap  : %#06x\n"
4894266423Sjfv	    "EEER reg : %#010x\n"
4895266423Sjfv	    "D3 Lpan  : %#04x",
4896279858Sjfv	    abilities.phy_type, abilities.link_speed,
4897279858Sjfv	    abilities.abilities, abilities.eee_capability,
4898279858Sjfv	    abilities.eeer_val, abilities.d3_lpan);
4899266423Sjfv
4900266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4901266423Sjfv}
4902266423Sjfv
4903266423Sjfvstatic int
4904270346Sjfvixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4905266423Sjfv{
4906270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4907270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
4908270346Sjfv	struct ixl_mac_filter *f;
4909266423Sjfv	char *buf, *buf_i;
4910266423Sjfv
4911266423Sjfv	int error = 0;
4912266423Sjfv	int ftl_len = 0;
4913266423Sjfv	int ftl_counter = 0;
4914266423Sjfv	int buf_len = 0;
4915266423Sjfv	int entry_len = 42;
4916266423Sjfv
4917266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
4918266423Sjfv		ftl_len++;
4919266423Sjfv	}
4920266423Sjfv
4921266423Sjfv	if (ftl_len < 1) {
4922266423Sjfv		sysctl_handle_string(oidp, "(none)", 6, req);
4923266423Sjfv		return (0);
4924266423Sjfv	}
4925266423Sjfv
4926266423Sjfv	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4927266423Sjfv	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
4928266423Sjfv
4929266423Sjfv	sprintf(buf_i++, "\n");
4930266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
4931266423Sjfv		sprintf(buf_i,
4932266423Sjfv		    MAC_FORMAT ", vlan %4d, flags %#06x",
4933266423Sjfv		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4934266423Sjfv		buf_i += entry_len;
4935266423Sjfv		/* don't print '\n' for last entry */
4936266423Sjfv		if (++ftl_counter != ftl_len) {
4937266423Sjfv			sprintf(buf_i, "\n");
4938266423Sjfv			buf_i++;
4939266423Sjfv		}
4940266423Sjfv	}
4941266423Sjfv
4942266423Sjfv	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4943266423Sjfv	if (error)
4944266423Sjfv		printf("sysctl error: %d\n", error);
4945266423Sjfv	free(buf, M_DEVBUF);
4946266423Sjfv	return error;
4947266423Sjfv}
4948269198Sjfv
4949270346Sjfv#define IXL_SW_RES_SIZE 0x14
4950269198Sjfvstatic int
4951277084Sjfvixl_res_alloc_cmp(const void *a, const void *b)
4952277084Sjfv{
4953277084Sjfv	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4954277084Sjfv	one = (struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4955277084Sjfv	two = (struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4956277084Sjfv
4957277084Sjfv	return ((int)one->resource_type - (int)two->resource_type);
4958277084Sjfv}
4959277084Sjfv
4960277084Sjfvstatic int
4961274205Sjfvixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4962269198Sjfv{
4963270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4964269198Sjfv	struct i40e_hw *hw = &pf->hw;
4965269198Sjfv	device_t dev = pf->dev;
4966269198Sjfv	struct sbuf *buf;
4967269198Sjfv	int error = 0;
4968269198Sjfv
4969269198Sjfv	u8 num_entries;
4970270346Sjfv	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4971269198Sjfv
4972269198Sjfv	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4973269198Sjfv	if (!buf) {
4974269198Sjfv		device_printf(dev, "Could not allocate sbuf for output.\n");
4975269198Sjfv		return (ENOMEM);
4976269198Sjfv	}
4977269198Sjfv
4978277084Sjfv	bzero(resp, sizeof(resp));
4979269198Sjfv	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4980269198Sjfv				resp,
4981270346Sjfv				IXL_SW_RES_SIZE,
4982269198Sjfv				NULL);
4983269198Sjfv	if (error) {
4984279858Sjfv		device_printf(dev,
4985279858Sjfv		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4986269198Sjfv		    __func__, error, hw->aq.asq_last_status);
4987269198Sjfv		sbuf_delete(buf);
4988269198Sjfv		return error;
4989269198Sjfv	}
4990269198Sjfv
4991277084Sjfv	/* Sort entries by type for display */
4992277084Sjfv	qsort(resp, num_entries,
4993277084Sjfv	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
4994277084Sjfv	    &ixl_res_alloc_cmp);
4995277084Sjfv
4996269198Sjfv	sbuf_cat(buf, "\n");
4997277084Sjfv	sbuf_printf(buf, "# of entries: %d\n", num_entries);
4998269198Sjfv	sbuf_printf(buf,
4999269198Sjfv	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
5000269198Sjfv	    "     | (this)     | (all) | (this) | (all)       \n");
5001269198Sjfv	for (int i = 0; i < num_entries; i++) {
5002269198Sjfv		sbuf_printf(buf,
5003269198Sjfv		    "%#4x | %10d   %5d   %6d   %12d",
5004269198Sjfv		    resp[i].resource_type,
5005269198Sjfv		    resp[i].guaranteed,
5006269198Sjfv		    resp[i].total,
5007269198Sjfv		    resp[i].used,
5008269198Sjfv		    resp[i].total_unalloced);
5009269198Sjfv		if (i < num_entries - 1)
5010269198Sjfv			sbuf_cat(buf, "\n");
5011269198Sjfv	}
5012269198Sjfv
5013269198Sjfv	error = sbuf_finish(buf);
5014269198Sjfv	if (error) {
5015269198Sjfv		device_printf(dev, "Error finishing sbuf: %d\n", error);
5016269198Sjfv		sbuf_delete(buf);
5017269198Sjfv		return error;
5018269198Sjfv	}
5019269198Sjfv
5020269198Sjfv	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5021269198Sjfv	if (error)
5022269198Sjfv		device_printf(dev, "sysctl error: %d\n", error);
5023269198Sjfv	sbuf_delete(buf);
5024269198Sjfv	return error;
5025274205Sjfv}
5026269198Sjfv
5027274205Sjfv/*
5028274205Sjfv** Caller must init and delete sbuf; this function will clear and
5029274205Sjfv** finish it for caller.
5030274205Sjfv*/
5031274205Sjfvstatic char *
5032274205Sjfvixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
5033274205Sjfv{
5034274205Sjfv	sbuf_clear(s);
5035274205Sjfv
5036274205Sjfv	if (seid == 0 && uplink)
5037274205Sjfv		sbuf_cat(s, "Network");
5038274205Sjfv	else if (seid == 0)
5039274205Sjfv		sbuf_cat(s, "Host");
5040274205Sjfv	else if (seid == 1)
5041274205Sjfv		sbuf_cat(s, "EMP");
5042274205Sjfv	else if (seid <= 5)
5043274205Sjfv		sbuf_printf(s, "MAC %d", seid - 2);
5044274205Sjfv	else if (seid <= 15)
5045274205Sjfv		sbuf_cat(s, "Reserved");
5046274205Sjfv	else if (seid <= 31)
5047274205Sjfv		sbuf_printf(s, "PF %d", seid - 16);
5048274205Sjfv	else if (seid <= 159)
5049274205Sjfv		sbuf_printf(s, "VF %d", seid - 32);
5050274205Sjfv	else if (seid <= 287)
5051274205Sjfv		sbuf_cat(s, "Reserved");
5052274205Sjfv	else if (seid <= 511)
5053274205Sjfv		sbuf_cat(s, "Other"); // for other structures
5054274205Sjfv	else if (seid <= 895)
5055274205Sjfv		sbuf_printf(s, "VSI %d", seid - 512);
5056274205Sjfv	else if (seid <= 1023)
5057274205Sjfv		sbuf_printf(s, "Reserved");
5058274205Sjfv	else
5059274205Sjfv		sbuf_cat(s, "Invalid");
5060274205Sjfv
5061274205Sjfv	sbuf_finish(s);
5062274205Sjfv	return sbuf_data(s);
5063269198Sjfv}
5064269198Sjfv
5065274205Sjfvstatic int
5066274205Sjfvixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5067274205Sjfv{
5068274205Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5069274205Sjfv	struct i40e_hw *hw = &pf->hw;
5070274205Sjfv	device_t dev = pf->dev;
5071274205Sjfv	struct sbuf *buf;
5072274205Sjfv	struct sbuf *nmbuf;
5073274205Sjfv	int error = 0;
5074274205Sjfv	u8 aq_buf[I40E_AQ_LARGE_BUF];
5075274205Sjfv
5076274205Sjfv	u16 next = 0;
5077274205Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
5078274205Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5079274205Sjfv
5080274205Sjfv	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5081274205Sjfv	if (!buf) {
5082274205Sjfv		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5083274205Sjfv		return (ENOMEM);
5084274205Sjfv	}
5085274205Sjfv
5086274205Sjfv	error = i40e_aq_get_switch_config(hw, sw_config,
5087274205Sjfv	    sizeof(aq_buf), &next, NULL);
5088274205Sjfv	if (error) {
5089279858Sjfv		device_printf(dev,
5090279858Sjfv		    "%s: aq_get_switch_config() error %d, aq error %d\n",
5091274205Sjfv		    __func__, error, hw->aq.asq_last_status);
5092274205Sjfv		sbuf_delete(buf);
5093274205Sjfv		return error;
5094274205Sjfv	}
5095274205Sjfv
5096274205Sjfv	nmbuf = sbuf_new_auto();
5097274205Sjfv	if (!nmbuf) {
5098274205Sjfv		device_printf(dev, "Could not allocate sbuf for name output.\n");
5099274205Sjfv		return (ENOMEM);
5100274205Sjfv	}
5101274205Sjfv
5102274205Sjfv	sbuf_cat(buf, "\n");
5103274205Sjfv	// Assuming <= 255 elements in switch
5104274205Sjfv	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5105274205Sjfv	/* Exclude:
5106274205Sjfv	** Revision -- all elements are revision 1 for now
5107274205Sjfv	*/
5108274205Sjfv	sbuf_printf(buf,
5109274205Sjfv	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5110274205Sjfv	    "                |          |          | (uplink)\n");
5111274205Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
5112274205Sjfv		// "%4d (%8s) | %8s   %8s   %#8x",
5113274205Sjfv		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5114274205Sjfv		sbuf_cat(buf, " ");
5115279858Sjfv		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5116279858Sjfv		    sw_config->element[i].seid, false));
5117274205Sjfv		sbuf_cat(buf, " | ");
5118279858Sjfv		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5119279858Sjfv		    sw_config->element[i].uplink_seid, true));
5120274205Sjfv		sbuf_cat(buf, "   ");
5121279858Sjfv		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5122279858Sjfv		    sw_config->element[i].downlink_seid, false));
5123274205Sjfv		sbuf_cat(buf, "   ");
5124274205Sjfv		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5125274205Sjfv		if (i < sw_config->header.num_reported - 1)
5126274205Sjfv			sbuf_cat(buf, "\n");
5127274205Sjfv	}
5128274205Sjfv	sbuf_delete(nmbuf);
5129274205Sjfv
5130274205Sjfv	error = sbuf_finish(buf);
5131274205Sjfv	if (error) {
5132274205Sjfv		device_printf(dev, "Error finishing sbuf: %d\n", error);
5133274205Sjfv		sbuf_delete(buf);
5134274205Sjfv		return error;
5135274205Sjfv	}
5136274205Sjfv
5137274205Sjfv	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5138274205Sjfv	if (error)
5139274205Sjfv		device_printf(dev, "sysctl error: %d\n", error);
5140274205Sjfv	sbuf_delete(buf);
5141274205Sjfv
5142274205Sjfv	return (error);
5143274205Sjfv}
5144279858Sjfv#endif /* IXL_DEBUG_SYSCTL */
5145274205Sjfv
5146279858Sjfv
5147279858Sjfv#ifdef PCI_IOV
5148269198Sjfvstatic int
5149279858Sjfvixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5150269198Sjfv{
5151279858Sjfv	struct i40e_hw *hw;
5152279858Sjfv	struct ixl_vsi *vsi;
5153279858Sjfv	struct i40e_vsi_context vsi_ctx;
5154279858Sjfv	int i;
5155279858Sjfv	uint16_t first_queue;
5156279858Sjfv	enum i40e_status_code code;
5157269198Sjfv
5158279858Sjfv	hw = &pf->hw;
5159279858Sjfv	vsi = &pf->vsi;
5160269198Sjfv
5161279858Sjfv	vsi_ctx.pf_num = hw->pf_id;
5162279858Sjfv	vsi_ctx.uplink_seid = pf->veb_seid;
5163279858Sjfv	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5164279858Sjfv	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5165279858Sjfv	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5166279858Sjfv
5167279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5168279858Sjfv
5169279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5170279858Sjfv	vsi_ctx.info.switch_id = htole16(0);
5171279858Sjfv
5172279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5173279858Sjfv	vsi_ctx.info.sec_flags = 0;
5174279858Sjfv	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5175279858Sjfv		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5176279858Sjfv
5177279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5178279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5179279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5180279858Sjfv
5181279858Sjfv	vsi_ctx.info.valid_sections |=
5182279858Sjfv	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5183279858Sjfv	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5184279858Sjfv	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5185279858Sjfv	for (i = 0; i < IXLV_MAX_QUEUES; i++)
5186279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
5187279858Sjfv	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5188279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5189279858Sjfv
5190279858Sjfv	vsi_ctx.info.tc_mapping[0] = htole16(
5191279858Sjfv	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5192279858Sjfv	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5193279858Sjfv
5194279858Sjfv	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5195279858Sjfv	if (code != I40E_SUCCESS)
5196279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5197279858Sjfv	vf->vsi.seid = vsi_ctx.seid;
5198279858Sjfv	vf->vsi.vsi_num = vsi_ctx.vsi_number;
5199279858Sjfv	vf->vsi.first_queue = first_queue;
5200279858Sjfv	vf->vsi.num_queues = IXLV_MAX_QUEUES;
5201279858Sjfv
5202279858Sjfv	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5203279858Sjfv	if (code != I40E_SUCCESS)
5204279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5205279858Sjfv
5206279858Sjfv	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5207279858Sjfv	if (code != I40E_SUCCESS) {
5208279858Sjfv		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5209279858Sjfv		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5210279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5211269198Sjfv	}
5212269198Sjfv
5213279858Sjfv	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
5214279858Sjfv	return (0);
5215279858Sjfv}
5216279858Sjfv
5217279858Sjfvstatic int
5218279858Sjfvixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5219279858Sjfv{
5220279858Sjfv	struct i40e_hw *hw;
5221279858Sjfv	int error;
5222279858Sjfv
5223279858Sjfv	hw = &pf->hw;
5224279858Sjfv
5225279858Sjfv	error = ixl_vf_alloc_vsi(pf, vf);
5226279858Sjfv	if (error != 0)
5227269198Sjfv		return (error);
5228279858Sjfv
5229279858Sjfv	vf->vsi.hw_filters_add = 0;
5230279858Sjfv	vf->vsi.hw_filters_del = 0;
5231279858Sjfv	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5232279858Sjfv	ixl_reconfigure_filters(&vf->vsi);
5233279858Sjfv
5234279858Sjfv	return (0);
5235279858Sjfv}
5236279858Sjfv
5237279858Sjfvstatic void
5238279858Sjfvixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5239279858Sjfv    uint32_t val)
5240279858Sjfv{
5241279858Sjfv	uint32_t qtable;
5242279858Sjfv	int index, shift;
5243279858Sjfv
5244279858Sjfv	/*
5245279858Sjfv	 * Two queues are mapped in a single register, so we have to do some
5246279858Sjfv	 * gymnastics to convert the queue number into a register index and
5247279858Sjfv	 * shift.
5248279858Sjfv	 */
5249279858Sjfv	index = qnum / 2;
5250279858Sjfv	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5251279858Sjfv
5252279858Sjfv	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5253279858Sjfv	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5254279858Sjfv	qtable |= val << shift;
5255279858Sjfv	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5256279858Sjfv}
5257279858Sjfv
5258279858Sjfvstatic void
5259279858Sjfvixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5260279858Sjfv{
5261279858Sjfv	struct i40e_hw *hw;
5262279858Sjfv	uint32_t qtable;
5263279858Sjfv	int i;
5264279858Sjfv
5265279858Sjfv	hw = &pf->hw;
5266279858Sjfv
5267279858Sjfv	/*
5268279858Sjfv	 * Contiguous mappings aren't actually supported by the hardware,
5269279858Sjfv	 * so we have to use non-contiguous mappings.
5270279858Sjfv	 */
5271279858Sjfv	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5272279858Sjfv	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5273279858Sjfv
5274279858Sjfv	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5275279858Sjfv	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
5276279858Sjfv
5277279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++) {
5278279858Sjfv		qtable = (vf->vsi.first_queue + i) <<
5279279858Sjfv		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5280279858Sjfv
5281279858Sjfv		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5282279858Sjfv	}
5283279858Sjfv
5284279858Sjfv	/* Map queues allocated to VF to its VSI. */
5285279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++)
5286279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5287279858Sjfv
5288279858Sjfv	/* Set rest of VSI queues as unused. */
5289279858Sjfv	for (; i < IXL_MAX_VSI_QUEUES; i++)
5290279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i,
5291279858Sjfv		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
5292279858Sjfv
5293279858Sjfv	ixl_flush(hw);
5294279858Sjfv}
5295279858Sjfv
5296279858Sjfvstatic void
5297279858Sjfvixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5298279858Sjfv{
5299279858Sjfv	struct i40e_hw *hw;
5300279858Sjfv
5301279858Sjfv	hw = &pf->hw;
5302279858Sjfv
5303279858Sjfv	if (vsi->seid == 0)
5304279858Sjfv		return;
5305279858Sjfv
5306279858Sjfv	i40e_aq_delete_element(hw, vsi->seid, NULL);
5307279858Sjfv}
5308279858Sjfv
5309279858Sjfvstatic void
5310279858Sjfvixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5311279858Sjfv{
5312279858Sjfv
5313279858Sjfv	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
5314279858Sjfv	ixl_flush(hw);
5315279858Sjfv}
5316279858Sjfv
5317279858Sjfvstatic void
5318279858Sjfvixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5319279858Sjfv{
5320279858Sjfv
5321279858Sjfv	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5322279858Sjfv	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
5323279858Sjfv	ixl_flush(hw);
5324279858Sjfv}
5325279858Sjfv
5326279858Sjfvstatic void
5327279858Sjfvixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5328279858Sjfv{
5329279858Sjfv	struct i40e_hw *hw;
5330279858Sjfv	uint32_t vfint_reg, vpint_reg;
5331279858Sjfv	int i;
5332279858Sjfv
5333279858Sjfv	hw = &pf->hw;
5334279858Sjfv
5335279858Sjfv	ixl_vf_vsi_release(pf, &vf->vsi);
5336279858Sjfv
5337279858Sjfv	/* Index 0 has a special register. */
5338279858Sjfv	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5339279858Sjfv
5340279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5341279858Sjfv		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
5342279858Sjfv		ixl_vf_disable_queue_intr(hw, vfint_reg);
5343279858Sjfv	}
5344279858Sjfv
5345279858Sjfv	/* Index 0 has a special register. */
5346279858Sjfv	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5347279858Sjfv
5348279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5349279858Sjfv		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5350279858Sjfv		ixl_vf_unregister_intr(hw, vpint_reg);
5351279858Sjfv	}
5352279858Sjfv
5353279858Sjfv	vf->vsi.num_queues = 0;
5354279858Sjfv}
5355279858Sjfv
5356279858Sjfvstatic int
5357279858Sjfvixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5358279858Sjfv{
5359279858Sjfv	struct i40e_hw *hw;
5360279858Sjfv	int i;
5361279858Sjfv	uint16_t global_vf_num;
5362279858Sjfv	uint32_t ciad;
5363279858Sjfv
5364279858Sjfv	hw = &pf->hw;
5365279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5366279858Sjfv
5367279858Sjfv	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5368279858Sjfv	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5369279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5370279858Sjfv		ciad = rd32(hw, I40E_PF_PCI_CIAD);
5371279858Sjfv		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5372279858Sjfv			return (0);
5373279858Sjfv		DELAY(1);
5374279858Sjfv	}
5375279858Sjfv
5376279858Sjfv	return (ETIMEDOUT);
5377279858Sjfv}
5378279858Sjfv
5379279858Sjfvstatic void
5380279858Sjfvixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5381279858Sjfv{
5382279858Sjfv	struct i40e_hw *hw;
5383279858Sjfv	uint32_t vfrtrig;
5384279858Sjfv
5385279858Sjfv	hw = &pf->hw;
5386279858Sjfv
5387279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5388279858Sjfv	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5389279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5390279858Sjfv	ixl_flush(hw);
5391279858Sjfv
5392279858Sjfv	ixl_reinit_vf(pf, vf);
5393279858Sjfv}
5394279858Sjfv
5395279858Sjfvstatic void
5396279858Sjfvixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5397279858Sjfv{
5398279858Sjfv	struct i40e_hw *hw;
5399279858Sjfv	uint32_t vfrstat, vfrtrig;
5400279858Sjfv	int i, error;
5401279858Sjfv
5402279858Sjfv	hw = &pf->hw;
5403279858Sjfv
5404279858Sjfv	error = ixl_flush_pcie(pf, vf);
5405279858Sjfv	if (error != 0)
5406279858Sjfv		device_printf(pf->dev,
5407279858Sjfv		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
5408279858Sjfv		    vf->vf_num);
5409279858Sjfv
5410279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5411279858Sjfv		DELAY(10);
5412279858Sjfv
5413279858Sjfv		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5414279858Sjfv		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5415279858Sjfv			break;
5416279858Sjfv	}
5417279858Sjfv
5418279858Sjfv	if (i == IXL_VF_RESET_TIMEOUT)
5419279858Sjfv		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5420279858Sjfv
5421279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5422279858Sjfv
5423279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5424279858Sjfv	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5425279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5426279858Sjfv
5427279858Sjfv	if (vf->vsi.seid != 0)
5428279858Sjfv		ixl_disable_rings(&vf->vsi);
5429279858Sjfv
5430279858Sjfv	ixl_vf_release_resources(pf, vf);
5431279858Sjfv	ixl_vf_setup_vsi(pf, vf);
5432279858Sjfv	ixl_vf_map_queues(pf, vf);
5433279858Sjfv
5434279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
5435279858Sjfv	ixl_flush(hw);
5436279858Sjfv}
5437279858Sjfv
5438279858Sjfvstatic const char *
5439279858Sjfvixl_vc_opcode_str(uint16_t op)
5440279858Sjfv{
5441279858Sjfv
5442279858Sjfv	switch (op) {
5443279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
5444279858Sjfv		return ("VERSION");
5445279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
5446279858Sjfv		return ("RESET_VF");
5447279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5448279858Sjfv		return ("GET_VF_RESOURCES");
5449279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5450279858Sjfv		return ("CONFIG_TX_QUEUE");
5451279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5452279858Sjfv		return ("CONFIG_RX_QUEUE");
5453279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5454279858Sjfv		return ("CONFIG_VSI_QUEUES");
5455279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5456279858Sjfv		return ("CONFIG_IRQ_MAP");
5457279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5458279858Sjfv		return ("ENABLE_QUEUES");
5459279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5460279858Sjfv		return ("DISABLE_QUEUES");
5461279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5462279858Sjfv		return ("ADD_ETHER_ADDRESS");
5463279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5464279858Sjfv		return ("DEL_ETHER_ADDRESS");
5465279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
5466279858Sjfv		return ("ADD_VLAN");
5467279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
5468279858Sjfv		return ("DEL_VLAN");
5469279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5470279858Sjfv		return ("CONFIG_PROMISCUOUS_MODE");
5471279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5472279858Sjfv		return ("GET_STATS");
5473279858Sjfv	case I40E_VIRTCHNL_OP_FCOE:
5474279858Sjfv		return ("FCOE");
5475279858Sjfv	case I40E_VIRTCHNL_OP_EVENT:
5476279858Sjfv		return ("EVENT");
5477279858Sjfv	default:
5478279858Sjfv		return ("UNKNOWN");
5479279858Sjfv	}
5480279858Sjfv}
5481279858Sjfv
5482279858Sjfvstatic int
5483279858Sjfvixl_vc_opcode_level(uint16_t opcode)
5484279858Sjfv{
5485279858Sjfv
5486279858Sjfv	switch (opcode) {
5487279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5488279858Sjfv		return (10);
5489279858Sjfv	default:
5490279858Sjfv		return (5);
5491279858Sjfv	}
5492279858Sjfv}
5493279858Sjfv
5494279858Sjfvstatic void
5495279858Sjfvixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5496279858Sjfv    enum i40e_status_code status, void *msg, uint16_t len)
5497279858Sjfv{
5498279858Sjfv	struct i40e_hw *hw;
5499279858Sjfv	int global_vf_id;
5500279858Sjfv
5501279858Sjfv	hw = &pf->hw;
5502279858Sjfv	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5503279858Sjfv
5504279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5505279858Sjfv	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5506279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
5507279858Sjfv
5508279858Sjfv	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
5509279858Sjfv}
5510279858Sjfv
5511279858Sjfvstatic void
5512279858Sjfvixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5513279858Sjfv{
5514279858Sjfv
5515279858Sjfv	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
5516279858Sjfv}
5517279858Sjfv
5518279858Sjfvstatic void
5519279858Sjfvixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5520279858Sjfv    enum i40e_status_code status, const char *file, int line)
5521279858Sjfv{
5522279858Sjfv
5523279858Sjfv	I40E_VC_DEBUG(pf, 1,
5524279858Sjfv	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5525279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5526279858Sjfv	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
5527279858Sjfv}
5528279858Sjfv
5529279858Sjfvstatic void
5530279858Sjfvixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5531279858Sjfv    uint16_t msg_size)
5532279858Sjfv{
5533279858Sjfv	struct i40e_virtchnl_version_info reply;
5534279858Sjfv
5535279858Sjfv	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5536279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5537279858Sjfv		    I40E_ERR_PARAM);
5538279858Sjfv		return;
5539279858Sjfv	}
5540279858Sjfv
5541279858Sjfv	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5542279858Sjfv	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5543279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
5544279858Sjfv	    sizeof(reply));
5545279858Sjfv}
5546279858Sjfv
5547279858Sjfvstatic void
5548279858Sjfvixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5549279858Sjfv    uint16_t msg_size)
5550279858Sjfv{
5551279858Sjfv
5552279858Sjfv	if (msg_size != 0) {
5553279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5554279858Sjfv		    I40E_ERR_PARAM);
5555279858Sjfv		return;
5556279858Sjfv	}
5557279858Sjfv
5558279858Sjfv	ixl_reset_vf(pf, vf);
5559279858Sjfv
5560279858Sjfv	/* No response to a reset message. */
5561279858Sjfv}
5562279858Sjfv
5563279858Sjfvstatic void
5564279858Sjfvixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5565279858Sjfv    uint16_t msg_size)
5566279858Sjfv{
5567279858Sjfv	struct i40e_virtchnl_vf_resource reply;
5568279858Sjfv
5569279858Sjfv	if (msg_size != 0) {
5570279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5571279858Sjfv		    I40E_ERR_PARAM);
5572279858Sjfv		return;
5573279858Sjfv	}
5574279858Sjfv
5575279858Sjfv	bzero(&reply, sizeof(reply));
5576279858Sjfv
5577279858Sjfv	reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5578279858Sjfv
5579279858Sjfv	reply.num_vsis = 1;
5580279858Sjfv	reply.num_queue_pairs = vf->vsi.num_queues;
5581279858Sjfv	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5582279858Sjfv	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5583279858Sjfv	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5584279858Sjfv	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5585279858Sjfv	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5586279858Sjfv
5587279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5588279858Sjfv	    I40E_SUCCESS, &reply, sizeof(reply));
5589279858Sjfv}
5590279858Sjfv
5591279858Sjfvstatic int
5592279858Sjfvixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5593279858Sjfv    struct i40e_virtchnl_txq_info *info)
5594279858Sjfv{
5595279858Sjfv	struct i40e_hw *hw;
5596279858Sjfv	struct i40e_hmc_obj_txq txq;
5597279858Sjfv	uint16_t global_queue_num, global_vf_num;
5598279858Sjfv	enum i40e_status_code status;
5599279858Sjfv	uint32_t qtx_ctl;
5600279858Sjfv
5601279858Sjfv	hw = &pf->hw;
5602279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
5603279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5604279858Sjfv	bzero(&txq, sizeof(txq));
5605279858Sjfv
5606279858Sjfv	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5607279858Sjfv	if (status != I40E_SUCCESS)
5608269198Sjfv		return (EINVAL);
5609279858Sjfv
5610279858Sjfv	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5611279858Sjfv
5612279858Sjfv	txq.head_wb_ena = info->headwb_enabled;
5613279858Sjfv	txq.head_wb_addr = info->dma_headwb_addr;
5614279858Sjfv	txq.qlen = info->ring_len;
5615279858Sjfv	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5616279858Sjfv	txq.rdylist_act = 0;
5617279858Sjfv
5618279858Sjfv	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5619279858Sjfv	if (status != I40E_SUCCESS)
5620279858Sjfv		return (EINVAL);
5621279858Sjfv
5622279858Sjfv	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5623279858Sjfv	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5624279858Sjfv	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5625279858Sjfv	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
5626279858Sjfv	ixl_flush(hw);
5627279858Sjfv
5628279858Sjfv	return (0);
5629279858Sjfv}
5630279858Sjfv
5631279858Sjfvstatic int
5632279858Sjfvixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5633279858Sjfv    struct i40e_virtchnl_rxq_info *info)
5634279858Sjfv{
5635279858Sjfv	struct i40e_hw *hw;
5636279858Sjfv	struct i40e_hmc_obj_rxq rxq;
5637279858Sjfv	uint16_t global_queue_num;
5638279858Sjfv	enum i40e_status_code status;
5639279858Sjfv
5640279858Sjfv	hw = &pf->hw;
5641279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
5642279858Sjfv	bzero(&rxq, sizeof(rxq));
5643279858Sjfv
5644279858Sjfv	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5645279858Sjfv		return (EINVAL);
5646279858Sjfv
5647279858Sjfv	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5648279858Sjfv	    info->max_pkt_size < ETHER_MIN_LEN)
5649279858Sjfv		return (EINVAL);
5650279858Sjfv
5651279858Sjfv	if (info->splithdr_enabled) {
5652279858Sjfv		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
5653279858Sjfv			return (EINVAL);
5654279858Sjfv
5655279858Sjfv		rxq.hsplit_0 = info->rx_split_pos &
5656279858Sjfv		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5657279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5658279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5659279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5660279858Sjfv		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5661279858Sjfv
5662279858Sjfv		rxq.dtype = 2;
5663269198Sjfv	}
5664269198Sjfv
5665279858Sjfv	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5666279858Sjfv	if (status != I40E_SUCCESS)
5667279858Sjfv		return (EINVAL);
5668269198Sjfv
5669279858Sjfv	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5670279858Sjfv	rxq.qlen = info->ring_len;
5671269198Sjfv
5672279858Sjfv	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5673269198Sjfv
5674279858Sjfv	rxq.dsize = 1;
5675279858Sjfv	rxq.crcstrip = 1;
5676279858Sjfv	rxq.l2tsel = 1;
5677269198Sjfv
5678279858Sjfv	rxq.rxmax = info->max_pkt_size;
5679279858Sjfv	rxq.tphrdesc_ena = 1;
5680279858Sjfv	rxq.tphwdesc_ena = 1;
5681279858Sjfv	rxq.tphdata_ena = 1;
5682279858Sjfv	rxq.tphhead_ena = 1;
5683279858Sjfv	rxq.lrxqthresh = 2;
5684279858Sjfv	rxq.prefena = 1;
5685279858Sjfv
5686279858Sjfv	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5687279858Sjfv	if (status != I40E_SUCCESS)
5688279858Sjfv		return (EINVAL);
5689279858Sjfv
5690279858Sjfv	return (0);
5691279858Sjfv}
5692279858Sjfv
5693279858Sjfvstatic void
5694279858Sjfvixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5695279858Sjfv    uint16_t msg_size)
5696279858Sjfv{
5697279858Sjfv	struct i40e_virtchnl_vsi_queue_config_info *info;
5698279858Sjfv	struct i40e_virtchnl_queue_pair_info *pair;
5699279858Sjfv	int i;
5700279858Sjfv
5701279858Sjfv	if (msg_size < sizeof(*info)) {
5702279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5703279858Sjfv		    I40E_ERR_PARAM);
5704279858Sjfv		return;
5705279858Sjfv	}
5706279858Sjfv
5707279858Sjfv	info = msg;
5708279858Sjfv	if (info->num_queue_pairs == 0) {
5709279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5710279858Sjfv		    I40E_ERR_PARAM);
5711279858Sjfv		return;
5712279858Sjfv	}
5713279858Sjfv
5714279858Sjfv	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5715279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5716279858Sjfv		    I40E_ERR_PARAM);
5717279858Sjfv		return;
5718279858Sjfv	}
5719279858Sjfv
5720279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
5721279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5722279858Sjfv		    I40E_ERR_PARAM);
5723279858Sjfv		return;
5724279858Sjfv	}
5725279858Sjfv
5726279858Sjfv	for (i = 0; i < info->num_queue_pairs; i++) {
5727279858Sjfv		pair = &info->qpair[i];
5728279858Sjfv
5729279858Sjfv		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5730279858Sjfv		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
5731279858Sjfv		    pair->txq.queue_id != pair->rxq.queue_id ||
5732279858Sjfv		    pair->txq.queue_id >= vf->vsi.num_queues) {
5733279858Sjfv
5734279858Sjfv			i40e_send_vf_nack(pf, vf,
5735279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5736279858Sjfv			return;
5737279858Sjfv		}
5738279858Sjfv
5739279858Sjfv		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5740279858Sjfv			i40e_send_vf_nack(pf, vf,
5741279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5742279858Sjfv			return;
5743279858Sjfv		}
5744279858Sjfv
5745279858Sjfv		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5746279858Sjfv			i40e_send_vf_nack(pf, vf,
5747279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5748279858Sjfv			return;
5749279858Sjfv		}
5750279858Sjfv	}
5751279858Sjfv
5752279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
5753279858Sjfv}
5754279858Sjfv
5755279858Sjfvstatic void
5756279858Sjfvixl_vf_set_qctl(struct ixl_pf *pf,
5757279858Sjfv    const struct i40e_virtchnl_vector_map *vector,
5758279858Sjfv    enum i40e_queue_type cur_type, uint16_t cur_queue,
5759279858Sjfv    enum i40e_queue_type *last_type, uint16_t *last_queue)
5760279858Sjfv{
5761279858Sjfv	uint32_t offset, qctl;
5762279858Sjfv	uint16_t itr_indx;
5763279858Sjfv
5764279858Sjfv	if (cur_type == I40E_QUEUE_TYPE_RX) {
5765279858Sjfv		offset = I40E_QINT_RQCTL(cur_queue);
5766279858Sjfv		itr_indx = vector->rxitr_idx;
5767279858Sjfv	} else {
5768279858Sjfv		offset = I40E_QINT_TQCTL(cur_queue);
5769279858Sjfv		itr_indx = vector->txitr_idx;
5770279858Sjfv	}
5771279858Sjfv
5772279858Sjfv	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5773279858Sjfv	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5774279858Sjfv	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5775279858Sjfv	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5776279858Sjfv	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5777279858Sjfv
5778279858Sjfv	wr32(&pf->hw, offset, qctl);
5779279858Sjfv
5780279858Sjfv	*last_type = cur_type;
5781279858Sjfv	*last_queue = cur_queue;
5782279858Sjfv}
5783279858Sjfv
5784279858Sjfvstatic void
5785279858Sjfvixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5786279858Sjfv    const struct i40e_virtchnl_vector_map *vector)
5787279858Sjfv{
5788279858Sjfv	struct i40e_hw *hw;
5789279858Sjfv	u_int qindex;
5790279858Sjfv	enum i40e_queue_type type, last_type;
5791279858Sjfv	uint32_t lnklst_reg;
5792279858Sjfv	uint16_t rxq_map, txq_map, cur_queue, last_queue;
5793279858Sjfv
5794279858Sjfv	hw = &pf->hw;
5795279858Sjfv
5796279858Sjfv	rxq_map = vector->rxq_map;
5797279858Sjfv	txq_map = vector->txq_map;
5798279858Sjfv
5799279858Sjfv	last_queue = IXL_END_OF_INTR_LNKLST;
5800279858Sjfv	last_type = I40E_QUEUE_TYPE_RX;
5801279858Sjfv
5802279858Sjfv	/*
5803279858Sjfv	 * The datasheet says to optimize performance, RX queues and TX queues
5804279858Sjfv	 * should be interleaved in the interrupt linked list, so we process
5805279858Sjfv	 * both at once here.
5806279858Sjfv	 */
5807279858Sjfv	while ((rxq_map != 0) || (txq_map != 0)) {
5808279858Sjfv		if (txq_map != 0) {
5809279858Sjfv			qindex = ffs(txq_map) - 1;
5810279858Sjfv			type = I40E_QUEUE_TYPE_TX;
5811279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
5812279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5813279858Sjfv			    &last_type, &last_queue);
5814279858Sjfv			txq_map &= ~(1 << qindex);
5815279858Sjfv		}
5816279858Sjfv
5817279858Sjfv		if (rxq_map != 0) {
5818279858Sjfv			qindex = ffs(rxq_map) - 1;
5819279858Sjfv			type = I40E_QUEUE_TYPE_RX;
5820279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
5821279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5822279858Sjfv			    &last_type, &last_queue);
5823279858Sjfv			rxq_map &= ~(1 << qindex);
5824279858Sjfv		}
5825279858Sjfv	}
5826279858Sjfv
5827279858Sjfv	if (vector->vector_id == 0)
5828279858Sjfv		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5829279858Sjfv	else
5830279858Sjfv		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5831279858Sjfv		    vf->vf_num);
5832279858Sjfv	wr32(hw, lnklst_reg,
5833279858Sjfv	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5834279858Sjfv	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
5835279858Sjfv
5836279858Sjfv	ixl_flush(hw);
5837279858Sjfv}
5838279858Sjfv
5839279858Sjfvstatic void
5840279858Sjfvixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5841279858Sjfv    uint16_t msg_size)
5842279858Sjfv{
5843279858Sjfv	struct i40e_virtchnl_irq_map_info *map;
5844279858Sjfv	struct i40e_virtchnl_vector_map *vector;
5845279858Sjfv	struct i40e_hw *hw;
5846279858Sjfv	int i, largest_txq, largest_rxq;
5847279858Sjfv
5848279858Sjfv	hw = &pf->hw;
5849279858Sjfv
5850279858Sjfv	if (msg_size < sizeof(*map)) {
5851279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5852279858Sjfv		    I40E_ERR_PARAM);
5853279858Sjfv		return;
5854279858Sjfv	}
5855279858Sjfv
5856279858Sjfv	map = msg;
5857279858Sjfv	if (map->num_vectors == 0) {
5858279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5859279858Sjfv		    I40E_ERR_PARAM);
5860279858Sjfv		return;
5861279858Sjfv	}
5862279858Sjfv
5863279858Sjfv	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5864279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5865279858Sjfv		    I40E_ERR_PARAM);
5866279858Sjfv		return;
5867279858Sjfv	}
5868279858Sjfv
5869279858Sjfv	for (i = 0; i < map->num_vectors; i++) {
5870279858Sjfv		vector = &map->vecmap[i];
5871279858Sjfv
5872279858Sjfv		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
5873279858Sjfv		    vector->vsi_id != vf->vsi.vsi_num) {
5874279858Sjfv			i40e_send_vf_nack(pf, vf,
5875279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
5876279858Sjfv			return;
5877279858Sjfv		}
5878279858Sjfv
5879279858Sjfv		if (vector->rxq_map != 0) {
5880279858Sjfv			largest_rxq = fls(vector->rxq_map) - 1;
5881279858Sjfv			if (largest_rxq >= vf->vsi.num_queues) {
5882279858Sjfv				i40e_send_vf_nack(pf, vf,
5883279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5884279858Sjfv				    I40E_ERR_PARAM);
5885279858Sjfv				return;
5886279858Sjfv			}
5887279858Sjfv		}
5888279858Sjfv
5889279858Sjfv		if (vector->txq_map != 0) {
5890279858Sjfv			largest_txq = fls(vector->txq_map) - 1;
5891279858Sjfv			if (largest_txq >= vf->vsi.num_queues) {
5892279858Sjfv				i40e_send_vf_nack(pf, vf,
5893279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5894279858Sjfv				    I40E_ERR_PARAM);
5895279858Sjfv				return;
5896279858Sjfv			}
5897279858Sjfv		}
5898279858Sjfv
5899279858Sjfv		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
5900279858Sjfv		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
5901279858Sjfv			i40e_send_vf_nack(pf, vf,
5902279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5903279858Sjfv			    I40E_ERR_PARAM);
5904279858Sjfv			return;
5905279858Sjfv		}
5906279858Sjfv
5907279858Sjfv		ixl_vf_config_vector(pf, vf, vector);
5908279858Sjfv	}
5909279858Sjfv
5910279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
5911279858Sjfv}
5912279858Sjfv
5913279858Sjfvstatic void
5914279858Sjfvixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5915279858Sjfv    uint16_t msg_size)
5916279858Sjfv{
5917279858Sjfv	struct i40e_virtchnl_queue_select *select;
5918279858Sjfv	int error;
5919279858Sjfv
5920279858Sjfv	if (msg_size != sizeof(*select)) {
5921279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5922279858Sjfv		    I40E_ERR_PARAM);
5923279858Sjfv		return;
5924279858Sjfv	}
5925279858Sjfv
5926279858Sjfv	select = msg;
5927279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
5928279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
5929279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5930279858Sjfv		    I40E_ERR_PARAM);
5931279858Sjfv		return;
5932279858Sjfv	}
5933279858Sjfv
5934279858Sjfv	error = ixl_enable_rings(&vf->vsi);
5935269198Sjfv	if (error) {
5936279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5937279858Sjfv		    I40E_ERR_TIMEOUT);
5938279858Sjfv		return;
5939269198Sjfv	}
5940269198Sjfv
5941279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
5942269198Sjfv}
5943266423Sjfv
5944279858Sjfvstatic void
5945279858Sjfvixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
5946279858Sjfv    void *msg, uint16_t msg_size)
5947279858Sjfv{
5948279858Sjfv	struct i40e_virtchnl_queue_select *select;
5949279858Sjfv	int error;
5950279858Sjfv
5951279858Sjfv	if (msg_size != sizeof(*select)) {
5952279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5953279858Sjfv		    I40E_ERR_PARAM);
5954279858Sjfv		return;
5955279858Sjfv	}
5956279858Sjfv
5957279858Sjfv	select = msg;
5958279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
5959279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
5960279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5961279858Sjfv		    I40E_ERR_PARAM);
5962279858Sjfv		return;
5963279858Sjfv	}
5964279858Sjfv
5965279858Sjfv	error = ixl_disable_rings(&vf->vsi);
5966279858Sjfv	if (error) {
5967279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5968279858Sjfv		    I40E_ERR_TIMEOUT);
5969279858Sjfv		return;
5970279858Sjfv	}
5971279858Sjfv
5972279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
5973279858Sjfv}
5974279858Sjfv
5975279858Sjfvstatic boolean_t
5976279858Sjfvixl_zero_mac(const uint8_t *addr)
5977279858Sjfv{
5978279858Sjfv	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
5979279858Sjfv
5980279858Sjfv	return (cmp_etheraddr(addr, zero));
5981279858Sjfv}
5982279858Sjfv
5983279858Sjfvstatic boolean_t
5984279858Sjfvixl_bcast_mac(const uint8_t *addr)
5985279858Sjfv{
5986279858Sjfv
5987279858Sjfv	return (cmp_etheraddr(addr, ixl_bcast_addr));
5988279858Sjfv}
5989279858Sjfv
5990279858Sjfvstatic int
5991279858Sjfvixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
5992279858Sjfv{
5993279858Sjfv
5994279858Sjfv	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
5995279858Sjfv		return (EINVAL);
5996279858Sjfv
5997279858Sjfv	/*
5998279858Sjfv	 * If the VF is not allowed to change its MAC address, don't let it
5999279858Sjfv	 * set a MAC filter for an address that is not a multicast address and
6000279858Sjfv	 * is not its assigned MAC.
6001279858Sjfv	 */
6002279858Sjfv	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6003279858Sjfv	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6004279858Sjfv		return (EPERM);
6005279858Sjfv
6006279858Sjfv	return (0);
6007279858Sjfv}
6008279858Sjfv
6009279858Sjfvstatic void
6010279858Sjfvixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6011279858Sjfv    uint16_t msg_size)
6012279858Sjfv{
6013279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6014279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6015279858Sjfv	struct ixl_vsi *vsi;
6016279858Sjfv	int i;
6017279858Sjfv	size_t expected_size;
6018279858Sjfv
6019279858Sjfv	vsi = &vf->vsi;
6020279858Sjfv
6021279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6022279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6023279858Sjfv		    I40E_ERR_PARAM);
6024279858Sjfv		return;
6025279858Sjfv	}
6026279858Sjfv
6027279858Sjfv	addr_list = msg;
6028279858Sjfv	expected_size = sizeof(*addr_list) +
6029279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6030279858Sjfv
6031279858Sjfv	if (addr_list->num_elements == 0 ||
6032279858Sjfv	    addr_list->vsi_id != vsi->vsi_num ||
6033279858Sjfv	    msg_size != expected_size) {
6034279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6035279858Sjfv		    I40E_ERR_PARAM);
6036279858Sjfv		return;
6037279858Sjfv	}
6038279858Sjfv
6039279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6040279858Sjfv		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6041279858Sjfv			i40e_send_vf_nack(pf, vf,
6042279858Sjfv			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6043279858Sjfv			return;
6044279858Sjfv		}
6045279858Sjfv	}
6046279858Sjfv
6047279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6048279858Sjfv		addr = &addr_list->list[i];
6049279858Sjfv		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6050279858Sjfv	}
6051279858Sjfv
6052279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6053279858Sjfv}
6054279858Sjfv
6055279858Sjfvstatic void
6056279858Sjfvixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6057279858Sjfv    uint16_t msg_size)
6058279858Sjfv{
6059279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6060279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6061279858Sjfv	size_t expected_size;
6062279858Sjfv	int i;
6063279858Sjfv
6064279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6065279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6066279858Sjfv		    I40E_ERR_PARAM);
6067279858Sjfv		return;
6068279858Sjfv	}
6069279858Sjfv
6070279858Sjfv	addr_list = msg;
6071279858Sjfv	expected_size = sizeof(*addr_list) +
6072279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6073279858Sjfv
6074279858Sjfv	if (addr_list->num_elements == 0 ||
6075279858Sjfv	    addr_list->vsi_id != vf->vsi.vsi_num ||
6076279858Sjfv	    msg_size != expected_size) {
6077279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6078279858Sjfv		    I40E_ERR_PARAM);
6079279858Sjfv		return;
6080279858Sjfv	}
6081279858Sjfv
6082279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6083279858Sjfv		addr = &addr_list->list[i];
6084279858Sjfv		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6085279858Sjfv			i40e_send_vf_nack(pf, vf,
6086279858Sjfv			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6087279858Sjfv			return;
6088279858Sjfv		}
6089279858Sjfv	}
6090279858Sjfv
6091279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6092279858Sjfv		addr = &addr_list->list[i];
6093279858Sjfv		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6094279858Sjfv	}
6095279858Sjfv
6096279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6097279858Sjfv}
6098279858Sjfv
6099279858Sjfvstatic enum i40e_status_code
6100279858Sjfvixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6101279858Sjfv{
6102279858Sjfv	struct i40e_vsi_context vsi_ctx;
6103279858Sjfv
6104279858Sjfv	vsi_ctx.seid = vf->vsi.seid;
6105279858Sjfv
6106279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6107279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6108279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6109279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6110279858Sjfv	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6111279858Sjfv}
6112279858Sjfv
6113279858Sjfvstatic void
6114279858Sjfvixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6115279858Sjfv    uint16_t msg_size)
6116279858Sjfv{
6117279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6118279858Sjfv	enum i40e_status_code code;
6119279858Sjfv	size_t expected_size;
6120279858Sjfv	int i;
6121279858Sjfv
6122279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6123279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6124279858Sjfv		    I40E_ERR_PARAM);
6125279858Sjfv		return;
6126279858Sjfv	}
6127279858Sjfv
6128279858Sjfv	filter_list = msg;
6129279858Sjfv	expected_size = sizeof(*filter_list) +
6130279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6131279858Sjfv	if (filter_list->num_elements == 0 ||
6132279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6133279858Sjfv	    msg_size != expected_size) {
6134279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6135279858Sjfv		    I40E_ERR_PARAM);
6136279858Sjfv		return;
6137279858Sjfv	}
6138279858Sjfv
6139279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6140279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6141279858Sjfv		    I40E_ERR_PARAM);
6142279858Sjfv		return;
6143279858Sjfv	}
6144279858Sjfv
6145279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6146279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6147279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6148279858Sjfv			    I40E_ERR_PARAM);
6149279858Sjfv			return;
6150279858Sjfv		}
6151279858Sjfv	}
6152279858Sjfv
6153279858Sjfv	code = ixl_vf_enable_vlan_strip(pf, vf);
6154279858Sjfv	if (code != I40E_SUCCESS) {
6155279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6156279858Sjfv		    I40E_ERR_PARAM);
6157279858Sjfv	}
6158279858Sjfv
6159279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6160279858Sjfv		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6161279858Sjfv
6162279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6163279858Sjfv}
6164279858Sjfv
6165279858Sjfvstatic void
6166279858Sjfvixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6167279858Sjfv    uint16_t msg_size)
6168279858Sjfv{
6169279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6170279858Sjfv	int i;
6171279858Sjfv	size_t expected_size;
6172279858Sjfv
6173279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6174279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6175279858Sjfv		    I40E_ERR_PARAM);
6176279858Sjfv		return;
6177279858Sjfv	}
6178279858Sjfv
6179279858Sjfv	filter_list = msg;
6180279858Sjfv	expected_size = sizeof(*filter_list) +
6181279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6182279858Sjfv	if (filter_list->num_elements == 0 ||
6183279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6184279858Sjfv	    msg_size != expected_size) {
6185279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6186279858Sjfv		    I40E_ERR_PARAM);
6187279858Sjfv		return;
6188279858Sjfv	}
6189279858Sjfv
6190279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6191279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6192279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6193279858Sjfv			    I40E_ERR_PARAM);
6194279858Sjfv			return;
6195279858Sjfv		}
6196279858Sjfv	}
6197279858Sjfv
6198279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6199279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6200279858Sjfv		    I40E_ERR_PARAM);
6201279858Sjfv		return;
6202279858Sjfv	}
6203279858Sjfv
6204279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6205279858Sjfv		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6206279858Sjfv
6207279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6208279858Sjfv}
6209279858Sjfv
6210279858Sjfvstatic void
6211279858Sjfvixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6212279858Sjfv    void *msg, uint16_t msg_size)
6213279858Sjfv{
6214279858Sjfv	struct i40e_virtchnl_promisc_info *info;
6215279858Sjfv	enum i40e_status_code code;
6216279858Sjfv
6217279858Sjfv	if (msg_size != sizeof(*info)) {
6218279858Sjfv		i40e_send_vf_nack(pf, vf,
6219279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6220279858Sjfv		return;
6221279858Sjfv	}
6222279858Sjfv
6223279858Sjfv	if (!vf->vf_flags & VF_FLAG_PROMISC_CAP) {
6224279858Sjfv		i40e_send_vf_nack(pf, vf,
6225279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6226279858Sjfv		return;
6227279858Sjfv	}
6228279858Sjfv
6229279858Sjfv	info = msg;
6230279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
6231279858Sjfv		i40e_send_vf_nack(pf, vf,
6232279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6233279858Sjfv		return;
6234279858Sjfv	}
6235279858Sjfv
6236279858Sjfv	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6237279858Sjfv	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6238279858Sjfv	if (code != I40E_SUCCESS) {
6239279858Sjfv		i40e_send_vf_nack(pf, vf,
6240279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6241279858Sjfv		return;
6242279858Sjfv	}
6243279858Sjfv
6244279858Sjfv	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6245279858Sjfv	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6246279858Sjfv	if (code != I40E_SUCCESS) {
6247279858Sjfv		i40e_send_vf_nack(pf, vf,
6248279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6249279858Sjfv		return;
6250279858Sjfv	}
6251279858Sjfv
6252279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6253279858Sjfv}
6254279858Sjfv
6255279858Sjfvstatic void
6256279858Sjfvixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6257279858Sjfv    uint16_t msg_size)
6258279858Sjfv{
6259279858Sjfv	struct i40e_virtchnl_queue_select *queue;
6260279858Sjfv
6261279858Sjfv	if (msg_size != sizeof(*queue)) {
6262279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6263279858Sjfv		    I40E_ERR_PARAM);
6264279858Sjfv		return;
6265279858Sjfv	}
6266279858Sjfv
6267279858Sjfv	queue = msg;
6268279858Sjfv	if (queue->vsi_id != vf->vsi.vsi_num) {
6269279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6270279858Sjfv		    I40E_ERR_PARAM);
6271279858Sjfv		return;
6272279858Sjfv	}
6273279858Sjfv
6274279858Sjfv	ixl_update_eth_stats(&vf->vsi);
6275279858Sjfv
6276279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6277279858Sjfv	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6278279858Sjfv}
6279279858Sjfv
6280279858Sjfvstatic void
6281279858Sjfvixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6282279858Sjfv{
6283279858Sjfv	struct ixl_vf *vf;
6284279858Sjfv	void *msg;
6285279858Sjfv	uint16_t vf_num, msg_size;
6286279858Sjfv	uint32_t opcode;
6287279858Sjfv
6288279858Sjfv	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6289279858Sjfv	opcode = le32toh(event->desc.cookie_high);
6290279858Sjfv
6291279858Sjfv	if (vf_num >= pf->num_vfs) {
6292279858Sjfv		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6293279858Sjfv		return;
6294279858Sjfv	}
6295279858Sjfv
6296279858Sjfv	vf = &pf->vfs[vf_num];
6297279858Sjfv	msg = event->msg_buf;
6298279858Sjfv	msg_size = event->msg_len;
6299279858Sjfv
6300279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6301279858Sjfv	    "Got msg %s(%d) from VF-%d of size %d\n",
6302279858Sjfv	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6303279858Sjfv
6304279858Sjfv	switch (opcode) {
6305279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
6306279858Sjfv		ixl_vf_version_msg(pf, vf, msg, msg_size);
6307279858Sjfv		break;
6308279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
6309279858Sjfv		ixl_vf_reset_msg(pf, vf, msg, msg_size);
6310279858Sjfv		break;
6311279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6312279858Sjfv		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6313279858Sjfv		break;
6314279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6315279858Sjfv		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6316279858Sjfv		break;
6317279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6318279858Sjfv		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6319279858Sjfv		break;
6320279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6321279858Sjfv		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6322279858Sjfv		break;
6323279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6324279858Sjfv		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6325279858Sjfv		break;
6326279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6327279858Sjfv		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6328279858Sjfv		break;
6329279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6330279858Sjfv		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6331279858Sjfv		break;
6332279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
6333279858Sjfv		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6334279858Sjfv		break;
6335279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
6336279858Sjfv		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6337279858Sjfv		break;
6338279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6339279858Sjfv		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6340279858Sjfv		break;
6341279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
6342279858Sjfv		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6343279858Sjfv		break;
6344279858Sjfv
6345279858Sjfv	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6346279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6347279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6348279858Sjfv	default:
6349279858Sjfv		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6350279858Sjfv		break;
6351279858Sjfv	}
6352279858Sjfv}
6353279858Sjfv
6354279858Sjfv/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
6355279858Sjfvstatic void
6356279858Sjfvixl_handle_vflr(void *arg, int pending)
6357279858Sjfv{
6358279858Sjfv	struct ixl_pf *pf;
6359279858Sjfv	struct i40e_hw *hw;
6360279858Sjfv	uint16_t global_vf_num;
6361279858Sjfv	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6362279858Sjfv	int i;
6363279858Sjfv
6364279858Sjfv	pf = arg;
6365279858Sjfv	hw = &pf->hw;
6366279858Sjfv
6367279858Sjfv	IXL_PF_LOCK(pf);
6368279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
6369279858Sjfv		global_vf_num = hw->func_caps.vf_base_id + i;
6370279858Sjfv
6371279858Sjfv		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6372279858Sjfv		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6373279858Sjfv		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6374279858Sjfv		if (vflrstat & vflrstat_mask) {
6375279858Sjfv			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6376279858Sjfv			    vflrstat_mask);
6377279858Sjfv
6378279858Sjfv			ixl_reinit_vf(pf, &pf->vfs[i]);
6379279858Sjfv		}
6380279858Sjfv	}
6381279858Sjfv
6382279858Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6383279858Sjfv	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6384279858Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6385279858Sjfv	ixl_flush(hw);
6386279858Sjfv
6387279858Sjfv	IXL_PF_UNLOCK(pf);
6388279858Sjfv}
6389279858Sjfv
6390279858Sjfvstatic int
6391279858Sjfvixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6392279858Sjfv{
6393279858Sjfv
6394279858Sjfv	switch (err) {
6395279858Sjfv	case I40E_AQ_RC_EPERM:
6396279858Sjfv		return (EPERM);
6397279858Sjfv	case I40E_AQ_RC_ENOENT:
6398279858Sjfv		return (ENOENT);
6399279858Sjfv	case I40E_AQ_RC_ESRCH:
6400279858Sjfv		return (ESRCH);
6401279858Sjfv	case I40E_AQ_RC_EINTR:
6402279858Sjfv		return (EINTR);
6403279858Sjfv	case I40E_AQ_RC_EIO:
6404279858Sjfv		return (EIO);
6405279858Sjfv	case I40E_AQ_RC_ENXIO:
6406279858Sjfv		return (ENXIO);
6407279858Sjfv	case I40E_AQ_RC_E2BIG:
6408279858Sjfv		return (E2BIG);
6409279858Sjfv	case I40E_AQ_RC_EAGAIN:
6410279858Sjfv		return (EAGAIN);
6411279858Sjfv	case I40E_AQ_RC_ENOMEM:
6412279858Sjfv		return (ENOMEM);
6413279858Sjfv	case I40E_AQ_RC_EACCES:
6414279858Sjfv		return (EACCES);
6415279858Sjfv	case I40E_AQ_RC_EFAULT:
6416279858Sjfv		return (EFAULT);
6417279858Sjfv	case I40E_AQ_RC_EBUSY:
6418279858Sjfv		return (EBUSY);
6419279858Sjfv	case I40E_AQ_RC_EEXIST:
6420279858Sjfv		return (EEXIST);
6421279858Sjfv	case I40E_AQ_RC_EINVAL:
6422279858Sjfv		return (EINVAL);
6423279858Sjfv	case I40E_AQ_RC_ENOTTY:
6424279858Sjfv		return (ENOTTY);
6425279858Sjfv	case I40E_AQ_RC_ENOSPC:
6426279858Sjfv		return (ENOSPC);
6427279858Sjfv	case I40E_AQ_RC_ENOSYS:
6428279858Sjfv		return (ENOSYS);
6429279858Sjfv	case I40E_AQ_RC_ERANGE:
6430279858Sjfv		return (ERANGE);
6431279858Sjfv	case I40E_AQ_RC_EFLUSHED:
6432279858Sjfv		return (EINVAL);	/* No exact equivalent in errno.h */
6433279858Sjfv	case I40E_AQ_RC_BAD_ADDR:
6434279858Sjfv		return (EFAULT);
6435279858Sjfv	case I40E_AQ_RC_EMODE:
6436279858Sjfv		return (EPERM);
6437279858Sjfv	case I40E_AQ_RC_EFBIG:
6438279858Sjfv		return (EFBIG);
6439279858Sjfv	default:
6440279858Sjfv		return (EINVAL);
6441279858Sjfv	}
6442279858Sjfv}
6443279858Sjfv
6444279858Sjfvstatic int
6445279858Sjfvixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6446279858Sjfv{
6447279858Sjfv	struct ixl_pf *pf;
6448279858Sjfv	struct i40e_hw *hw;
6449279858Sjfv	struct ixl_vsi *pf_vsi;
6450279858Sjfv	enum i40e_status_code ret;
6451279858Sjfv	int i, error;
6452279858Sjfv
6453279858Sjfv	pf = device_get_softc(dev);
6454279858Sjfv	hw = &pf->hw;
6455279858Sjfv	pf_vsi = &pf->vsi;
6456279858Sjfv
6457279858Sjfv	IXL_PF_LOCK(pf);
6458279858Sjfv	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6459279858Sjfv	    M_ZERO);
6460279858Sjfv
6461279858Sjfv	if (pf->vfs == NULL) {
6462279858Sjfv		error = ENOMEM;
6463279858Sjfv		goto fail;
6464279858Sjfv	}
6465279858Sjfv
6466279858Sjfv	for (i = 0; i < num_vfs; i++)
6467279858Sjfv		sysctl_ctx_init(&pf->vfs[i].ctx);
6468279858Sjfv
6469279858Sjfv	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6470279858Sjfv	    1, FALSE, FALSE, &pf->veb_seid, NULL);
6471279858Sjfv	if (ret != I40E_SUCCESS) {
6472279858Sjfv		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6473279858Sjfv		device_printf(dev, "add_veb failed; code=%d error=%d", ret,
6474279858Sjfv		    error);
6475279858Sjfv		goto fail;
6476279858Sjfv	}
6477279858Sjfv
6478279858Sjfv	ixl_configure_msix(pf);
6479279858Sjfv	ixl_enable_adminq(hw);
6480279858Sjfv
6481279858Sjfv	pf->num_vfs = num_vfs;
6482279858Sjfv	IXL_PF_UNLOCK(pf);
6483279858Sjfv	return (0);
6484279858Sjfv
6485279858Sjfvfail:
6486279858Sjfv	free(pf->vfs, M_IXL);
6487279858Sjfv	pf->vfs = NULL;
6488279858Sjfv	IXL_PF_UNLOCK(pf);
6489279858Sjfv	return (error);
6490279858Sjfv}
6491279858Sjfv
6492279858Sjfvstatic void
6493279858Sjfvixl_uninit_iov(device_t dev)
6494279858Sjfv{
6495279858Sjfv	struct ixl_pf *pf;
6496279858Sjfv	struct i40e_hw *hw;
6497279858Sjfv	struct ixl_vsi *vsi;
6498279858Sjfv	struct ifnet *ifp;
6499279858Sjfv	struct ixl_vf *vfs;
6500279858Sjfv	int i, num_vfs;
6501279858Sjfv
6502279858Sjfv	pf = device_get_softc(dev);
6503279858Sjfv	hw = &pf->hw;
6504279858Sjfv	vsi = &pf->vsi;
6505279858Sjfv	ifp = vsi->ifp;
6506279858Sjfv
6507279858Sjfv	IXL_PF_LOCK(pf);
6508279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
6509279858Sjfv		if (pf->vfs[i].vsi.seid != 0)
6510279858Sjfv			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6511279858Sjfv	}
6512279858Sjfv
6513279858Sjfv	if (pf->veb_seid != 0) {
6514279858Sjfv		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6515279858Sjfv		pf->veb_seid = 0;
6516279858Sjfv	}
6517279858Sjfv
6518279858Sjfv	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6519279858Sjfv		ixl_disable_intr(vsi);
6520279858Sjfv
6521279858Sjfv	vfs = pf->vfs;
6522279858Sjfv	num_vfs = pf->num_vfs;
6523279858Sjfv
6524279858Sjfv	pf->vfs = NULL;
6525279858Sjfv	pf->num_vfs = 0;
6526279858Sjfv	IXL_PF_UNLOCK(pf);
6527279858Sjfv
6528279858Sjfv	/* Do this after the unlock as sysctl_ctx_free might sleep. */
6529279858Sjfv	for (i = 0; i < num_vfs; i++)
6530279858Sjfv		sysctl_ctx_free(&vfs[i].ctx);
6531279858Sjfv	free(vfs, M_IXL);
6532279858Sjfv}
6533279858Sjfv
6534279858Sjfvstatic int
6535279858Sjfvixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6536279858Sjfv{
6537279858Sjfv	char sysctl_name[QUEUE_NAME_LEN];
6538279858Sjfv	struct ixl_pf *pf;
6539279858Sjfv	struct ixl_vf *vf;
6540279858Sjfv	const void *mac;
6541279858Sjfv	size_t size;
6542279858Sjfv	int error;
6543279858Sjfv
6544279858Sjfv	pf = device_get_softc(dev);
6545279858Sjfv	vf = &pf->vfs[vfnum];
6546279858Sjfv
6547279858Sjfv	IXL_PF_LOCK(pf);
6548279858Sjfv	vf->vf_num = vfnum;
6549279858Sjfv
6550279858Sjfv	vf->vsi.back = pf;
6551279858Sjfv	vf->vf_flags = VF_FLAG_ENABLED;
6552279858Sjfv	SLIST_INIT(&vf->vsi.ftl);
6553279858Sjfv
6554279858Sjfv	error = ixl_vf_setup_vsi(pf, vf);
6555279858Sjfv	if (error != 0)
6556279858Sjfv		goto out;
6557279858Sjfv
6558279858Sjfv	if (nvlist_exists_binary(params, "mac-addr")) {
6559279858Sjfv		mac = nvlist_get_binary(params, "mac-addr", &size);
6560279858Sjfv		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6561279858Sjfv
6562279858Sjfv		if (nvlist_get_bool(params, "allow-set-mac"))
6563279858Sjfv			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6564279858Sjfv	} else
6565279858Sjfv		/*
6566279858Sjfv		 * If the administrator has not specified a MAC address then
6567279858Sjfv		 * we must allow the VF to choose one.
6568279858Sjfv		 */
6569279858Sjfv		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6570279858Sjfv
6571279858Sjfv	if (nvlist_get_bool(params, "mac-anti-spoof"))
6572279858Sjfv		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6573279858Sjfv
6574279858Sjfv	if (nvlist_get_bool(params, "allow-promisc"))
6575279858Sjfv		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
6576279858Sjfv
6577279858Sjfv	vf->vf_flags |= VF_FLAG_VLAN_CAP;
6578279858Sjfv
6579279858Sjfv	ixl_reset_vf(pf, vf);
6580279858Sjfvout:
6581279858Sjfv	IXL_PF_UNLOCK(pf);
6582279858Sjfv	if (error == 0) {
6583279858Sjfv		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6584279858Sjfv		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6585279858Sjfv	}
6586279858Sjfv
6587279858Sjfv	return (error);
6588279858Sjfv}
6589279858Sjfv#endif /* PCI_IOV */
6590