/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixl/if_ixl.c 299547 2016-05-12 18:19:53Z erj $*/

#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"
#include "ixl_pf.h"

#ifdef RSS
#include <net/rss_config.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.4.7-k";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void	ixl_stop_locked(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_switch_config(struct ixl_pf *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_init_taskqueues(struct ixl_pf *);
static void	ixl_free_taskqueues(struct ixl_pf *);
static void	ixl_free_interrupt_resources(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);
static int	ixl_set_advertised_speeds(struct ixl_pf *, int);

static int	ixl_enable_rings(struct ixl_vsi *);
static int	ixl_disable_rings(struct ixl_vsi *);
static void	ixl_enable_intr(struct ixl_vsi *);
static void	ixl_disable_intr(struct ixl_vsi *);
static void	ixl_disable_rings_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
static void	ixl_free_mac_filters(struct ixl_vsi *vsi);


/* Sysctl debug interface */
static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);

/* The MSI/X Interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_update_vsi_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);
/* NVM update */
static int	ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);

#ifdef IXL_DEBUG_SYSCTL
static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
#endif


#ifdef PCI_IOV
static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);

static int	ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
static void	ixl_iov_uninit(device_t dev);
static int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);

static void	ixl_handle_vf_msg(struct ixl_pf *,
		    struct i40e_arq_event_info *);
static void	ixl_handle_vflr(void *arg, int pending);

static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixl_iov_init),
	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
#endif
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

285266423Sjfv** This can be set manually, if left as 0 the
286266423Sjfv** number of queues will be calculated based
287266423Sjfv** on cpus and msix vectors available.
288266423Sjfv*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif
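
/*
** Usage sketch (illustrative, not part of the original sources): the
** hw.ixl.* knobs above are declared with TUNABLE_INT, so they are normally
** set at boot time from /boot/loader.conf, e.g.:
**
**	hw.ixl.enable_msix=1
**	hw.ixl.ringsz=1024
**	hw.ixl.max_queues=0
**	hw.ixl.dynamic_rx_itr=1
**
** The values shown are examples only; DEFAULT_RING and the ITR defaults
** declared above remain the driver's actual defaults.
*/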

#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi *vsi;
	u16		bus;
	int             error = 0;
#ifdef PCI_IOV
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;
#endif

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init*/
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG_SYSCTL
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");

	/* Debug shared-code message level */
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug_mask", CTLFLAG_RW,
	    &pf->hw.debug_mask, 0, "Debug Message Level");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
	    0, "PF/VF Virtual Channel debug level");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
#endif

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	pf->vc_debug_lvl = 1;

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev,"PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));

        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up VSI and queues */
	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
			goto err_late;
		}
	}

	/* Determine link state */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, vsi) != 0) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n", error);
		goto err_late;
	}

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	error = i40e_aq_set_phy_int_mask(hw,
	    I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL,
	    NULL);
        if (error) {
		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		goto err_late;
	}

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize taskqueues */
	ixl_init_taskqueues(pf);

	/* Initialize statistics */
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
	/* SR-IOV is only supported when MSI-X is in use. */
	if (pf->msix > 1) {
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);

		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (iov_error != 0)
			device_printf(dev,
			    "Failed to initialize SR-IOV (error=%d)\n",
			    iov_error);
	}
#endif

#ifdef DEV_NETMAP
	ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	i40e_status		status;
#ifdef PCI_IOV
	int			error;
#endif

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif

	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixl_stop(pf);

	ixl_free_taskqueues(pf);

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	callout_drain(&pf->timer);
#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);
	ixl_stop(pf);
	return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	int             error, len;
	u16		needed;
	bool		again = TRUE;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
                return (ENOMEM);
	}

	/* This populates the hw struct */
        error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}

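/*
** Helper for SIOCSIFCAP: TSO requires TX checksum offload, so toggling one
** capability may force the other. When TXCSUM is disabled while TSO is
** active, the IXL_FLAGS_KEEP_TSO4/6 flags remember that TSO should be
** restored once checksum offload is re-enabled.
*/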
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t 	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = vsi->back;
	struct ifreq	*ifr = (struct ifreq *)data;
	struct ifdrv	*ifd = (struct ifdrv *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

        case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else {
				IXL_PF_UNLOCK(pf);
				ixl_init(pf);
				IXL_PF_LOCK(pf);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				IXL_PF_UNLOCK(pf);
				ixl_stop(pf);
				IXL_PF_LOCK(pf);
			}
		}
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCSDRVSPEC:
	case SIOCGDRVSPEC:
		IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
		    "Info)\n");

		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS)
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		else
			error = EINVAL;
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
#ifdef IFM_ETH_XTYPE
	case SIOCGIFXMEDIA:
#endif
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t 	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");

	ixl_stop_locked(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address "
			    "change failed!!\n");
			return;
		} else {
			ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/*
	** Prepare the VSI: rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* Set MTU in hardware*/
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
			"aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Get link info */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;
	int ret = 0;

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		ret = ixl_assign_vsi_msix(pf);
	else
		ret = ixl_assign_vsi_legacy(pf);
	if (ret) {
		device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", ret);
		return;
	}

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
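/*
** Deferred queue task: services RX and TX completions for one queue pair,
** restarts transmission if the buf ring has packets waiting, requeues
** itself while more RX work remains, and otherwise re-enables the queue's
** interrupt.
*/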
1273266423Sjfvstatic void
1274270346Sjfvixl_handle_que(void *context, int pending)
1275266423Sjfv{
1276270346Sjfv	struct ixl_queue *que = context;
1277270346Sjfv	struct ixl_vsi *vsi = que->vsi;
1278266423Sjfv	struct i40e_hw  *hw = vsi->hw;
1279266423Sjfv	struct tx_ring  *txr = &que->txr;
1280266423Sjfv	struct ifnet    *ifp = vsi->ifp;
1281266423Sjfv	bool		more;
1282266423Sjfv
1283266423Sjfv	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1284270346Sjfv		more = ixl_rxeof(que, IXL_RX_LIMIT);
1285270346Sjfv		IXL_TX_LOCK(txr);
1286270346Sjfv		ixl_txeof(que);
1287266423Sjfv		if (!drbr_empty(ifp, txr->br))
1288270346Sjfv			ixl_mq_start_locked(ifp, txr);
1289270346Sjfv		IXL_TX_UNLOCK(txr);
1290266423Sjfv		if (more) {
1291266423Sjfv			taskqueue_enqueue(que->tq, &que->task);
1292266423Sjfv			return;
1293266423Sjfv		}
1294266423Sjfv	}
1295266423Sjfv
1296266423Sjfv	/* Reenable this interrupt - hmmm */
1297270346Sjfv	ixl_enable_queue(hw, que->me);
1298266423Sjfv	return;
1299266423Sjfv}
1300266423Sjfv
1301266423Sjfv
1302266423Sjfv/*********************************************************************
1303266423Sjfv *
1304266423Sjfv *  Legacy Interrupt Service routine
1305266423Sjfv *
1306266423Sjfv **********************************************************************/
1307266423Sjfvvoid
1308270346Sjfvixl_intr(void *arg)
1309266423Sjfv{
1310270346Sjfv	struct ixl_pf		*pf = arg;
1311266423Sjfv	struct i40e_hw		*hw =  &pf->hw;
1312270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1313270346Sjfv	struct ixl_queue	*que = vsi->queues;
1314266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1315266423Sjfv	struct tx_ring		*txr = &que->txr;
1316266423Sjfv        u32			reg, icr0, mask;
1317266423Sjfv	bool			more_tx, more_rx;
1318266423Sjfv
1319266423Sjfv	++que->irqs;
1320266423Sjfv
1321266423Sjfv	/* Protect against spurious interrupts */
1322266423Sjfv	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1323266423Sjfv		return;
1324266423Sjfv
1325266423Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0);
1326266423Sjfv
1327266423Sjfv	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1328266423Sjfv	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1329266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1330266423Sjfv
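	/*
	** Snapshot the currently enabled causes; they are written back
	** to I40E_PFINT_ICR0_ENA once the queue pair has been serviced.
	*/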
1331266423Sjfv        mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1332266423Sjfv
1333279858Sjfv#ifdef PCI_IOV
1334279858Sjfv	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
1335279858Sjfv		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1336279858Sjfv#endif
1337279858Sjfv
1338266423Sjfv	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1339266423Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
1340266423Sjfv		return;
1341266423Sjfv	}
1342266423Sjfv
1343270346Sjfv	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1344266423Sjfv
1345270346Sjfv	IXL_TX_LOCK(txr);
1346270346Sjfv	more_tx = ixl_txeof(que);
1347266423Sjfv	if (!drbr_empty(vsi->ifp, txr->br))
1348266423Sjfv		more_tx = 1;
1349270346Sjfv	IXL_TX_UNLOCK(txr);
1350266423Sjfv
1351266423Sjfv	/* re-enable other interrupt causes */
1352266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1353266423Sjfv
1354266423Sjfv	/* And now the queues */
1355266423Sjfv	reg = rd32(hw, I40E_QINT_RQCTL(0));
1356266423Sjfv	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1357266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
1358266423Sjfv
1359266423Sjfv	reg = rd32(hw, I40E_QINT_TQCTL(0));
1360266423Sjfv	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1361266423Sjfv	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1362266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
1363266423Sjfv
1364270346Sjfv	ixl_enable_legacy(hw);
1365266423Sjfv
1366266423Sjfv	return;
1367266423Sjfv}
1368266423Sjfv
1369266423Sjfv
1370266423Sjfv/*********************************************************************
1371266423Sjfv *
1372266423Sjfv *  MSIX VSI Interrupt Service routine
1373266423Sjfv *
1374266423Sjfv **********************************************************************/
1375266423Sjfvvoid
1376270346Sjfvixl_msix_que(void *arg)
1377266423Sjfv{
1378270346Sjfv	struct ixl_queue	*que = arg;
1379270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
1380266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1381266423Sjfv	struct tx_ring	*txr = &que->txr;
1382266423Sjfv	bool		more_tx, more_rx;
1383266423Sjfv
1384269198Sjfv	/* Protect against spurious interrupts */
1385269198Sjfv	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1386269198Sjfv		return;
1387269198Sjfv
1388266423Sjfv	++que->irqs;
1389266423Sjfv
1390270346Sjfv	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1391266423Sjfv
1392270346Sjfv	IXL_TX_LOCK(txr);
1393270346Sjfv	more_tx = ixl_txeof(que);
1394266423Sjfv	/*
1395266423Sjfv	** Make certain that if the stack
1396266423Sjfv	** has anything queued the task gets
1397266423Sjfv	** scheduled to handle it.
1398266423Sjfv	*/
1399266423Sjfv	if (!drbr_empty(vsi->ifp, txr->br))
1400266423Sjfv		more_tx = 1;
1401270346Sjfv	IXL_TX_UNLOCK(txr);
1402266423Sjfv
1403270346Sjfv	ixl_set_queue_rx_itr(que);
1404270346Sjfv	ixl_set_queue_tx_itr(que);
1405266423Sjfv
1406266423Sjfv	if (more_tx || more_rx)
1407266423Sjfv		taskqueue_enqueue(que->tq, &que->task);
1408266423Sjfv	else
1409270346Sjfv		ixl_enable_queue(hw, que->me);
1410266423Sjfv
1411266423Sjfv	return;
1412266423Sjfv}
1413266423Sjfv
1414266423Sjfv
1415266423Sjfv/*********************************************************************
1416266423Sjfv *
1417266423Sjfv *  MSIX Admin Queue Interrupt Service routine
1418266423Sjfv *
1419266423Sjfv **********************************************************************/
1420266423Sjfvstatic void
1421270346Sjfvixl_msix_adminq(void *arg)
1422266423Sjfv{
1423270346Sjfv	struct ixl_pf	*pf = arg;
1424266423Sjfv	struct i40e_hw	*hw = &pf->hw;
1425266423Sjfv	u32		reg, mask;
1426266423Sjfv
1427266423Sjfv	++pf->admin_irq;
1428266423Sjfv
1429266423Sjfv	reg = rd32(hw, I40E_PFINT_ICR0);
1430266423Sjfv	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1431266423Sjfv
1432266423Sjfv	/* Check on the cause */
1433266423Sjfv	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1434266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1435266423Sjfv
1436269198Sjfv	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1437270346Sjfv		ixl_handle_mdd_event(pf);
1438266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1439269198Sjfv	}
1440266423Sjfv
1441279858Sjfv#ifdef PCI_IOV
1442279858Sjfv	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
1443266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1444279858Sjfv		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1445279858Sjfv	}
1446279858Sjfv#endif
1447266423Sjfv
1448266423Sjfv	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1449266423Sjfv	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1450266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1451266423Sjfv
1452266423Sjfv	taskqueue_enqueue(pf->tq, &pf->adminq);
1453266423Sjfv	return;
1454266423Sjfv}
1455266423Sjfv
1456266423Sjfv/*********************************************************************
1457266423Sjfv *
1458266423Sjfv *  Media Ioctl callback
1459266423Sjfv *
1460266423Sjfv *  This routine is called whenever the user queries the status of
1461266423Sjfv *  the interface using ifconfig.
1462266423Sjfv *
1463266423Sjfv **********************************************************************/
1464266423Sjfvstatic void
1465270346Sjfvixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1466266423Sjfv{
1467270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
1468279858Sjfv	struct ixl_pf	*pf = vsi->back;
1469266423Sjfv	struct i40e_hw  *hw = &pf->hw;
1470266423Sjfv
1471270346Sjfv	INIT_DEBUGOUT("ixl_media_status: begin");
1472270346Sjfv	IXL_PF_LOCK(pf);
1473266423Sjfv
1474279858Sjfv	hw->phy.get_link_info = TRUE;
1475284049Sjfv	i40e_get_link_status(hw, &pf->link_up);
1476270346Sjfv	ixl_update_link_status(pf);
1477266423Sjfv
1478266423Sjfv	ifmr->ifm_status = IFM_AVALID;
1479266423Sjfv	ifmr->ifm_active = IFM_ETHER;
1480266423Sjfv
1481279858Sjfv	if (!pf->link_up) {
1482270346Sjfv		IXL_PF_UNLOCK(pf);
1483266423Sjfv		return;
1484266423Sjfv	}
1485266423Sjfv
1486266423Sjfv	ifmr->ifm_status |= IFM_ACTIVE;
1487299545Serj
1488299545Serj	/* Hardware always does full-duplex */
1489266423Sjfv	ifmr->ifm_active |= IFM_FDX;
1490266423Sjfv
1491266423Sjfv	switch (hw->phy.link_info.phy_type) {
1492266423Sjfv		/* 100 M */
1493266423Sjfv		case I40E_PHY_TYPE_100BASE_TX:
1494266423Sjfv			ifmr->ifm_active |= IFM_100_TX;
1495266423Sjfv			break;
1496266423Sjfv		/* 1 G */
1497266423Sjfv		case I40E_PHY_TYPE_1000BASE_T:
1498266423Sjfv			ifmr->ifm_active |= IFM_1000_T;
1499266423Sjfv			break;
1500269198Sjfv		case I40E_PHY_TYPE_1000BASE_SX:
1501269198Sjfv			ifmr->ifm_active |= IFM_1000_SX;
1502269198Sjfv			break;
1503269198Sjfv		case I40E_PHY_TYPE_1000BASE_LX:
1504269198Sjfv			ifmr->ifm_active |= IFM_1000_LX;
1505269198Sjfv			break;
1506266423Sjfv		/* 10 G */
1507266423Sjfv		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1508266423Sjfv			ifmr->ifm_active |= IFM_10G_TWINAX;
1509266423Sjfv			break;
1510266423Sjfv		case I40E_PHY_TYPE_10GBASE_SR:
1511266423Sjfv			ifmr->ifm_active |= IFM_10G_SR;
1512266423Sjfv			break;
1513266423Sjfv		case I40E_PHY_TYPE_10GBASE_LR:
1514266423Sjfv			ifmr->ifm_active |= IFM_10G_LR;
1515266423Sjfv			break;
1516270346Sjfv		case I40E_PHY_TYPE_10GBASE_T:
1517270346Sjfv			ifmr->ifm_active |= IFM_10G_T;
1518270346Sjfv			break;
1519266423Sjfv		/* 40 G */
1520266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4:
1521266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1522266423Sjfv			ifmr->ifm_active |= IFM_40G_CR4;
1523266423Sjfv			break;
1524266423Sjfv		case I40E_PHY_TYPE_40GBASE_SR4:
1525266423Sjfv			ifmr->ifm_active |= IFM_40G_SR4;
1526266423Sjfv			break;
1527266423Sjfv		case I40E_PHY_TYPE_40GBASE_LR4:
1528266423Sjfv			ifmr->ifm_active |= IFM_40G_LR4;
1529266423Sjfv			break;
1530284049Sjfv#ifndef IFM_ETH_XTYPE
1531284049Sjfv		case I40E_PHY_TYPE_1000BASE_KX:
1532284049Sjfv			ifmr->ifm_active |= IFM_1000_CX;
1533284049Sjfv			break;
1534284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1535284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1:
1536284049Sjfv			ifmr->ifm_active |= IFM_10G_TWINAX;
1537284049Sjfv			break;
1538284049Sjfv		case I40E_PHY_TYPE_10GBASE_KX4:
1539284049Sjfv			ifmr->ifm_active |= IFM_10G_CX4;
1540284049Sjfv			break;
1541284049Sjfv		case I40E_PHY_TYPE_10GBASE_KR:
1542284049Sjfv			ifmr->ifm_active |= IFM_10G_SR;
1543284049Sjfv			break;
1544279033Sjfv		case I40E_PHY_TYPE_40GBASE_KR4:
1545279033Sjfv		case I40E_PHY_TYPE_XLPPI:
1546284049Sjfv			ifmr->ifm_active |= IFM_40G_SR4;
1547279033Sjfv			break;
1548284049Sjfv#else
1549284049Sjfv		case I40E_PHY_TYPE_1000BASE_KX:
1550284049Sjfv			ifmr->ifm_active |= IFM_1000_KX;
1551284049Sjfv			break;
1552284049Sjfv		/* ERJ: What's the difference between these? */
1553284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1554284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1:
1555284049Sjfv			ifmr->ifm_active |= IFM_10G_CR1;
1556284049Sjfv			break;
1557284049Sjfv		case I40E_PHY_TYPE_10GBASE_KX4:
1558284049Sjfv			ifmr->ifm_active |= IFM_10G_KX4;
1559284049Sjfv			break;
1560284049Sjfv		case I40E_PHY_TYPE_10GBASE_KR:
1561284049Sjfv			ifmr->ifm_active |= IFM_10G_KR;
1562284049Sjfv			break;
1563299545Serj		/* Our single 20G media type */
1564284049Sjfv		case I40E_PHY_TYPE_20GBASE_KR2:
1565284049Sjfv			ifmr->ifm_active |= IFM_20G_KR2;
1566284049Sjfv			break;
1567284049Sjfv		case I40E_PHY_TYPE_40GBASE_KR4:
1568284049Sjfv			ifmr->ifm_active |= IFM_40G_KR4;
1569284049Sjfv			break;
1570284049Sjfv		case I40E_PHY_TYPE_XLPPI:
1571284049Sjfv			ifmr->ifm_active |= IFM_40G_XLPPI;
1572284049Sjfv			break;
1573284049Sjfv#endif
1574266423Sjfv		default:
1575266423Sjfv			ifmr->ifm_active |= IFM_UNKNOWN;
1576266423Sjfv			break;
1577266423Sjfv	}
1578266423Sjfv	/* Report flow control status as well */
1579266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1580266423Sjfv		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1581266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1582266423Sjfv		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1583266423Sjfv
1584270346Sjfv	IXL_PF_UNLOCK(pf);
1585266423Sjfv
1586266423Sjfv	return;
1587266423Sjfv}
1588266423Sjfv
1589299545Serj/*
1590299545Serj * NOTE: Fortville does not support forcing media speeds. Instead,
1591299545Serj * use the set_advertise sysctl to set the speeds Fortville
1592299545Serj * will advertise or be allowed to operate at.
1593299545Serj */
1594266423Sjfvstatic int
1595270346Sjfvixl_media_change(struct ifnet * ifp)
1596266423Sjfv{
1597270346Sjfv	struct ixl_vsi *vsi = ifp->if_softc;
1598266423Sjfv	struct ifmedia *ifm = &vsi->media;
1599266423Sjfv
1600270346Sjfv	INIT_DEBUGOUT("ixl_media_change: begin");
1601266423Sjfv
1602266423Sjfv	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1603266423Sjfv		return (EINVAL);
1604266423Sjfv
1605299545Serj	if_printf(ifp, "Media change is not supported.\n");
1606269198Sjfv
1607269198Sjfv	return (ENODEV);
1608266423Sjfv}
1609266423Sjfv
1610266423Sjfv
1611270346Sjfv#ifdef IXL_FDIR
1612266423Sjfv/*
1613266423Sjfv** ATR: Application Targeted Receive - creates a filter
1614266423Sjfv**	based on TX flow info that will keep the receive
1615266423Sjfv**	portion of the flow on the same queue. In this
1616266423Sjfv**	implementation it is only available for TCP connections.
1617266423Sjfv*/
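/*
** Sampling example (illustrative rate): with txr->atr_rate set to 20,
** a filter-program descriptor is written for every 20th packet of a
** flow, and additionally for every SYN and FIN; a FIN also removes
** the filter (see the PCMD_REMOVE case below).
*/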
1618266423Sjfvvoid
1619270346Sjfvixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1620266423Sjfv{
1621270346Sjfv	struct ixl_vsi			*vsi = que->vsi;
1622266423Sjfv	struct tx_ring			*txr = &que->txr;
1623266423Sjfv	struct i40e_filter_program_desc	*FDIR;
1624266423Sjfv	u32				ptype, dtype;
1625266423Sjfv	int				idx;
1626266423Sjfv
1627266423Sjfv	/* check if ATR is enabled and sample rate */
1628270346Sjfv	if ((!ixl_enable_fdir) || (!txr->atr_rate))
1629266423Sjfv		return;
1630266423Sjfv	/*
1631266423Sjfv	** We sample all TCP SYN/FIN packets,
1632266423Sjfv	** or at the selected sample rate
1633266423Sjfv	*/
1634266423Sjfv	txr->atr_count++;
1635266423Sjfv	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1636266423Sjfv	    (txr->atr_count < txr->atr_rate))
1637266423Sjfv                return;
1638266423Sjfv	txr->atr_count = 0;
1639266423Sjfv
1640266423Sjfv	/* Get a descriptor to use */
1641266423Sjfv	idx = txr->next_avail;
1642266423Sjfv	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1643266423Sjfv	if (++idx == que->num_desc)
1644266423Sjfv		idx = 0;
1645266423Sjfv	txr->avail--;
1646266423Sjfv	txr->next_avail = idx;
1647266423Sjfv
1648266423Sjfv	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1649266423Sjfv	    I40E_TXD_FLTR_QW0_QINDEX_MASK;
1650266423Sjfv
1651266423Sjfv	ptype |= (etype == ETHERTYPE_IP) ?
1652266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1653266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1654266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1655266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1656266423Sjfv
1657266423Sjfv	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1658266423Sjfv
1659266423Sjfv	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1660266423Sjfv
1661266423Sjfv	/*
1662266423Sjfv	** We use the TCP TH_FIN as a trigger to remove
1663266423Sjfv	** the filter, otherwise it's an update.
1664266423Sjfv	*/
1665266423Sjfv	dtype |= (th->th_flags & TH_FIN) ?
1666266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1667266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1668266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1669266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1670266423Sjfv
1671266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1672266423Sjfv	    I40E_TXD_FLTR_QW1_DEST_SHIFT;
1673266423Sjfv
1674266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1675266423Sjfv	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1676266423Sjfv
1677266423Sjfv	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1678266423Sjfv	FDIR->dtype_cmd_cntindex = htole32(dtype);
1679266423Sjfv	return;
1680266423Sjfv}
1681266423Sjfv#endif
1682266423Sjfv
1683266423Sjfv
1684266423Sjfvstatic void
1685270346Sjfvixl_set_promisc(struct ixl_vsi *vsi)
1686266423Sjfv{
1687266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1688266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1689266423Sjfv	int		err, mcnt = 0;
1690266423Sjfv	bool		uni = FALSE, multi = FALSE;
1691266423Sjfv
1692266423Sjfv	if (ifp->if_flags & IFF_ALLMULTI)
1693266423Sjfv                multi = TRUE;
1694266423Sjfv	else { /* Need to count the multicast addresses */
1695266423Sjfv		struct  ifmultiaddr *ifma;
1696266423Sjfv		if_maddr_rlock(ifp);
1697266423Sjfv		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1698266423Sjfv                        if (ifma->ifma_addr->sa_family != AF_LINK)
1699266423Sjfv                                continue;
1700266423Sjfv                        if (mcnt == MAX_MULTICAST_ADDR)
1701266423Sjfv                                break;
1702266423Sjfv                        mcnt++;
1703266423Sjfv		}
1704266423Sjfv		if_maddr_runlock(ifp);
1705266423Sjfv	}
1706266423Sjfv
1707266423Sjfv	if (mcnt >= MAX_MULTICAST_ADDR)
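	/*
	** Fall back to multicast promiscuous if the count reaches
	** MAX_MULTICAST_ADDR; unicast promiscuous simply mirrors
	** IFF_PROMISC.
	*/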
1708266423Sjfv                multi = TRUE;
1709266423Sjfv        if (ifp->if_flags & IFF_PROMISC)
1710266423Sjfv		uni = TRUE;
1711266423Sjfv
1712266423Sjfv	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1713266423Sjfv	    vsi->seid, uni, NULL);
1714266423Sjfv	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1715266423Sjfv	    vsi->seid, multi, NULL);
1716266423Sjfv	return;
1717266423Sjfv}
1718266423Sjfv
1719266423Sjfv/*********************************************************************
1720266423Sjfv * 	Filter Routines
1721266423Sjfv *
1722266423Sjfv *	Routines for multicast and vlan filter management.
1723266423Sjfv *
1724266423Sjfv *********************************************************************/
1725266423Sjfvstatic void
1726270346Sjfvixl_add_multi(struct ixl_vsi *vsi)
1727266423Sjfv{
1728266423Sjfv	struct	ifmultiaddr	*ifma;
1729266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1730266423Sjfv	struct i40e_hw		*hw = vsi->hw;
1731266423Sjfv	int			mcnt = 0, flags;
1732266423Sjfv
1733270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: begin");
1734266423Sjfv
1735266423Sjfv	if_maddr_rlock(ifp);
1736266423Sjfv	/*
1737266423Sjfv	** First just get a count, to decide if we
1738266423Sjfv	** should simply use multicast promiscuous.
1739266423Sjfv	*/
1740266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1741266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1742266423Sjfv			continue;
1743266423Sjfv		mcnt++;
1744266423Sjfv	}
1745266423Sjfv	if_maddr_runlock(ifp);
1746266423Sjfv
1747266423Sjfv	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1748266423Sjfv		/* delete existing MC filters */
1749270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1750266423Sjfv		i40e_aq_set_vsi_multicast_promiscuous(hw,
1751266423Sjfv		    vsi->seid, TRUE, NULL);
1752266423Sjfv		return;
1753266423Sjfv	}
1754266423Sjfv
1755266423Sjfv	mcnt = 0;
1756266423Sjfv	if_maddr_rlock(ifp);
1757266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1758266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1759266423Sjfv			continue;
1760270346Sjfv		ixl_add_mc_filter(vsi,
1761266423Sjfv		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1762266423Sjfv		mcnt++;
1763266423Sjfv	}
1764266423Sjfv	if_maddr_runlock(ifp);
1765266423Sjfv	if (mcnt > 0) {
1766270346Sjfv		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1767270346Sjfv		ixl_add_hw_filters(vsi, flags, mcnt);
1768266423Sjfv	}
1769266423Sjfv
1770270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: end");
1771266423Sjfv	return;
1772266423Sjfv}
1773266423Sjfv
1774266423Sjfvstatic void
1775270346Sjfvixl_del_multi(struct ixl_vsi *vsi)
1776266423Sjfv{
1777266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1778266423Sjfv	struct ifmultiaddr	*ifma;
1779270346Sjfv	struct ixl_mac_filter	*f;
1780266423Sjfv	int			mcnt = 0;
1781266423Sjfv	bool		match = FALSE;
1782266423Sjfv
1783270346Sjfv	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1784266423Sjfv
1785266423Sjfv	/* Search for removed multicast addresses */
1786266423Sjfv	if_maddr_rlock(ifp);
1787266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
1788270346Sjfv		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1789266423Sjfv			match = FALSE;
1790266423Sjfv			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1791266423Sjfv				if (ifma->ifma_addr->sa_family != AF_LINK)
1792266423Sjfv					continue;
1793266423Sjfv				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1794266423Sjfv				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1795266423Sjfv					match = TRUE;
1796266423Sjfv					break;
1797266423Sjfv				}
1798266423Sjfv			}
1799266423Sjfv			if (match == FALSE) {
1800270346Sjfv				f->flags |= IXL_FILTER_DEL;
1801266423Sjfv				mcnt++;
1802266423Sjfv			}
1803266423Sjfv		}
1804266423Sjfv	}
1805266423Sjfv	if_maddr_runlock(ifp);
1806266423Sjfv
1807266423Sjfv	if (mcnt > 0)
1808270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1809266423Sjfv}
1810266423Sjfv
1811266423Sjfv
1812266423Sjfv/*********************************************************************
1813266423Sjfv *  Timer routine
1814266423Sjfv *
1815266423Sjfv *  This routine checks for link status,updates statistics,
1816266423Sjfv *  This routine checks for link status, updates statistics,
1817266423Sjfv *
1818266423Sjfv **********************************************************************/
1819266423Sjfv
1820266423Sjfvstatic void
1821270346Sjfvixl_local_timer(void *arg)
1822266423Sjfv{
1823270346Sjfv	struct ixl_pf		*pf = arg;
1824266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1825270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1826270346Sjfv	struct ixl_queue	*que = vsi->queues;
1827266423Sjfv	device_t		dev = pf->dev;
1828266423Sjfv	int			hung = 0;
1829266423Sjfv	u32			mask;
1830266423Sjfv
1831266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1832266423Sjfv
1833266423Sjfv	/* Fire off the adminq task */
1834266423Sjfv	taskqueue_enqueue(pf->tq, &pf->adminq);
1835266423Sjfv
1836266423Sjfv	/* Update stats */
1837270346Sjfv	ixl_update_stats_counters(pf);
1838266423Sjfv
1839266423Sjfv	/*
1840269198Sjfv	** Check status of the queues
1841266423Sjfv	*/
1842266423Sjfv	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1843266423Sjfv		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1844266423Sjfv
1845266423Sjfv	for (int i = 0; i < vsi->num_queues; i++,que++) {
1846266423Sjfv		/* Any queues with outstanding work get a sw irq */
1847266423Sjfv		if (que->busy)
1848266423Sjfv			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1849266423Sjfv		/*
1850266423Sjfv		** Each time txeof runs without cleaning while there
1851266423Sjfv		** are uncleaned descriptors, it increments busy. If
1852266423Sjfv		** busy reaches IXL_MAX_TX_BUSY we declare the queue hung.
1853266423Sjfv		*/
1854270346Sjfv		if (que->busy == IXL_QUEUE_HUNG) {
1855269198Sjfv			++hung;
1856269198Sjfv			/* Mark the queue as inactive */
1857269198Sjfv			vsi->active_queues &= ~((u64)1 << que->me);
1858269198Sjfv			continue;
1859269198Sjfv		} else {
1860269198Sjfv			/* Check if we've come back from hung */
1861269198Sjfv			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1862269198Sjfv				vsi->active_queues |= ((u64)1 << que->me);
1863269198Sjfv		}
1864270346Sjfv		if (que->busy >= IXL_MAX_TX_BUSY) {
1865277084Sjfv#ifdef IXL_DEBUG
1866266423Sjfv			device_printf(dev, "Warning: queue %d "
1867269198Sjfv			    "appears to be hung!\n", i);
1868277084Sjfv#endif
1869270346Sjfv			que->busy = IXL_QUEUE_HUNG;
1870266423Sjfv			++hung;
1871266423Sjfv		}
1872266423Sjfv	}
1873266423Sjfv	/* Only reinit if all queues show hung */
1874266423Sjfv	if (hung == vsi->num_queues)
1875266423Sjfv		goto hung;
1876266423Sjfv
1877270346Sjfv	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1878266423Sjfv	return;
1879266423Sjfv
1880266423Sjfvhung:
1881266423Sjfv	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1882270346Sjfv	ixl_init_locked(pf);
1883266423Sjfv}
1884266423Sjfv
1885266423Sjfv/*
1886266423Sjfv** Note: this routine updates the OS on the link state
1887266423Sjfv**	the real check of the hardware only happens with
1888266423Sjfv**	a link interrupt.
1889266423Sjfv*/
1890266423Sjfvstatic void
1891270346Sjfvixl_update_link_status(struct ixl_pf *pf)
1892266423Sjfv{
1893270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1894266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1895266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1896266423Sjfv	device_t		dev = pf->dev;
1897266423Sjfv
1898299547Serj	if (pf->link_up) {
1899266423Sjfv		if (vsi->link_active == FALSE) {
1900279033Sjfv			pf->fc = hw->fc.current_mode;
1901266423Sjfv			if (bootverbose) {
1902266423Sjfv				device_printf(dev,"Link is up %d Gbps %s,"
1903266423Sjfv				    " Flow Control: %s\n",
1904279858Sjfv				    ((pf->link_speed ==
1905279858Sjfv				    I40E_LINK_SPEED_40GB)? 40:10),
1906279033Sjfv				    "Full Duplex", ixl_fc_string[pf->fc]);
1907266423Sjfv			}
1908266423Sjfv			vsi->link_active = TRUE;
1909277084Sjfv			/*
1910277084Sjfv			** Warn user if link speed on NPAR enabled
1911277084Sjfv			** partition is not at least 10GB
1912277084Sjfv			*/
1913277084Sjfv			if (hw->func_caps.npar_enable &&
1914279858Sjfv			   (hw->phy.link_info.link_speed ==
1915279858Sjfv			   I40E_LINK_SPEED_1GB ||
1916279858Sjfv			   hw->phy.link_info.link_speed ==
1917279858Sjfv			   I40E_LINK_SPEED_100MB))
1918279858Sjfv				device_printf(dev, "The partition detected a link"
1919279858Sjfv				    " speed that is less than 10Gbps\n");
1920266423Sjfv			if_link_state_change(ifp, LINK_STATE_UP);
1921266423Sjfv		}
1922266423Sjfv	} else { /* Link down */
1923266423Sjfv		if (vsi->link_active == TRUE) {
1924266423Sjfv			if (bootverbose)
1925299547Serj				device_printf(dev, "Link is Down\n");
1926266423Sjfv			if_link_state_change(ifp, LINK_STATE_DOWN);
1927266423Sjfv			vsi->link_active = FALSE;
1928266423Sjfv		}
1929266423Sjfv	}
1930266423Sjfv
1931266423Sjfv	return;
1932266423Sjfv}
1933266423Sjfv
1934299547Serjstatic void
1935299547Serjixl_stop(struct ixl_pf *pf)
1936299547Serj{
1937299547Serj	IXL_PF_LOCK(pf);
1938299547Serj	ixl_stop_locked(pf);
1939299547Serj	IXL_PF_UNLOCK(pf);
1940299547Serj
1941299547Serj	ixl_free_interrupt_resources(pf);
1942299547Serj}
1943299547Serj
1944266423Sjfv/*********************************************************************
1945266423Sjfv *
1946266423Sjfv *  This routine disables all traffic on the adapter by issuing a
1947266423Sjfv *  global reset on the MAC and deallocates TX/RX buffers.
1948266423Sjfv *
1949266423Sjfv **********************************************************************/
1950266423Sjfv
1951266423Sjfvstatic void
1952299547Serjixl_stop_locked(struct ixl_pf *pf)
1953266423Sjfv{
1954270346Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
1955266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1956266423Sjfv
1957299547Serj	INIT_DEBUGOUT("ixl_stop_locked: begin\n");
1958266423Sjfv
1959299547Serj	IXL_PF_LOCK_ASSERT(pf);
1960299547Serj
1961299547Serj	/* Stop the local timer */
1962299547Serj	callout_stop(&pf->timer);
1963299547Serj
1964279858Sjfv	if (pf->num_vfs == 0)
1965279858Sjfv		ixl_disable_intr(vsi);
1966279858Sjfv	else
1967279858Sjfv		ixl_disable_rings_intr(vsi);
1968270346Sjfv	ixl_disable_rings(vsi);
1969266423Sjfv
1970266423Sjfv	/* Tell the stack that the interface is no longer active */
1971266423Sjfv	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1972266423Sjfv
1973266423Sjfv	return;
1974266423Sjfv}
1975266423Sjfv
1976266423Sjfv
1977266423Sjfv/*********************************************************************
1978266423Sjfv *
1979266423Sjfv *  Setup the legacy/MSI interrupt resources and handler for the VSI
1980266423Sjfv *
1981266423Sjfv **********************************************************************/
1982266423Sjfvstatic int
1983270346Sjfvixl_assign_vsi_legacy(struct ixl_pf *pf)
1984266423Sjfv{
1985266423Sjfv	device_t        dev = pf->dev;
1986270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
1987270346Sjfv	struct		ixl_queue *que = vsi->queues;
1988266423Sjfv	int 		error, rid = 0;
1989266423Sjfv
1990266423Sjfv	if (pf->msix == 1)
1991266423Sjfv		rid = 1;
1992266423Sjfv	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1993266423Sjfv	    &rid, RF_SHAREABLE | RF_ACTIVE);
1994266423Sjfv	if (pf->res == NULL) {
1995266423Sjfv		device_printf(dev,"Unable to allocate"
1996266423Sjfv		    " bus resource: vsi legacy/msi interrupt\n");
1997266423Sjfv		return (ENXIO);
1998266423Sjfv	}
1999266423Sjfv
2000266423Sjfv	/* Set the handler function */
2001266423Sjfv	error = bus_setup_intr(dev, pf->res,
2002266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2003270346Sjfv	    ixl_intr, pf, &pf->tag);
2004266423Sjfv	if (error) {
2005266423Sjfv		pf->res = NULL;
2006266423Sjfv		device_printf(dev, "Failed to register legacy/msi handler");
2007266423Sjfv		return (error);
2008266423Sjfv	}
2009266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
2010270346Sjfv	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2011270346Sjfv	TASK_INIT(&que->task, 0, ixl_handle_que, que);
2012270346Sjfv	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2013266423Sjfv	    taskqueue_thread_enqueue, &que->tq);
2014266423Sjfv	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2015266423Sjfv	    device_get_nameunit(dev));
2016270346Sjfv	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2017279858Sjfv
2018279858Sjfv#ifdef PCI_IOV
2019279858Sjfv	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2020279858Sjfv#endif
2021279858Sjfv
2022270346Sjfv	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2023266423Sjfv	    taskqueue_thread_enqueue, &pf->tq);
2024266423Sjfv	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2025266423Sjfv	    device_get_nameunit(dev));
2026266423Sjfv
2027266423Sjfv	return (0);
2028266423Sjfv}
2029266423Sjfv
2030299546Serjstatic void
2031299546Serjixl_init_taskqueues(struct ixl_pf *pf)
2032299546Serj{
2033299546Serj	struct ixl_vsi *vsi = &pf->vsi;
2034299546Serj	struct ixl_queue *que = vsi->queues;
2035299546Serj	device_t dev = pf->dev;
2036266423Sjfv
2037299546Serj	/* Tasklet for Admin Queue */
2038299546Serj	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2039299546Serj#ifdef PCI_IOV
2040299546Serj	/* VFLR Tasklet */
2041299546Serj	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2042299546Serj#endif
2043299546Serj
2044299546Serj	/* Create and start PF taskqueue */
2045299546Serj	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2046299546Serj	    taskqueue_thread_enqueue, &pf->tq);
2047299546Serj	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2048299546Serj	    device_get_nameunit(dev));
2049299546Serj
2050299546Serj	/* Create queue tasks and start queue taskqueues */
2051299546Serj	for (int i = 0; i < vsi->num_queues; i++, que++) {
2052299546Serj		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2053299546Serj		TASK_INIT(&que->task, 0, ixl_handle_que, que);
2054299546Serj		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2055299546Serj		    taskqueue_thread_enqueue, &que->tq);
2056299546Serj#ifdef RSS
		/* Pin each queue's taskqueue thread to its RSS bucket CPU */
		cpuset_t cpu_mask;
		int cpu_id = rss_getcpu(i % rss_getnumbuckets());

2057299546Serj		CPU_SETOF(cpu_id, &cpu_mask);
2058299546Serj		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2059299546Serj		    &cpu_mask, "%s (bucket %d)",
2060299546Serj		    device_get_nameunit(dev), cpu_id);
2061299546Serj#else
2062299546Serj		taskqueue_start_threads(&que->tq, 1, PI_NET,
2063299546Serj		    "%s (que %d)", device_get_nameunit(dev), que->me);
2064299546Serj#endif
2065299546Serj	}
2066299546Serj
2067299546Serj}
2068299546Serj
2069299546Serjstatic void
2070299546Serjixl_free_taskqueues(struct ixl_pf *pf)
2071299546Serj{
2072299546Serj	struct ixl_vsi		*vsi = &pf->vsi;
2073299546Serj	struct ixl_queue	*que = vsi->queues;
2074299546Serj
2075299546Serj	if (pf->tq)
2076299546Serj		taskqueue_free(pf->tq);
2077299546Serj	for (int i = 0; i < vsi->num_queues; i++, que++) {
2078299546Serj		if (que->tq)
2079299546Serj			taskqueue_free(que->tq);
2080299546Serj	}
2081299546Serj}
2082299546Serj
2083266423Sjfv/*********************************************************************
2084266423Sjfv *
2085266423Sjfv *  Setup MSIX Interrupt resources and handlers for the VSI
2086266423Sjfv *
2087266423Sjfv **********************************************************************/
2088266423Sjfvstatic int
2089270346Sjfvixl_assign_vsi_msix(struct ixl_pf *pf)
2090266423Sjfv{
2091266423Sjfv	device_t	dev = pf->dev;
2092270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
2093270346Sjfv	struct 		ixl_queue *que = vsi->queues;
2094266423Sjfv	struct		tx_ring	 *txr;
2095266423Sjfv	int 		error, rid, vector = 0;
2096299545Serj#ifdef	RSS
2097299545Serj	cpuset_t cpu_mask;
2098299545Serj#endif
2099266423Sjfv
2100299546Serj	/* Admin Queue interrupt vector is 0 */
2101266423Sjfv	rid = vector + 1;
2102266423Sjfv	pf->res = bus_alloc_resource_any(dev,
2103266423Sjfv    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2104266423Sjfv	if (!pf->res) {
2105299546Serj		device_printf(dev, "Unable to allocate"
2106299546Serj		    " bus resource: Adminq interrupt [rid=%d]\n", rid);
2107266423Sjfv		return (ENXIO);
2108266423Sjfv	}
2109266423Sjfv	/* Set the adminq vector and handler */
2110266423Sjfv	error = bus_setup_intr(dev, pf->res,
2111266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2112270346Sjfv	    ixl_msix_adminq, pf, &pf->tag);
2113266423Sjfv	if (error) {
2114266423Sjfv		pf->res = NULL;
2115266423Sjfv		device_printf(dev, "Failed to register Admin que handler");
2116266423Sjfv		return (error);
2117266423Sjfv	}
2118266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "aq");
2119266423Sjfv	pf->admvec = vector;
2120266423Sjfv	++vector;
2121266423Sjfv
2122266423Sjfv	/* Now set up the stations */
2123266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2124277084Sjfv		int cpu_id = i;
2125266423Sjfv		rid = vector + 1;
2126266423Sjfv		txr = &que->txr;
2127266423Sjfv		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2128266423Sjfv		    RF_SHAREABLE | RF_ACTIVE);
2129266423Sjfv		if (que->res == NULL) {
2130299546Serj			device_printf(dev, "Unable to allocate"
2131299546Serj		    	    " bus resource: que interrupt [rid=%d]\n", rid);
2132266423Sjfv			return (ENXIO);
2133266423Sjfv		}
2134266423Sjfv		/* Set the handler function */
2135266423Sjfv		error = bus_setup_intr(dev, que->res,
2136266423Sjfv		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2137270346Sjfv		    ixl_msix_que, que, &que->tag);
2138266423Sjfv		if (error) {
2139266423Sjfv			que->res = NULL;
2140266423Sjfv			device_printf(dev, "Failed to register que handler");
2141266423Sjfv			return (error);
2142266423Sjfv		}
2143299546Serj		bus_describe_intr(dev, que->res, que->tag, "que%d", i);
2144266423Sjfv		/* Bind the vector to a CPU */
2145277084Sjfv#ifdef RSS
2146277084Sjfv		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2147277084Sjfv#endif
2148277084Sjfv		bus_bind_intr(dev, que->res, cpu_id);
2149266423Sjfv		que->msix = vector;
2150266423Sjfv	}
2151266423Sjfv
2152266423Sjfv	return (0);
2153266423Sjfv}
2154266423Sjfv
2155266423Sjfv
2156266423Sjfv/*
2157266423Sjfv * Allocate MSI/X vectors
2158266423Sjfv */
2159266423Sjfvstatic int
2160270346Sjfvixl_init_msix(struct ixl_pf *pf)
2161266423Sjfv{
2162266423Sjfv	device_t dev = pf->dev;
2163266423Sjfv	int rid, want, vectors, queues, available;
2164266423Sjfv
2165266423Sjfv	/* Override by tuneable */
2166270346Sjfv	if (ixl_enable_msix == 0)
2167266423Sjfv		goto msi;
2168266423Sjfv
2169269198Sjfv	/*
2170269198Sjfv	** When used in a virtualized environment
2171269198Sjfv	** PCI BUSMASTER capability may not be set
2172269198Sjfv	** so explicitly set it here and rewrite
2173269198Sjfv	** the ENABLE in the MSIX control register
2174269198Sjfv	** at this point to cause the host to
2175269198Sjfv	** successfully initialize us.
2176269198Sjfv	*/
2177269198Sjfv	{
2178269198Sjfv		u16 pci_cmd_word;
2179269198Sjfv		int msix_ctrl;
2180269198Sjfv		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2181269198Sjfv		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2182269198Sjfv		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2183269198Sjfv		pci_find_cap(dev, PCIY_MSIX, &rid);
2184269198Sjfv		rid += PCIR_MSIX_CTRL;
2185269198Sjfv		msix_ctrl = pci_read_config(dev, rid, 2);
2186269198Sjfv		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2187269198Sjfv		pci_write_config(dev, rid, msix_ctrl, 2);
2188269198Sjfv	}
2189269198Sjfv
2190266423Sjfv	/* First try MSI/X */
2191270346Sjfv	rid = PCIR_BAR(IXL_BAR);
2192266423Sjfv	pf->msix_mem = bus_alloc_resource_any(dev,
2193266423Sjfv	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2194266423Sjfv       	if (!pf->msix_mem) {
2195266423Sjfv		/* May not be enabled */
2196266423Sjfv		device_printf(pf->dev,
2197266423Sjfv		    "Unable to map MSIX table\n");
2198266423Sjfv		goto msi;
2199266423Sjfv	}
2200266423Sjfv
2201266423Sjfv	available = pci_msix_count(dev);
2202266423Sjfv	if (available == 0) { /* system has msix disabled */
2203266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2204266423Sjfv		    rid, pf->msix_mem);
2205266423Sjfv		pf->msix_mem = NULL;
2206266423Sjfv		goto msi;
2207266423Sjfv	}
2208266423Sjfv
2209266423Sjfv	/* Figure out a reasonable auto config value */
2210266423Sjfv	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2211266423Sjfv
2212299546Serj	/* Override with hardcoded value if it's less than autoconfig count */
2213270346Sjfv	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2214270346Sjfv		queues = ixl_max_queues;
2215299546Serj	else if ((ixl_max_queues != 0) && (ixl_max_queues > queues))
2216299546Serj		device_printf(dev, "ixl_max_queues > # of cpus, using "
2217299546Serj		    "autoconfig amount...\n");
2218299546Serj	/* Or limit maximum auto-configured queues to 8 */
2219299546Serj	else if ((ixl_max_queues == 0) && (queues > 8))
2220299546Serj		queues = 8;
2221266423Sjfv
2222277084Sjfv#ifdef  RSS
2223277084Sjfv	/* If we're doing RSS, clamp at the number of RSS buckets */
2224277084Sjfv	if (queues > rss_getnumbuckets())
2225277084Sjfv		queues = rss_getnumbuckets();
2226277084Sjfv#endif
2227277084Sjfv
2228266423Sjfv	/*
2229266423Sjfv	** Want one vector (RX/TX pair) per queue
2230266423Sjfv	** plus an additional for the admin queue.
2231266423Sjfv	*/
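	/* For example, 8 queue pairs -> want = 9 (8 queue vectors + adminq). */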
2232266423Sjfv	want = queues + 1;
2233266423Sjfv	if (want <= available)	/* Have enough */
2234266423Sjfv		vectors = want;
2235266423Sjfv	else {
2236266423Sjfv               	device_printf(pf->dev,
2237266423Sjfv		    "MSIX Configuration Problem, "
2238266423Sjfv		    "%d vectors available but %d wanted!\n",
2239266423Sjfv		    available, want);
2240266423Sjfv		return (0); /* Will go to Legacy setup */
2241266423Sjfv	}
2242266423Sjfv
2243266423Sjfv	if (pci_alloc_msix(dev, &vectors) == 0) {
2244266423Sjfv               	device_printf(pf->dev,
2245266423Sjfv		    "Using MSIX interrupts with %d vectors\n", vectors);
2246266423Sjfv		pf->msix = vectors;
2247266423Sjfv		pf->vsi.num_queues = queues;
2248277084Sjfv#ifdef RSS
2249277084Sjfv		/*
2250277084Sjfv		 * If we're doing RSS, the number of queues needs to
2251277084Sjfv		 * match the number of RSS buckets that are configured.
2252277084Sjfv		 *
2253277084Sjfv		 * + If there's more queues than RSS buckets, we'll end
2254277084Sjfv		 *   up with queues that get no traffic.
2255277084Sjfv		 *
2256277084Sjfv		 * + If there's more RSS buckets than queues, we'll end
2257277084Sjfv		 *   up having multiple RSS buckets map to the same queue,
2258277084Sjfv		 *   so there'll be some contention.
2259277084Sjfv		 */
2260277084Sjfv		if (queues != rss_getnumbuckets()) {
2261277084Sjfv			device_printf(dev,
2262277084Sjfv			    "%s: queues (%d) != RSS buckets (%d)"
2263277084Sjfv			    "; performance will be impacted.\n",
2264277084Sjfv			    __func__, queues, rss_getnumbuckets());
2265277084Sjfv		}
2266277084Sjfv#endif
2267266423Sjfv		return (vectors);
2268266423Sjfv	}
2269266423Sjfvmsi:
2270266423Sjfv       	vectors = pci_msi_count(dev);
2271266423Sjfv	pf->vsi.num_queues = 1;
2272266423Sjfv	pf->msix = 1;
2273270346Sjfv	ixl_max_queues = 1;
2274270346Sjfv	ixl_enable_msix = 0;
2275266423Sjfv       	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2276299547Serj		device_printf(pf->dev, "Using an MSI interrupt\n");
2277266423Sjfv	else {
2278266423Sjfv		pf->msix = 0;
2279299547Serj		device_printf(pf->dev, "Using a Legacy interrupt\n");
2280266423Sjfv	}
2281266423Sjfv	return (vectors);
2282266423Sjfv}
2283266423Sjfv
2284266423Sjfv
2285266423Sjfv/*
2286299547Serj * Plumb MSIX vectors
2287266423Sjfv */
2288266423Sjfvstatic void
2289270346Sjfvixl_configure_msix(struct ixl_pf *pf)
2290266423Sjfv{
2291266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2292270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
2293266423Sjfv	u32		reg;
2294266423Sjfv	u16		vector = 1;
2295266423Sjfv
2296266423Sjfv	/* First set up the adminq - vector 0 */
2297266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2298266423Sjfv	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2299266423Sjfv
2300266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2301266423Sjfv	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2302266423Sjfv	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2303266423Sjfv	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2304266423Sjfv	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2305266423Sjfv	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2306266423Sjfv	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2307266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2308266423Sjfv
2309299547Serj	/*
2310299547Serj	 * 0x7FF is the end of the queue list.
2311299547Serj	 * This means we won't use MSI-X vector 0 for a queue interrupt
2312299547Serj	 * in MSIX mode.
2313299547Serj	 */
2314266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2315299547Serj	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
2316299547Serj	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
2317266423Sjfv
2318266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2319266423Sjfv	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2320266423Sjfv	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2321266423Sjfv
2322266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2323266423Sjfv
2324266423Sjfv	/* Next configure the queues */
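	/*
	** Each vector's cause list is chained: the RX cause for queue i
	** points at TX queue i, whose cause points at RX queue i + 1;
	** the final TX cause is terminated with IXL_QUEUE_EOL.
	*/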
2325266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2326299545Serj		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2327266423Sjfv		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2328266423Sjfv
2329266423Sjfv		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2330270346Sjfv		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2331266423Sjfv		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2332266423Sjfv		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2333266423Sjfv		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2334266423Sjfv		wr32(hw, I40E_QINT_RQCTL(i), reg);
2335266423Sjfv
2336266423Sjfv		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2337270346Sjfv		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2338266423Sjfv		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2339299545Serj		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2340266423Sjfv		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2341299545Serj		if (i == (vsi->num_queues - 1))
2342299545Serj			reg |= (IXL_QUEUE_EOL
2343299545Serj			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2344266423Sjfv		wr32(hw, I40E_QINT_TQCTL(i), reg);
2345266423Sjfv	}
2346266423Sjfv}
2347266423Sjfv
2348266423Sjfv/*
2349266423Sjfv * Configure for MSI single vector operation
2350266423Sjfv */
2351266423Sjfvstatic void
2352270346Sjfvixl_configure_legacy(struct ixl_pf *pf)
2353266423Sjfv{
2354266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2355266423Sjfv	u32		reg;
2356266423Sjfv
2357266423Sjfv	wr32(hw, I40E_PFINT_ITR0(0), 0);
2358266423Sjfv	wr32(hw, I40E_PFINT_ITR0(1), 0);
2359266423Sjfv
2360266423Sjfv	/* Setup "other" causes */
2361266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2362266423Sjfv	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2363266423Sjfv	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2364266423Sjfv	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2365266423Sjfv	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2366266423Sjfv	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2367266423Sjfv	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2368266423Sjfv	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2369266423Sjfv	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2370266423Sjfv	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2371266423Sjfv	    ;
2372266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2373266423Sjfv
2374266423Sjfv	/* SW_ITR_IDX = 0, but don't change INTENA */
2375266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2376266423Sjfv	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2377266423Sjfv	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2378266423Sjfv	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2379266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2380266423Sjfv
2381266423Sjfv	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2382266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0);
2383266423Sjfv
2384266423Sjfv	/* Associate the queue pair to the vector and enable the q int */
2385266423Sjfv	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2386270346Sjfv	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2387266423Sjfv	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2388266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
2389266423Sjfv
2390266423Sjfv	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2391270346Sjfv	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2392270346Sjfv	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2393266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
2394266423Sjfv
2395266423Sjfv	/* Next enable the queue pair */
2396266423Sjfv	reg = rd32(hw, I40E_QTX_ENA(0));
2397266423Sjfv	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2398266423Sjfv	wr32(hw, I40E_QTX_ENA(0), reg);
2399266423Sjfv
2400266423Sjfv	reg = rd32(hw, I40E_QRX_ENA(0));
2401266423Sjfv	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2402266423Sjfv	wr32(hw, I40E_QRX_ENA(0), reg);
2403266423Sjfv}
2404266423Sjfv
2405266423Sjfv
2406266423Sjfv/*
2407266423Sjfv * Set the Initial ITR state
2408266423Sjfv */
2409266423Sjfvstatic void
2410270346Sjfvixl_configure_itr(struct ixl_pf *pf)
2411266423Sjfv{
2412266423Sjfv	struct i40e_hw		*hw = &pf->hw;
2413270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2414270346Sjfv	struct ixl_queue	*que = vsi->queues;
2415266423Sjfv
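	/*
	** Seed each ring with the tunable ITR values; when the
	** IXL_ITR_DYNAMIC flag is set, ixl_set_queue_rx_itr() and
	** ixl_set_queue_tx_itr() (called from the per-queue interrupt
	** handler) may adjust the interval at runtime.
	*/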
2416270346Sjfv	vsi->rx_itr_setting = ixl_rx_itr;
2417270346Sjfv	if (ixl_dynamic_rx_itr)
2418270346Sjfv		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2419270346Sjfv	vsi->tx_itr_setting = ixl_tx_itr;
2420270346Sjfv	if (ixl_dynamic_tx_itr)
2421270346Sjfv		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2422266423Sjfv
2423266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2424266423Sjfv		struct tx_ring	*txr = &que->txr;
2425266423Sjfv		struct rx_ring 	*rxr = &que->rxr;
2426266423Sjfv
2427270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2428266423Sjfv		    vsi->rx_itr_setting);
2429266423Sjfv		rxr->itr = vsi->rx_itr_setting;
2430270346Sjfv		rxr->latency = IXL_AVE_LATENCY;
2431270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2432266423Sjfv		    vsi->tx_itr_setting);
2433266423Sjfv		txr->itr = vsi->tx_itr_setting;
2434270346Sjfv		txr->latency = IXL_AVE_LATENCY;
2435266423Sjfv	}
2436266423Sjfv}
2437266423Sjfv
2438266423Sjfv
2439266423Sjfvstatic int
2440270346Sjfvixl_allocate_pci_resources(struct ixl_pf *pf)
2441266423Sjfv{
2442266423Sjfv	int             rid;
2443266423Sjfv	device_t        dev = pf->dev;
2444266423Sjfv
2445266423Sjfv	rid = PCIR_BAR(0);
2446266423Sjfv	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2447266423Sjfv	    &rid, RF_ACTIVE);
2448266423Sjfv
2449266423Sjfv	if (!(pf->pci_mem)) {
2450266423Sjfv		device_printf(dev,"Unable to allocate bus resource: memory\n");
2451266423Sjfv		return (ENXIO);
2452266423Sjfv	}
2453266423Sjfv
2454266423Sjfv	pf->osdep.mem_bus_space_tag =
2455266423Sjfv		rman_get_bustag(pf->pci_mem);
2456266423Sjfv	pf->osdep.mem_bus_space_handle =
2457266423Sjfv		rman_get_bushandle(pf->pci_mem);
2458270346Sjfv	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2459272285Srstone	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2460266423Sjfv	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2461266423Sjfv
2462266423Sjfv	pf->hw.back = &pf->osdep;
2463266423Sjfv
2464266423Sjfv	/*
2465266423Sjfv	** Now set up MSI or MSI-X; this should
2466266423Sjfv	** return the number of supported
2467266423Sjfv	** vectors. (Will be 1 for MSI)
2468266423Sjfv	*/
2469270346Sjfv	pf->msix = ixl_init_msix(pf);
2470266423Sjfv	return (0);
2471266423Sjfv}
2472266423Sjfv
2473266423Sjfvstatic void
2474299547Serjixl_free_interrupt_resources(struct ixl_pf *pf)
2475266423Sjfv{
2476270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2477270346Sjfv	struct ixl_queue	*que = vsi->queues;
2478266423Sjfv	device_t		dev = pf->dev;
2479299547Serj	int rid;
2480266423Sjfv
2481266423Sjfv	/* We may get here before stations are setup */
2482270346Sjfv	if ((!ixl_enable_msix) || (que == NULL))
2483266423Sjfv		goto early;
2484266423Sjfv
2485266423Sjfv	/*
2486266423Sjfv	**  Release all msix VSI resources:
2487266423Sjfv	*/
2488266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2489266423Sjfv		rid = que->msix + 1;
2490266423Sjfv		if (que->tag != NULL) {
2491266423Sjfv			bus_teardown_intr(dev, que->res, que->tag);
2492266423Sjfv			que->tag = NULL;
2493266423Sjfv		}
2494299547Serj		if (que->res != NULL) {
2495266423Sjfv			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2496299547Serj			que->res = NULL;
2497299547Serj		}
2498266423Sjfv	}
2499266423Sjfv
2500266423Sjfvearly:
2501266423Sjfv	/* Clean the AdminQ interrupt last */
2502266423Sjfv	if (pf->admvec) /* we are doing MSIX */
2503266423Sjfv		rid = pf->admvec + 1;
2504266423Sjfv	else
2505266423Sjfv		(pf->msix != 0) ? (rid = 1):(rid = 0);
2506266423Sjfv
2507266423Sjfv	if (pf->tag != NULL) {
2508266423Sjfv		bus_teardown_intr(dev, pf->res, pf->tag);
2509266423Sjfv		pf->tag = NULL;
2510266423Sjfv	}
2511299547Serj	if (pf->res != NULL) {
2512266423Sjfv		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2513299547Serj		pf->res = NULL;
2514299547Serj	}
2515299547Serj}
2516266423Sjfv
2517299547Serjstatic void
2518299547Serjixl_free_pci_resources(struct ixl_pf *pf)
2519299547Serj{
2520299547Serj	device_t		dev = pf->dev;
2521299547Serj	int			memrid;
2522299547Serj
2523299547Serj	ixl_free_interrupt_resources(pf);
2524299547Serj
2525266423Sjfv	if (pf->msix)
2526266423Sjfv		pci_release_msi(dev);
2527266423Sjfv
2528299547Serj	memrid = PCIR_BAR(IXL_BAR);
2529299547Serj
2530266423Sjfv	if (pf->msix_mem != NULL)
2531266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2532266423Sjfv		    memrid, pf->msix_mem);
2533266423Sjfv
2534266423Sjfv	if (pf->pci_mem != NULL)
2535266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2536266423Sjfv		    PCIR_BAR(0), pf->pci_mem);
2537266423Sjfv
2538266423Sjfv	return;
2539266423Sjfv}
2540266423Sjfv
2541274205Sjfvstatic void
2542274205Sjfvixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2543274205Sjfv{
2544274205Sjfv	/* Display supported media types */
2545274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2546274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2547266423Sjfv
2548274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2549274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2550279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2551279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2552279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2553279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2554274205Sjfv
2555284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2556279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2557274205Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2558274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2559279033Sjfv
2560274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2561274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2562274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2563274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2564274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2565274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2566274205Sjfv
2567279033Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2568279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2569279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2570279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2571279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2572274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2573274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2574274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2575274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2576274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2577284049Sjfv
2578284049Sjfv#ifndef IFM_ETH_XTYPE
2579284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2580284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2581284049Sjfv
2582284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2583284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
2584284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2585284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_SFI))
2586284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2587284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2588284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2589284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2590284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2591284049Sjfv
2592284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2593284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2594284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2595284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2596284049Sjfv#else
2597284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2598284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2599284049Sjfv
2600284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
2601284049Sjfv	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
2602284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
2603284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
2604284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
2605284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
2606284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
2607284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2608284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2609284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2610284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2611284049Sjfv
2612284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
2613284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
2614284049Sjfv
2615284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2616284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
2617284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2618284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
2619284049Sjfv#endif
2620274205Sjfv}
2621274205Sjfv
2622266423Sjfv/*********************************************************************
2623266423Sjfv *
2624266423Sjfv *  Setup networking device structure and register an interface.
2625266423Sjfv *
2626266423Sjfv **********************************************************************/
2627266423Sjfvstatic int
2628270346Sjfvixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2629266423Sjfv{
2630266423Sjfv	struct ifnet		*ifp;
2631266423Sjfv	struct i40e_hw		*hw = vsi->hw;
2632270346Sjfv	struct ixl_queue	*que = vsi->queues;
2633279033Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
2634266423Sjfv	enum i40e_status_code aq_error = 0;
2635266423Sjfv
2636270346Sjfv	INIT_DEBUGOUT("ixl_setup_interface: begin");
2637266423Sjfv
2638266423Sjfv	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2639266423Sjfv	if (ifp == NULL) {
2640266423Sjfv		device_printf(dev, "can not allocate ifnet structure\n");
2641266423Sjfv		return (-1);
2642266423Sjfv	}
2643266423Sjfv	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2644266423Sjfv	ifp->if_mtu = ETHERMTU;
2645299546Serj	ifp->if_baudrate = IF_Gbps(40);
2646270346Sjfv	ifp->if_init = ixl_init;
2647266423Sjfv	ifp->if_softc = vsi;
2648266423Sjfv	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2649270346Sjfv	ifp->if_ioctl = ixl_ioctl;
2650266423Sjfv
2651274205Sjfv#if __FreeBSD_version >= 1100036
2652272227Sglebius	if_setgetcounterfn(ifp, ixl_get_counter);
2653272227Sglebius#endif
2654272227Sglebius
2655270346Sjfv	ifp->if_transmit = ixl_mq_start;
2656266423Sjfv
2657270346Sjfv	ifp->if_qflush = ixl_qflush;
2658266423Sjfv
2659266423Sjfv	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2660266423Sjfv
2661266423Sjfv	vsi->max_frame_size =
2662266423Sjfv	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2663266423Sjfv	    + ETHER_VLAN_ENCAP_LEN;
2664266423Sjfv
2665266423Sjfv	/*
2666266423Sjfv	 * Tell the upper layer(s) we support long frames.
2667266423Sjfv	 */
2668270856Sglebius	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2669266423Sjfv
2670266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM;
2671266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2672266423Sjfv	ifp->if_capabilities |= IFCAP_TSO;
2673266423Sjfv	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2674266423Sjfv	ifp->if_capabilities |= IFCAP_LRO;
2675266423Sjfv
2676266423Sjfv	/* VLAN capabilties */
2677266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2678266423Sjfv			     |  IFCAP_VLAN_HWTSO
2679266423Sjfv			     |  IFCAP_VLAN_MTU
2680266423Sjfv			     |  IFCAP_VLAN_HWCSUM;
2681266423Sjfv	ifp->if_capenable = ifp->if_capabilities;
2682266423Sjfv
2683266423Sjfv	/*
2684266423Sjfv	** Don't turn this on by default: if vlans are
2685266423Sjfv	** created on another pseudo device (e.g. lagg),
2686266423Sjfv	** then vlan events are not passed through, breaking
2687266423Sjfv	** operation, but with HW FILTER off it works. If
2688266423Sjfv	** you use vlans directly on the ixl driver you can
2689270346Sjfv	** enable this and get full hardware tag filtering.
2690266423Sjfv	*/
2691266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2692266423Sjfv
2693266423Sjfv	/*
2694266423Sjfv	 * Specify the media types supported by this adapter and register
2695266423Sjfv	 * callbacks to update media and link information
2696266423Sjfv	 */
2697270346Sjfv	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2698270346Sjfv		     ixl_media_status);
2699266423Sjfv
2700279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
2701279033Sjfv	    FALSE, TRUE, &abilities, NULL);
2702279033Sjfv	/* May need delay to detect fiber correctly */
2703274205Sjfv	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2704274205Sjfv		i40e_msec_delay(200);
2705277084Sjfv		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2706279033Sjfv		    TRUE, &abilities, NULL);
2707279033Sjfv	}
2708279033Sjfv	if (aq_error) {
2709274205Sjfv		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2710274205Sjfv			device_printf(dev, "Unknown PHY type detected!\n");
2711274205Sjfv		else
2712279033Sjfv			device_printf(dev,
2713279033Sjfv			    "Error getting supported media types, err %d,"
2714279033Sjfv			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2715279033Sjfv		return (0);
2716279033Sjfv	}
2717266423Sjfv
2718279033Sjfv	ixl_add_ifmedia(vsi, abilities.phy_type);
2719279033Sjfv
2720266423Sjfv	/* Use autoselect media by default */
2721266423Sjfv	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2722266423Sjfv	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2723266423Sjfv
2724274205Sjfv	ether_ifattach(ifp, hw->mac.addr);
2725274205Sjfv
2726266423Sjfv	return (0);
2727266423Sjfv}
2728266423Sjfv
2729279858Sjfv/*
2730299547Serj** Run when the Admin Queue gets a link state change interrupt.
2731279858Sjfv*/
2732279858Sjfvstatic void
2733279858Sjfvixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2734266423Sjfv{
2735279858Sjfv	struct i40e_hw	*hw = &pf->hw;
2736299547Serj	device_t dev = pf->dev;
2737279858Sjfv	struct i40e_aqc_get_link_status *status =
2738279858Sjfv	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2739266423Sjfv
2740299547Serj	/* Firmware workaround: may need to wait for link to actually come up... */
2741299547Serj	if (!pf->link_up && (status->link_info & I40E_AQ_SIGNAL_DETECT)) {
2742299547Serj		device_printf(dev, "%s: Waiting...\n", __func__);
2743299547Serj		i40e_msec_delay(4000);
2744299547Serj	}
2745299547Serj
2746299547Serj	/* Request link status from adapter */
2747279858Sjfv	hw->phy.get_link_info = TRUE;
2748299547Serj	i40e_get_link_status(hw, &pf->link_up);
2749299547Serj
2750299547Serj	/* Print out message if an unqualified module is found */
2751279858Sjfv	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2752279858Sjfv	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2753279858Sjfv	    (!(status->link_info & I40E_AQ_LINK_UP)))
2754299547Serj		device_printf(dev, "Link failed because "
2755299547Serj		    "an unqualified module was detected!\n");
2756279858Sjfv
2757299547Serj	/* Update OS link info */
2758299547Serj	ixl_update_link_status(pf);
2759266423Sjfv}
2760266423Sjfv
2761266423Sjfv/*********************************************************************
2762266423Sjfv *
2763279033Sjfv *  Get Firmware Switch configuration
2764279033Sjfv *	- this will need to be more robust when more complex
2765279033Sjfv *	  switch configurations are enabled.
2766266423Sjfv *
2767266423Sjfv **********************************************************************/
2768266423Sjfvstatic int
2769279033Sjfvixl_switch_config(struct ixl_pf *pf)
2770266423Sjfv{
2771279033Sjfv	struct i40e_hw	*hw = &pf->hw;
2772279033Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
2773266423Sjfv	device_t 	dev = vsi->dev;
2774266423Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
2775266423Sjfv	u8	aq_buf[I40E_AQ_LARGE_BUF];
2776279858Sjfv	int	ret;
2777266423Sjfv	u16	next = 0;
2778266423Sjfv
2779279033Sjfv	memset(&aq_buf, 0, sizeof(aq_buf));
2780266423Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2781266423Sjfv	ret = i40e_aq_get_switch_config(hw, sw_config,
2782266423Sjfv	    sizeof(aq_buf), &next, NULL);
2783266423Sjfv	if (ret) {
2784279858Sjfv		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2785279858Sjfv		    ret);
2786266423Sjfv		return (ret);
2787266423Sjfv	}
2788270346Sjfv#ifdef IXL_DEBUG
2789279858Sjfv	device_printf(dev,
2790279858Sjfv	    "Switch config: header reported: %d in structure, %d total\n",
2791266423Sjfv    	    sw_config->header.num_reported, sw_config->header.num_total);
2792279858Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
2793279858Sjfv		device_printf(dev,
2794279858Sjfv		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2795279858Sjfv		    sw_config->element[i].element_type,
2796279858Sjfv		    sw_config->element[i].seid,
2797279858Sjfv		    sw_config->element[i].uplink_seid,
2798279858Sjfv		    sw_config->element[i].downlink_seid);
2799279858Sjfv	}
2800266423Sjfv#endif
2801279033Sjfv	/* Simplified due to a single VSI at the moment */
2802279858Sjfv	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2803279858Sjfv	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2804266423Sjfv	vsi->seid = sw_config->element[0].seid;
2805279033Sjfv	return (ret);
2806279033Sjfv}
2807266423Sjfv
2808279033Sjfv/*********************************************************************
2809279033Sjfv *
2810279033Sjfv *  Initialize the VSI:  this handles contexts, which means things
2811279033Sjfv *  			 like the number of descriptors and buffer size;
2812279033Sjfv *			 the rings are also initialized through this function.
2813279033Sjfv *
2814279033Sjfv **********************************************************************/
2815279033Sjfvstatic int
2816279033Sjfvixl_initialize_vsi(struct ixl_vsi *vsi)
2817279033Sjfv{
2818279858Sjfv	struct ixl_pf		*pf = vsi->back;
2819279033Sjfv	struct ixl_queue	*que = vsi->queues;
2820279033Sjfv	device_t		dev = vsi->dev;
2821279033Sjfv	struct i40e_hw		*hw = vsi->hw;
2822279033Sjfv	struct i40e_vsi_context	ctxt;
2823279033Sjfv	int			err = 0;
2824279033Sjfv
2825266423Sjfv	memset(&ctxt, 0, sizeof(ctxt));
2826266423Sjfv	ctxt.seid = vsi->seid;
2827279858Sjfv	if (pf->veb_seid != 0)
2828279858Sjfv		ctxt.uplink_seid = pf->veb_seid;
2829266423Sjfv	ctxt.pf_num = hw->pf_id;
2830279033Sjfv	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2831279033Sjfv	if (err) {
2832279033Sjfv		device_printf(dev,"get vsi params failed %x!!\n", err);
2833279033Sjfv		return (err);
2834266423Sjfv	}
2835270346Sjfv#ifdef IXL_DEBUG
2836266423Sjfv	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2837266423Sjfv	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2838266423Sjfv	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2839266423Sjfv	    ctxt.uplink_seid, ctxt.vsi_number,
2840266423Sjfv	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2841266423Sjfv	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2842266423Sjfv	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2843266423Sjfv#endif
2844266423Sjfv	/*
2845266423Sjfv	** Set the queue and traffic class bits
2846266423Sjfv	**  - when multiple traffic classes are supported
2847266423Sjfv	**    this will need to be more robust.
2848266423Sjfv	*/
2849266423Sjfv	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2850266423Sjfv	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
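	/*
	** All queues go into a single contiguous map under TC0:
	** queue_mapping[0] starts it at queue 0, and the upper bits
	** of tc_mapping[0] (the 0x0800) carry the TC0 queue-count
	** field per the AQ VSI context layout.
	*/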
2851299545Serj	ctxt.info.queue_mapping[0] = 0;
2852299545Serj	ctxt.info.tc_mapping[0] = 0x0800;
2853266423Sjfv
2854266423Sjfv	/* Set VLAN receive stripping mode */
2855266423Sjfv	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2856266423Sjfv	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2857266423Sjfv	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2858266423Sjfv	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2859266423Sjfv	else
2860266423Sjfv	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2861266423Sjfv
2862266423Sjfv	/* Keep copy of VSI info in VSI for statistic counters */
2863266423Sjfv	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2864266423Sjfv
2865266423Sjfv	/* Reset VSI statistics */
2866270346Sjfv	ixl_vsi_reset_stats(vsi);
2867266423Sjfv	vsi->hw_filters_add = 0;
2868266423Sjfv	vsi->hw_filters_del = 0;
2869266423Sjfv
2870279858Sjfv	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2871279858Sjfv
2872279033Sjfv	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2873279033Sjfv	if (err) {
2874266423Sjfv		device_printf(dev,"update vsi params failed %x!!\n",
2875266423Sjfv		   hw->aq.asq_last_status);
2876279033Sjfv		return (err);
2877279033Sjfv	}
2878266423Sjfv
2879266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2880266423Sjfv		struct tx_ring		*txr = &que->txr;
2881266423Sjfv		struct rx_ring 		*rxr = &que->rxr;
2882266423Sjfv		struct i40e_hmc_obj_txq tctx;
2883266423Sjfv		struct i40e_hmc_obj_rxq rctx;
2884266423Sjfv		u32			txctl;
2885266423Sjfv		u16			size;
2886266423Sjfv
2887266423Sjfv
2888266423Sjfv		/* Setup the HMC TX Context  */
2889266423Sjfv		size = que->num_desc * sizeof(struct i40e_tx_desc);
2890266423Sjfv		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2891266423Sjfv		tctx.new_context = 1;
2892279858Sjfv		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2893266423Sjfv		tctx.qlen = que->num_desc;
2894266423Sjfv		tctx.fc_ena = 0;
2895269198Sjfv		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2896269198Sjfv		/* Enable HEAD writeback */
2897269198Sjfv		tctx.head_wb_ena = 1;
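		/*
		** The HW writes the TX head index back into the u32
		** reserved just past the last descriptor (see the extra
		** sizeof(u32) added to tsize in ixl_setup_stations()).
		*/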
2898269198Sjfv		tctx.head_wb_addr = txr->dma.pa +
2899269198Sjfv		    (que->num_desc * sizeof(struct i40e_tx_desc));
2900266423Sjfv		tctx.rdylist_act = 0;
2901266423Sjfv		err = i40e_clear_lan_tx_queue_context(hw, i);
2902266423Sjfv		if (err) {
2903266423Sjfv			device_printf(dev, "Unable to clear TX context\n");
2904266423Sjfv			break;
2905266423Sjfv		}
2906266423Sjfv		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2907266423Sjfv		if (err) {
2908266423Sjfv			device_printf(dev, "Unable to set TX context\n");
2909266423Sjfv			break;
2910266423Sjfv		}
2911266423Sjfv		/* Associate the ring with this PF */
2912266423Sjfv		txctl = I40E_QTX_CTL_PF_QUEUE;
2913266423Sjfv		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2914266423Sjfv		    I40E_QTX_CTL_PF_INDX_MASK);
2915266423Sjfv		wr32(hw, I40E_QTX_CTL(i), txctl);
2916270346Sjfv		ixl_flush(hw);
2917266423Sjfv
2918266423Sjfv		/* Do ring (re)init */
2919270346Sjfv		ixl_init_tx_ring(que);
2920266423Sjfv
2921266423Sjfv		/* Next setup the HMC RX Context  */
2922279858Sjfv		if (vsi->max_frame_size <= MCLBYTES)
2923266423Sjfv			rxr->mbuf_sz = MCLBYTES;
2924266423Sjfv		else
2925266423Sjfv			rxr->mbuf_sz = MJUMPAGESIZE;
2926266423Sjfv
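		/*
		** Largest frame the HW can receive by chaining rx
		** buffers together; rxmax below is clamped to the
		** smaller of this and the VSI max_frame_size.
		*/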
2927266423Sjfv		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2928266423Sjfv
2929266423Sjfv		/* Set up an RX context for the HMC */
2930266423Sjfv		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2931266423Sjfv		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2932266423Sjfv		/* ignore header split for now */
2933266423Sjfv		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2934266423Sjfv		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2935266423Sjfv		    vsi->max_frame_size : max_rxmax;
2936266423Sjfv		rctx.dtype = 0;
2937266423Sjfv		rctx.dsize = 1;	/* do 32byte descriptors */
2938266423Sjfv		rctx.hsplit_0 = 0;  /* no HDR split initially */
2939279858Sjfv		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2940266423Sjfv		rctx.qlen = que->num_desc;
2941266423Sjfv		rctx.tphrdesc_ena = 1;
2942266423Sjfv		rctx.tphwdesc_ena = 1;
2943266423Sjfv		rctx.tphdata_ena = 0;
2944266423Sjfv		rctx.tphhead_ena = 0;
2945266423Sjfv		rctx.lrxqthresh = 2;
2946266423Sjfv		rctx.crcstrip = 1;
2947266423Sjfv		rctx.l2tsel = 1;
2948266423Sjfv		rctx.showiv = 1;
2949266423Sjfv		rctx.fc_ena = 0;
2950266423Sjfv		rctx.prefena = 1;
2951266423Sjfv
2952266423Sjfv		err = i40e_clear_lan_rx_queue_context(hw, i);
2953266423Sjfv		if (err) {
2954266423Sjfv			device_printf(dev,
2955266423Sjfv			    "Unable to clear RX context %d\n", i);
2956266423Sjfv			break;
2957266423Sjfv		}
2958266423Sjfv		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2959266423Sjfv		if (err) {
2960266423Sjfv			device_printf(dev, "Unable to set RX context %d\n", i);
2961266423Sjfv			break;
2962266423Sjfv		}
2963270346Sjfv		err = ixl_init_rx_ring(que);
2964266423Sjfv		if (err) {
2965266423Sjfv			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2966266423Sjfv			break;
2967266423Sjfv		}
2968299545Serj		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2969279860Sjfv#ifdef DEV_NETMAP
2970279860Sjfv		/* preserve queue */
2971279860Sjfv		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2972279860Sjfv			struct netmap_adapter *na = NA(vsi->ifp);
2973279860Sjfv			struct netmap_kring *kring = &na->rx_rings[i];
2974279860Sjfv			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2975279860Sjfv			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2976279860Sjfv		} else
2977279860Sjfv#endif /* DEV_NETMAP */
2978266423Sjfv		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2979266423Sjfv	}
2980266423Sjfv	return (err);
2981266423Sjfv}
2982266423Sjfv
2983266423Sjfv
2984266423Sjfv/*********************************************************************
2985266423Sjfv *
2986266423Sjfv *  Free all VSI structs.
2987266423Sjfv *
2988266423Sjfv **********************************************************************/
2989266423Sjfvvoid
2990270346Sjfvixl_free_vsi(struct ixl_vsi *vsi)
2991266423Sjfv{
2992270346Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2993270346Sjfv	struct ixl_queue	*que = vsi->queues;
2994266423Sjfv
2995266423Sjfv	/* Free station queues */
2996266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2997266423Sjfv		struct tx_ring *txr = &que->txr;
2998266423Sjfv		struct rx_ring *rxr = &que->rxr;
2999266423Sjfv
3000266423Sjfv		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
3001266423Sjfv			continue;
3002270346Sjfv		IXL_TX_LOCK(txr);
3003270346Sjfv		ixl_free_que_tx(que);
3004266423Sjfv		if (txr->base)
3005271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
3006270346Sjfv		IXL_TX_UNLOCK(txr);
3007270346Sjfv		IXL_TX_LOCK_DESTROY(txr);
3008266423Sjfv
3009266423Sjfv		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
3010266423Sjfv			continue;
3011270346Sjfv		IXL_RX_LOCK(rxr);
3012270346Sjfv		ixl_free_que_rx(que);
3013266423Sjfv		if (rxr->base)
3014271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
3015270346Sjfv		IXL_RX_UNLOCK(rxr);
3016270346Sjfv		IXL_RX_LOCK_DESTROY(rxr);
3017266423Sjfv
3018266423Sjfv	}
3019266423Sjfv	free(vsi->queues, M_DEVBUF);
3020266423Sjfv
3021266423Sjfv	/* Free VSI filter list */
3022279858Sjfv	ixl_free_mac_filters(vsi);
3023279858Sjfv}
3024279858Sjfv
3025279858Sjfvstatic void
3026279858Sjfvixl_free_mac_filters(struct ixl_vsi *vsi)
3027279858Sjfv{
3028279858Sjfv	struct ixl_mac_filter *f;
3029279858Sjfv
3030266423Sjfv	while (!SLIST_EMPTY(&vsi->ftl)) {
3031266423Sjfv		f = SLIST_FIRST(&vsi->ftl);
3032266423Sjfv		SLIST_REMOVE_HEAD(&vsi->ftl, next);
3033266423Sjfv		free(f, M_DEVBUF);
3034266423Sjfv	}
3035266423Sjfv}
3036266423Sjfv
3037266423Sjfv
3038266423Sjfv/*********************************************************************
3039266423Sjfv *
3040266423Sjfv *  Allocate memory for the VSI (virtual station interface) and its
3041266423Sjfv *  associated queues, rings and the descriptors for each; this is
3042266423Sjfv *  called only once at attach.
3043266423Sjfv *
3044266423Sjfv **********************************************************************/
3045266423Sjfvstatic int
3046270346Sjfvixl_setup_stations(struct ixl_pf *pf)
3047266423Sjfv{
3048266423Sjfv	device_t		dev = pf->dev;
3049270346Sjfv	struct ixl_vsi		*vsi;
3050270346Sjfv	struct ixl_queue	*que;
3051266423Sjfv	struct tx_ring		*txr;
3052266423Sjfv	struct rx_ring		*rxr;
3053266423Sjfv	int 			rsize, tsize;
3054266423Sjfv	int			error = I40E_SUCCESS;
3055266423Sjfv
3056266423Sjfv	vsi = &pf->vsi;
3057266423Sjfv	vsi->back = (void *)pf;
3058266423Sjfv	vsi->hw = &pf->hw;
3059266423Sjfv	vsi->id = 0;
3060266423Sjfv	vsi->num_vlans = 0;
3061279858Sjfv	vsi->back = pf;
3062266423Sjfv
3063266423Sjfv	/* Get memory for the station queues */
3064266423Sjfv        if (!(vsi->queues =
3065270346Sjfv            (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
3066266423Sjfv            vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3067266423Sjfv                device_printf(dev, "Unable to allocate queue memory\n");
3068266423Sjfv                error = ENOMEM;
3069266423Sjfv                goto early;
3070266423Sjfv        }
3071266423Sjfv
3072266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3073266423Sjfv		que = &vsi->queues[i];
3074270346Sjfv		que->num_desc = ixl_ringsz;
3075266423Sjfv		que->me = i;
3076266423Sjfv		que->vsi = vsi;
3077269198Sjfv		/* mark the queue as active */
3078269198Sjfv		vsi->active_queues |= (u64)1 << que->me;
3079266423Sjfv		txr = &que->txr;
3080266423Sjfv		txr->que = que;
3081269198Sjfv		txr->tail = I40E_QTX_TAIL(que->me);
3082266423Sjfv
3083266423Sjfv		/* Initialize the TX lock */
3084266423Sjfv		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
3085266423Sjfv		    device_get_nameunit(dev), que->me);
3086266423Sjfv		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
3087266423Sjfv		/* Create the TX descriptor ring */
3088269198Sjfv		tsize = roundup2((que->num_desc *
3089269198Sjfv		    sizeof(struct i40e_tx_desc)) +
3090269198Sjfv		    sizeof(u32), DBA_ALIGN);
3091271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
3092271834Sbz		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
3093266423Sjfv			device_printf(dev,
3094266423Sjfv			    "Unable to allocate TX Descriptor memory\n");
3095266423Sjfv			error = ENOMEM;
3096266423Sjfv			goto fail;
3097266423Sjfv		}
3098266423Sjfv		txr->base = (struct i40e_tx_desc *)txr->dma.va;
3099266423Sjfv		bzero((void *)txr->base, tsize);
3100266423Sjfv       		/* Now allocate transmit soft structs for the ring */
3101270346Sjfv       		if (ixl_allocate_tx_data(que)) {
3102266423Sjfv			device_printf(dev,
3103266423Sjfv			    "Critical Failure setting up TX structures\n");
3104266423Sjfv			error = ENOMEM;
3105266423Sjfv			goto fail;
3106266423Sjfv       		}
3107266423Sjfv		/* Allocate a buf ring */
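		/* (4096 entries; feeds the multiqueue transmit path,
		   ixl_mq_start/ixl_qflush) */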
3108266423Sjfv		txr->br = buf_ring_alloc(4096, M_DEVBUF,
3109299547Serj		    M_NOWAIT, &txr->mtx);
3110266423Sjfv		if (txr->br == NULL) {
3111266423Sjfv			device_printf(dev,
3112266423Sjfv			    "Critical Failure setting up TX buf ring\n");
3113266423Sjfv			error = ENOMEM;
3114266423Sjfv			goto fail;
3115266423Sjfv       		}
3116266423Sjfv
3117266423Sjfv		/*
3118266423Sjfv		 * Next the RX queues...
3119266423Sjfv		 */
3120266423Sjfv		rsize = roundup2(que->num_desc *
3121266423Sjfv		    sizeof(union i40e_rx_desc), DBA_ALIGN);
3122266423Sjfv		rxr = &que->rxr;
3123266423Sjfv		rxr->que = que;
3124269198Sjfv		rxr->tail = I40E_QRX_TAIL(que->me);
3125266423Sjfv
3126266423Sjfv		/* Initialize the RX side lock */
3127266423Sjfv		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3128266423Sjfv		    device_get_nameunit(dev), que->me);
3129266423Sjfv		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
3130266423Sjfv
3131271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
3132271834Sbz		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
3133266423Sjfv			device_printf(dev,
3134266423Sjfv			    "Unable to allocate RX Descriptor memory\n");
3135266423Sjfv			error = ENOMEM;
3136266423Sjfv			goto fail;
3137266423Sjfv		}
3138266423Sjfv		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
3139266423Sjfv		bzero((void *)rxr->base, rsize);
3140266423Sjfv
3141266423Sjfv        	/* Allocate receive soft structs for the ring*/
3142270346Sjfv		if (ixl_allocate_rx_data(que)) {
3143266423Sjfv			device_printf(dev,
3144266423Sjfv			    "Critical Failure setting up receive structs\n");
3145266423Sjfv			error = ENOMEM;
3146266423Sjfv			goto fail;
3147266423Sjfv		}
3148266423Sjfv	}
3149266423Sjfv
3150266423Sjfv	return (0);
3151266423Sjfv
3152266423Sjfvfail:
3153266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3154266423Sjfv		que = &vsi->queues[i];
3155266423Sjfv		rxr = &que->rxr;
3156266423Sjfv		txr = &que->txr;
3157266423Sjfv		if (rxr->base)
3158271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
3159266423Sjfv		if (txr->base)
3160271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
3161266423Sjfv	}
3162266423Sjfv
3163266423Sjfvearly:
3164266423Sjfv	return (error);
3165266423Sjfv}
3166266423Sjfv
3167266423Sjfv/*
3168266423Sjfv** Provide an update to the queue RX
3169266423Sjfv** interrupt moderation value.
3170266423Sjfv*/
3171266423Sjfvstatic void
3172270346Sjfvixl_set_queue_rx_itr(struct ixl_queue *que)
3173266423Sjfv{
3174270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3175266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3176266423Sjfv	struct rx_ring	*rxr = &que->rxr;
3177266423Sjfv	u16		rx_itr;
3178266423Sjfv	u16		rx_latency = 0;
3179266423Sjfv	int		rx_bytes;
3180266423Sjfv
3181266423Sjfv
3182266423Sjfv	/* Idle, do nothing */
3183266423Sjfv	if (rxr->bytes == 0)
3184266423Sjfv		return;
3185266423Sjfv
3186270346Sjfv	if (ixl_dynamic_rx_itr) {
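		/*
		** Normalize the byte count by the current ITR to get a
		** rough per-interval load figure, then use it to pick a
		** latency class below.
		*/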
3187266423Sjfv		rx_bytes = rxr->bytes/rxr->itr;
3188266423Sjfv		rx_itr = rxr->itr;
3189266423Sjfv
3190266423Sjfv		/* Adjust latency range */
3191266423Sjfv		switch (rxr->latency) {
3192270346Sjfv		case IXL_LOW_LATENCY:
3193266423Sjfv			if (rx_bytes > 10) {
3194270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3195270346Sjfv				rx_itr = IXL_ITR_20K;
3196266423Sjfv			}
3197266423Sjfv			break;
3198270346Sjfv		case IXL_AVE_LATENCY:
3199266423Sjfv			if (rx_bytes > 20) {
3200270346Sjfv				rx_latency = IXL_BULK_LATENCY;
3201270346Sjfv				rx_itr = IXL_ITR_8K;
3202266423Sjfv			} else if (rx_bytes <= 10) {
3203270346Sjfv				rx_latency = IXL_LOW_LATENCY;
3204270346Sjfv				rx_itr = IXL_ITR_100K;
3205266423Sjfv			}
3206266423Sjfv			break;
3207270346Sjfv		case IXL_BULK_LATENCY:
3208266423Sjfv			if (rx_bytes <= 20) {
3209270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3210270346Sjfv				rx_itr = IXL_ITR_20K;
3211266423Sjfv			}
3212266423Sjfv			break;
3213266423Sjfv		}
3214266423Sjfv
3215266423Sjfv		rxr->latency = rx_latency;
3216266423Sjfv
3217266423Sjfv		if (rx_itr != rxr->itr) {
3218266423Sjfv			/* do an exponential smoothing */
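			/*
			** (a 90/10 weighted harmonic mean of the old and the
			** newly computed value, so the effective interrupt
			** rate only moves a tenth of the way toward the new
			** target on each pass)
			*/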
3219266423Sjfv			rx_itr = (10 * rx_itr * rxr->itr) /
3220266423Sjfv			    ((9 * rx_itr) + rxr->itr);
3221270346Sjfv			rxr->itr = rx_itr & IXL_MAX_ITR;
3222270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3223266423Sjfv			    que->me), rxr->itr);
3224266423Sjfv		}
3225266423Sjfv	} else { /* We may have toggled to non-dynamic */
3226270346Sjfv		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3227270346Sjfv			vsi->rx_itr_setting = ixl_rx_itr;
3228266423Sjfv		/* Update the hardware if needed */
3229266423Sjfv		if (rxr->itr != vsi->rx_itr_setting) {
3230266423Sjfv			rxr->itr = vsi->rx_itr_setting;
3231270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3232266423Sjfv			    que->me), rxr->itr);
3233266423Sjfv		}
3234266423Sjfv	}
3235266423Sjfv	rxr->bytes = 0;
3236266423Sjfv	rxr->packets = 0;
3237266423Sjfv	return;
3238266423Sjfv}
3239266423Sjfv
3240266423Sjfv
3241266423Sjfv/*
3242266423Sjfv** Provide an update to the queue TX
3243266423Sjfv** interrupt moderation value.
3244266423Sjfv*/
3245266423Sjfvstatic void
3246270346Sjfvixl_set_queue_tx_itr(struct ixl_queue *que)
3247266423Sjfv{
3248270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3249266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3250266423Sjfv	struct tx_ring	*txr = &que->txr;
3251266423Sjfv	u16		tx_itr;
3252266423Sjfv	u16		tx_latency = 0;
3253266423Sjfv	int		tx_bytes;
3254266423Sjfv
3255266423Sjfv
3256266423Sjfv	/* Idle, do nothing */
3257266423Sjfv	if (txr->bytes == 0)
3258266423Sjfv		return;
3259266423Sjfv
3260270346Sjfv	if (ixl_dynamic_tx_itr) {
3261266423Sjfv		tx_bytes = txr->bytes/txr->itr;
3262266423Sjfv		tx_itr = txr->itr;
3263266423Sjfv
3264266423Sjfv		switch (txr->latency) {
3265270346Sjfv		case IXL_LOW_LATENCY:
3266266423Sjfv			if (tx_bytes > 10) {
3267270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3268270346Sjfv				tx_itr = IXL_ITR_20K;
3269266423Sjfv			}
3270266423Sjfv			break;
3271270346Sjfv		case IXL_AVE_LATENCY:
3272266423Sjfv			if (tx_bytes > 20) {
3273270346Sjfv				tx_latency = IXL_BULK_LATENCY;
3274270346Sjfv				tx_itr = IXL_ITR_8K;
3275266423Sjfv			} else if (tx_bytes <= 10) {
3276270346Sjfv				tx_latency = IXL_LOW_LATENCY;
3277270346Sjfv				tx_itr = IXL_ITR_100K;
3278266423Sjfv			}
3279266423Sjfv			break;
3280270346Sjfv		case IXL_BULK_LATENCY:
3281266423Sjfv			if (tx_bytes <= 20) {
3282270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3283270346Sjfv				tx_itr = IXL_ITR_20K;
3284266423Sjfv			}
3285266423Sjfv			break;
3286266423Sjfv		}
3287266423Sjfv
3288266423Sjfv		txr->latency = tx_latency;
3289266423Sjfv
3290266423Sjfv		if (tx_itr != txr->itr) {
3291266423Sjfv			/* do an exponential smoothing */
3292266423Sjfv			tx_itr = (10 * tx_itr * txr->itr) /
3293266423Sjfv			    ((9 * tx_itr) + txr->itr);
3294270346Sjfv			txr->itr = tx_itr & IXL_MAX_ITR;
3295270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3296266423Sjfv			    que->me), txr->itr);
3297266423Sjfv		}
3298266423Sjfv
3299266423Sjfv	} else { /* We may have toggled to non-dynamic */
3300270346Sjfv		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3301270346Sjfv			vsi->tx_itr_setting = ixl_tx_itr;
3302266423Sjfv		/* Update the hardware if needed */
3303266423Sjfv		if (txr->itr != vsi->tx_itr_setting) {
3304266423Sjfv			txr->itr = vsi->tx_itr_setting;
3305270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3306266423Sjfv			    que->me), txr->itr);
3307266423Sjfv		}
3308266423Sjfv	}
3309266423Sjfv	txr->bytes = 0;
3310266423Sjfv	txr->packets = 0;
3311266423Sjfv	return;
3312266423Sjfv}
3313266423Sjfv
3314279858Sjfv#define QUEUE_NAME_LEN 32
3315266423Sjfv
3316266423Sjfvstatic void
3317279858Sjfvixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3318279858Sjfv    struct sysctl_ctx_list *ctx, const char *sysctl_name)
3319279858Sjfv{
3320279858Sjfv	struct sysctl_oid *tree;
3321279858Sjfv	struct sysctl_oid_list *child;
3322279858Sjfv	struct sysctl_oid_list *vsi_list;
3323279858Sjfv
3324279858Sjfv	tree = device_get_sysctl_tree(pf->dev);
3325279858Sjfv	child = SYSCTL_CHILDREN(tree);
3326279858Sjfv	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3327279858Sjfv				   CTLFLAG_RD, NULL, "VSI Number");
3328279858Sjfv	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3329279858Sjfv
3330279858Sjfv	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3331279858Sjfv}
3332279858Sjfv
3333279858Sjfvstatic void
3334270346Sjfvixl_add_hw_stats(struct ixl_pf *pf)
3335266423Sjfv{
3336266423Sjfv	device_t dev = pf->dev;
3337270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
3338270346Sjfv	struct ixl_queue *queues = vsi->queues;
3339269198Sjfv	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3340266423Sjfv
3341266423Sjfv	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3342266423Sjfv	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3343266423Sjfv	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3344279858Sjfv	struct sysctl_oid_list *vsi_list;
3345266423Sjfv
3346279858Sjfv	struct sysctl_oid *queue_node;
3347279858Sjfv	struct sysctl_oid_list *queue_list;
3348266423Sjfv
3349269198Sjfv	struct tx_ring *txr;
3350269198Sjfv	struct rx_ring *rxr;
3351279858Sjfv	char queue_namebuf[QUEUE_NAME_LEN];
3352266423Sjfv
3353266423Sjfv	/* Driver statistics */
3354266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3355266423Sjfv			CTLFLAG_RD, &pf->watchdog_events,
3356266423Sjfv			"Watchdog timeouts");
3357266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3358266423Sjfv			CTLFLAG_RD, &pf->admin_irq,
3359266423Sjfv			"Admin Queue IRQ Handled");
3360266423Sjfv
3361279858Sjfv	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3362279858Sjfv	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3363266423Sjfv
3364266423Sjfv	/* Queue statistics */
3365266423Sjfv	for (int q = 0; q < vsi->num_queues; q++) {
3366269198Sjfv		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3367279858Sjfv		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3368279858Sjfv		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3369266423Sjfv		queue_list = SYSCTL_CHILDREN(queue_node);
3370266423Sjfv
3371269198Sjfv		txr = &(queues[q].txr);
3372269198Sjfv		rxr = &(queues[q].rxr);
3373269198Sjfv
3374269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3375266423Sjfv				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3376266423Sjfv				"m_defrag() failed");
3377269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3378266423Sjfv				CTLFLAG_RD, &(queues[q].dropped_pkts),
3379266423Sjfv				"Driver dropped packets");
3380266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3381266423Sjfv				CTLFLAG_RD, &(queues[q].irqs),
3382266423Sjfv				"irqs on this queue");
3383269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3384266423Sjfv				CTLFLAG_RD, &(queues[q].tso),
3385266423Sjfv				"TSO");
3386269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3387266423Sjfv				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3388266423Sjfv				"Driver tx dma failure in xmit");
3389266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3390266423Sjfv				CTLFLAG_RD, &(txr->no_desc),
3391266423Sjfv				"Queue No Descriptor Available");
3392266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3393266423Sjfv				CTLFLAG_RD, &(txr->total_packets),
3394266423Sjfv				"Queue Packets Transmitted");
3395266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3396270346Sjfv				CTLFLAG_RD, &(txr->tx_bytes),
3397266423Sjfv				"Queue Bytes Transmitted");
3398266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3399266423Sjfv				CTLFLAG_RD, &(rxr->rx_packets),
3400266423Sjfv				"Queue Packets Received");
3401266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3402266423Sjfv				CTLFLAG_RD, &(rxr->rx_bytes),
3403266423Sjfv				"Queue Bytes Received");
3404266423Sjfv	}
3405266423Sjfv
3406266423Sjfv	/* MAC stats */
3407270346Sjfv	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3408266423Sjfv}
3409266423Sjfv
3410266423Sjfvstatic void
3411270346Sjfvixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3412266423Sjfv	struct sysctl_oid_list *child,
3413266423Sjfv	struct i40e_eth_stats *eth_stats)
3414266423Sjfv{
3415270346Sjfv	struct ixl_sysctl_info ctls[] =
3416266423Sjfv	{
3417266423Sjfv		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3418266423Sjfv		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3419266423Sjfv			"Unicast Packets Received"},
3420266423Sjfv		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3421266423Sjfv			"Multicast Packets Received"},
3422266423Sjfv		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3423266423Sjfv			"Broadcast Packets Received"},
3424269198Sjfv		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3425266423Sjfv		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3426266423Sjfv		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3427266423Sjfv		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3428266423Sjfv			"Multicast Packets Transmitted"},
3429266423Sjfv		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3430266423Sjfv			"Broadcast Packets Transmitted"},
3431266423Sjfv		// end
3432266423Sjfv		{0,0,0}
3433266423Sjfv	};
3434266423Sjfv
3435270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3436297753Spfg	while (entry->stat != NULL)
3437266423Sjfv	{
3438266423Sjfv		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3439266423Sjfv				CTLFLAG_RD, entry->stat,
3440266423Sjfv				entry->description);
3441266423Sjfv		entry++;
3442266423Sjfv	}
3443266423Sjfv}
3444266423Sjfv
3445266423Sjfvstatic void
3446270346Sjfvixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3447266423Sjfv	struct sysctl_oid_list *child,
3448266423Sjfv	struct i40e_hw_port_stats *stats)
3449266423Sjfv{
3450269198Sjfv	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3451266423Sjfv				    CTLFLAG_RD, NULL, "Mac Statistics");
3452266423Sjfv	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3453266423Sjfv
3454266423Sjfv	struct i40e_eth_stats *eth_stats = &stats->eth;
3455270346Sjfv	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3456266423Sjfv
3457270346Sjfv	struct ixl_sysctl_info ctls[] =
3458266423Sjfv	{
3459266423Sjfv		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3460266423Sjfv		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3461266423Sjfv		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3462266423Sjfv		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3463266423Sjfv		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3464266423Sjfv		/* Packet Reception Stats */
3465266423Sjfv		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3466266423Sjfv		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3467266423Sjfv		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3468266423Sjfv		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3469266423Sjfv		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3470266423Sjfv		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3471266423Sjfv		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3472266423Sjfv		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3473266423Sjfv		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3474266423Sjfv		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3475266423Sjfv		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3476266423Sjfv		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3477266423Sjfv		/* Packet Transmission Stats */
3478266423Sjfv		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3479266423Sjfv		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3480266423Sjfv		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3481266423Sjfv		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3482266423Sjfv		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3483266423Sjfv		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3484266423Sjfv		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3485266423Sjfv		/* Flow control */
3486266423Sjfv		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3487266423Sjfv		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3488266423Sjfv		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3489266423Sjfv		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3490266423Sjfv		/* End */
3491266423Sjfv		{0,0,0}
3492266423Sjfv	};
3493266423Sjfv
3494270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3495297753Spfg	while (entry->stat != NULL)
3496266423Sjfv	{
3497266423Sjfv		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3498266423Sjfv				CTLFLAG_RD, entry->stat,
3499266423Sjfv				entry->description);
3500266423Sjfv		entry++;
3501266423Sjfv	}
3502266423Sjfv}
3503266423Sjfv
3504284049Sjfv
3505266423Sjfv/*
3506270346Sjfv** ixl_config_rss - setup RSS
3507266423Sjfv**  - note this is done for the single vsi
3508266423Sjfv*/
3509270346Sjfvstatic void ixl_config_rss(struct ixl_vsi *vsi)
3510266423Sjfv{
3511270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3512266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3513266423Sjfv	u32		lut = 0;
3514277084Sjfv	u64		set_hena = 0, hena;
3515277084Sjfv	int		i, j, que_id;
3516277084Sjfv#ifdef RSS
3517277084Sjfv	u32		rss_hash_config;
3518277084Sjfv	u32		rss_seed[IXL_KEYSZ];
3519277084Sjfv#else
3520277084Sjfv	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3521277084Sjfv			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3522277084Sjfv			    0x35897377, 0x328b25e1, 0x4fa98922,
3523277084Sjfv			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3524277084Sjfv#endif
3525266423Sjfv
3526277084Sjfv#ifdef RSS
3527277084Sjfv        /* Fetch the configured RSS key */
3528277084Sjfv        rss_getkey((uint8_t *) &rss_seed);
3529277084Sjfv#endif
3530266423Sjfv
3531266423Sjfv	/* Fill out hash function seed */
3532277084Sjfv	for (i = 0; i < IXL_KEYSZ; i++)
3533277084Sjfv                wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3534266423Sjfv
3535266423Sjfv	/* Enable PCTYPES for RSS: */
3536277084Sjfv#ifdef RSS
3537277084Sjfv	rss_hash_config = rss_gethashconfig();
3538277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3539277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3540277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3541277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3542277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3543277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3544277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3545277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3546279033Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3547277151Sjfv		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3548277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3549277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3550277084Sjfv        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3551277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3552277084Sjfv#else
3553266423Sjfv	set_hena =
3554266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3555266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3556266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3557266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3558266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3559266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3560266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3561266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3562266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3563266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3564266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3565277084Sjfv#endif
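	/* The hash-enable (HENA) bitmask is 64 bits wide, split across
	   two 32-bit registers; OR in the types selected above. */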
3566266423Sjfv	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3567266423Sjfv	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3568266423Sjfv	hena |= set_hena;
3569266423Sjfv	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3570266423Sjfv	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3571266423Sjfv
3572266423Sjfv	/* Populate the LUT with max no. of queues in round robin fashion */
3573266423Sjfv	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3574266423Sjfv		if (j == vsi->num_queues)
3575266423Sjfv			j = 0;
3576277084Sjfv#ifdef RSS
3577277084Sjfv		/*
3578277084Sjfv		 * Fetch the RSS bucket id for the given indirection entry.
3579277084Sjfv		 * Cap it at the number of configured buckets (which is
3580277084Sjfv		 * num_queues.)
3581277084Sjfv		 */
3582277084Sjfv		que_id = rss_get_indirection_to_bucket(i);
3583277262Sjfv		que_id = que_id % vsi->num_queues;
3584277084Sjfv#else
3585277084Sjfv		que_id = j;
3586277084Sjfv#endif
3587266423Sjfv		/* lut = 4-byte sliding window of 4 lut entries */
3588277084Sjfv		lut = (lut << 8) | (que_id &
3589266423Sjfv		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3590266423Sjfv		/* On i = 3, we have 4 entries in lut; write to the register */
3591266423Sjfv		if ((i & 3) == 3)
3592266423Sjfv			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3593266423Sjfv	}
3594270346Sjfv	ixl_flush(hw);
3595266423Sjfv}
3596266423Sjfv
3597266423Sjfv
3598266423Sjfv/*
3599266423Sjfv** This routine is run via a vlan config EVENT;
3600266423Sjfv** it enables us to use the HW Filter table since
3601266423Sjfv** we can get the vlan id. This just creates the
3602266423Sjfv** entry in the soft version of the VFTA; init will
3603266423Sjfv** repopulate the real table.
3604266423Sjfv*/
3605266423Sjfvstatic void
3606270346Sjfvixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3607266423Sjfv{
3608270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3609266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3610270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3611266423Sjfv
3612266423Sjfv	if (ifp->if_softc !=  arg)   /* Not our event */
3613266423Sjfv		return;
3614266423Sjfv
3615266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3616266423Sjfv		return;
3617266423Sjfv
3618270346Sjfv	IXL_PF_LOCK(pf);
3619266423Sjfv	++vsi->num_vlans;
3620270346Sjfv	ixl_add_filter(vsi, hw->mac.addr, vtag);
3621270346Sjfv	IXL_PF_UNLOCK(pf);
3622266423Sjfv}
3623266423Sjfv
3624266423Sjfv/*
3625266423Sjfv** This routine is run via a vlan
3626266423Sjfv** unconfig EVENT, removing our entry
3627266423Sjfv** from the soft vfta.
3628266423Sjfv*/
3629266423Sjfvstatic void
3630270346Sjfvixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3631266423Sjfv{
3632270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3633266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3634270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3635266423Sjfv
3636266423Sjfv	if (ifp->if_softc !=  arg)
3637266423Sjfv		return;
3638266423Sjfv
3639266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3640266423Sjfv		return;
3641266423Sjfv
3642270346Sjfv	IXL_PF_LOCK(pf);
3643266423Sjfv	--vsi->num_vlans;
3644270346Sjfv	ixl_del_filter(vsi, hw->mac.addr, vtag);
3645270346Sjfv	IXL_PF_UNLOCK(pf);
3646266423Sjfv}
3647266423Sjfv
3648266423Sjfv/*
3649266423Sjfv** This routine updates vlan filters, called by init;
3650266423Sjfv** it scans the filter table and then updates the hw
3651266423Sjfv** after a soft reset.
3652266423Sjfv*/
3653266423Sjfvstatic void
3654270346Sjfvixl_setup_vlan_filters(struct ixl_vsi *vsi)
3655266423Sjfv{
3656270346Sjfv	struct ixl_mac_filter	*f;
3657266423Sjfv	int			cnt = 0, flags;
3658266423Sjfv
3659266423Sjfv	if (vsi->num_vlans == 0)
3660266423Sjfv		return;
3661266423Sjfv	/*
3662266423Sjfv	** Scan the filter list for vlan entries,
3663266423Sjfv	** mark them for addition and then call
3664266423Sjfv	** for the AQ update.
3665266423Sjfv	*/
3666266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3667270346Sjfv		if (f->flags & IXL_FILTER_VLAN) {
3668266423Sjfv			f->flags |=
3669270346Sjfv			    (IXL_FILTER_ADD |
3670270346Sjfv			    IXL_FILTER_USED);
3671266423Sjfv			cnt++;
3672266423Sjfv		}
3673266423Sjfv	}
3674266423Sjfv	if (cnt == 0) {
3675266423Sjfv		printf("setup vlan: no filters found!\n");
3676266423Sjfv		return;
3677266423Sjfv	}
3678270346Sjfv	flags = IXL_FILTER_VLAN;
3679270346Sjfv	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3680270346Sjfv	ixl_add_hw_filters(vsi, flags, cnt);
3681266423Sjfv	return;
3682266423Sjfv}
3683266423Sjfv
3684266423Sjfv/*
3685266423Sjfv** Initialize filter list and add filters that the hardware
3686266423Sjfv** needs to know about.
3687266423Sjfv*/
3688266423Sjfvstatic void
3689270346Sjfvixl_init_filters(struct ixl_vsi *vsi)
3690266423Sjfv{
3691269198Sjfv	/* Add broadcast address */
3692279858Sjfv	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3693266423Sjfv}
3694266423Sjfv
3695266423Sjfv/*
3696266423Sjfv** This routine adds multicast filters
3697266423Sjfv*/
3698266423Sjfvstatic void
3699270346Sjfvixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3700266423Sjfv{
3701270346Sjfv	struct ixl_mac_filter *f;
3702266423Sjfv
3703266423Sjfv	/* Does one already exist */
3704270346Sjfv	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3705266423Sjfv	if (f != NULL)
3706266423Sjfv		return;
3707266423Sjfv
3708270346Sjfv	f = ixl_get_filter(vsi);
3709266423Sjfv	if (f == NULL) {
3710266423Sjfv		printf("WARNING: no filter available!!\n");
3711266423Sjfv		return;
3712266423Sjfv	}
3713266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3714270346Sjfv	f->vlan = IXL_VLAN_ANY;
3715270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3716270346Sjfv	    | IXL_FILTER_MC);
3717266423Sjfv
3718266423Sjfv	return;
3719266423Sjfv}
3720266423Sjfv
3721279858Sjfvstatic void
3722279858Sjfvixl_reconfigure_filters(struct ixl_vsi *vsi)
3723279858Sjfv{
3724279858Sjfv
3725279858Sjfv	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3726279858Sjfv}
3727279858Sjfv
3728266423Sjfv/*
3729266423Sjfv** This routine adds macvlan filters
3730266423Sjfv*/
3731266423Sjfvstatic void
3732270346Sjfvixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3733266423Sjfv{
3734270346Sjfv	struct ixl_mac_filter	*f, *tmp;
3735279858Sjfv	struct ixl_pf		*pf;
3736279858Sjfv	device_t		dev;
3737266423Sjfv
3738270346Sjfv	DEBUGOUT("ixl_add_filter: begin");
3739266423Sjfv
3740279858Sjfv	pf = vsi->back;
3741279858Sjfv	dev = pf->dev;
3742279858Sjfv
3743266423Sjfv	/* Does one already exist */
3744270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3745266423Sjfv	if (f != NULL)
3746266423Sjfv		return;
3747266423Sjfv	/*
3748266423Sjfv	** Is this the first vlan being registered? If so we
3749266423Sjfv	** need to remove the ANY filter that indicates we are
3750266423Sjfv	** not in a vlan, and replace that with a 0 filter.
3751266423Sjfv	*/
3752270346Sjfv	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3753270346Sjfv		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3754266423Sjfv		if (tmp != NULL) {
3755270346Sjfv			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3756270346Sjfv			ixl_add_filter(vsi, macaddr, 0);
3757266423Sjfv		}
3758266423Sjfv	}
3759266423Sjfv
3760270346Sjfv	f = ixl_get_filter(vsi);
3761266423Sjfv	if (f == NULL) {
3762266423Sjfv		device_printf(dev, "WARNING: no filter available!!\n");
3763266423Sjfv		return;
3764266423Sjfv	}
3765266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3766266423Sjfv	f->vlan = vlan;
3767270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3768270346Sjfv	if (f->vlan != IXL_VLAN_ANY)
3769270346Sjfv		f->flags |= IXL_FILTER_VLAN;
3770279858Sjfv	else
3771279858Sjfv		vsi->num_macs++;
3772266423Sjfv
3773270346Sjfv	ixl_add_hw_filters(vsi, f->flags, 1);
3774266423Sjfv	return;
3775266423Sjfv}
3776266423Sjfv
3777266423Sjfvstatic void
3778270346Sjfvixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3779266423Sjfv{
3780270346Sjfv	struct ixl_mac_filter *f;
3781266423Sjfv
3782270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3783266423Sjfv	if (f == NULL)
3784266423Sjfv		return;
3785266423Sjfv
3786270346Sjfv	f->flags |= IXL_FILTER_DEL;
3787270346Sjfv	ixl_del_hw_filters(vsi, 1);
3788279858Sjfv	vsi->num_macs--;
3789266423Sjfv
3790266423Sjfv	/* Check if this is the last vlan removal */
3791270346Sjfv	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3792266423Sjfv		/* Switch back to a non-vlan filter */
3793270346Sjfv		ixl_del_filter(vsi, macaddr, 0);
3794270346Sjfv		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3795266423Sjfv	}
3796266423Sjfv	return;
3797266423Sjfv}
3798266423Sjfv
3799266423Sjfv/*
3800266423Sjfv** Find the filter with both matching mac addr and vlan id
3801266423Sjfv*/
3802270346Sjfvstatic struct ixl_mac_filter *
3803270346Sjfvixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3804266423Sjfv{
3805270346Sjfv	struct ixl_mac_filter	*f;
3806266423Sjfv	bool			match = FALSE;
3807266423Sjfv
3808266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3809266423Sjfv		if (!cmp_etheraddr(f->macaddr, macaddr))
3810266423Sjfv			continue;
3811266423Sjfv		if (f->vlan == vlan) {
3812266423Sjfv			match = TRUE;
3813266423Sjfv			break;
3814266423Sjfv		}
3815266423Sjfv	}
3816266423Sjfv
3817266423Sjfv	if (!match)
3818266423Sjfv		f = NULL;
3819266423Sjfv	return (f);
3820266423Sjfv}
3821266423Sjfv
3822266423Sjfv/*
3823266423Sjfv** This routine takes additions to the vsi filter
3824266423Sjfv** table and creates an Admin Queue call to create
3825266423Sjfv** the filters in the hardware.
3826266423Sjfv*/
3827266423Sjfvstatic void
3828270346Sjfvixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3829266423Sjfv{
3830266423Sjfv	struct i40e_aqc_add_macvlan_element_data *a, *b;
3831270346Sjfv	struct ixl_mac_filter	*f;
3832279858Sjfv	struct ixl_pf		*pf;
3833279858Sjfv	struct i40e_hw		*hw;
3834279858Sjfv	device_t		dev;
3835279858Sjfv	int			err, j = 0;
3836266423Sjfv
3837279858Sjfv	pf = vsi->back;
3838279858Sjfv	dev = pf->dev;
3839279858Sjfv	hw = &pf->hw;
3840279858Sjfv	IXL_PF_LOCK_ASSERT(pf);
3841279858Sjfv
3842266423Sjfv	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3843266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
3844266423Sjfv	if (a == NULL) {
3845277084Sjfv		device_printf(dev, "add_hw_filters failed to get memory\n");
3846266423Sjfv		return;
3847266423Sjfv	}
3848266423Sjfv
3849266423Sjfv	/*
3850266423Sjfv	** Scan the filter list; each time we find one
3851266423Sjfv	** we add it to the admin queue array and turn off
3852266423Sjfv	** the add bit.
3853266423Sjfv	*/
3854266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3855266423Sjfv		if (f->flags == flags) {
3856266423Sjfv			b = &a[j]; // a pox on fvl long names :)
3857266423Sjfv			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3858279858Sjfv			if (f->vlan == IXL_VLAN_ANY) {
3859279858Sjfv				b->vlan_tag = 0;
3860279858Sjfv				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3861279858Sjfv			} else {
3862279858Sjfv				b->vlan_tag = f->vlan;
3863279858Sjfv				b->flags = 0;
3864279858Sjfv			}
3865279858Sjfv			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3866270346Sjfv			f->flags &= ~IXL_FILTER_ADD;
3867266423Sjfv			j++;
3868266423Sjfv		}
3869266423Sjfv		if (j == cnt)
3870266423Sjfv			break;
3871266423Sjfv	}
3872266423Sjfv	if (j > 0) {
3873266423Sjfv		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3874266423Sjfv		if (err)
3875279033Sjfv			device_printf(dev, "aq_add_macvlan err %d, "
3876279033Sjfv			    "aq_error %d\n", err, hw->aq.asq_last_status);
3877266423Sjfv		else
3878266423Sjfv			vsi->hw_filters_add += j;
3879266423Sjfv	}
3880266423Sjfv	free(a, M_DEVBUF);
3881266423Sjfv	return;
3882266423Sjfv}
3883266423Sjfv
3884266423Sjfv/*
3885266423Sjfv** This routine takes removals in the vsi filter
3886266423Sjfv** table and creates an Admin Queue call to delete
3887266423Sjfv** the filters in the hardware.
3888266423Sjfv*/
3889266423Sjfvstatic void
3890270346Sjfvixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3891266423Sjfv{
3892266423Sjfv	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3893279858Sjfv	struct ixl_pf		*pf;
3894279858Sjfv	struct i40e_hw		*hw;
3895279858Sjfv	device_t		dev;
3896270346Sjfv	struct ixl_mac_filter	*f, *f_temp;
3897266423Sjfv	int			err, j = 0;
3898266423Sjfv
3899270346Sjfv	DEBUGOUT("ixl_del_hw_filters: begin\n");
3900266423Sjfv
3901279858Sjfv	pf = vsi->back;
3902279858Sjfv	hw = &pf->hw;
3903279858Sjfv	dev = pf->dev;
3904279858Sjfv
3905266423Sjfv	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3906266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
3907266423Sjfv	if (d == NULL) {
3908266423Sjfv		printf("del hw filter failed to get memory\n");
3909266423Sjfv		return;
3910266423Sjfv	}
3911266423Sjfv
3912266423Sjfv	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3913270346Sjfv		if (f->flags & IXL_FILTER_DEL) {
3914266423Sjfv			e = &d[j]; // a pox on fvl long names :)
3915266423Sjfv			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3916270346Sjfv			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3917266423Sjfv			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3918266423Sjfv			/* delete entry from vsi list */
3919270346Sjfv			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3920266423Sjfv			free(f, M_DEVBUF);
3921266423Sjfv			j++;
3922266423Sjfv		}
3923266423Sjfv		if (j == cnt)
3924266423Sjfv			break;
3925266423Sjfv	}
3926266423Sjfv	if (j > 0) {
3927266423Sjfv		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3928266423Sjfv		/* NOTE: returns ENOENT every time but seems to work fine,
3929266423Sjfv		   so we'll ignore that specific error. */
3930277084Sjfv		// TODO: Does this still occur on current firmwares?
3931266423Sjfv		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3932266423Sjfv			int sc = 0;
3933266423Sjfv			for (int i = 0; i < j; i++)
3934266423Sjfv				sc += (!d[i].error_code);
3935266423Sjfv			vsi->hw_filters_del += sc;
3936266423Sjfv			device_printf(dev,
3937266423Sjfv			    "Failed to remove %d/%d filters, aq error %d\n",
3938266423Sjfv			    j - sc, j, hw->aq.asq_last_status);
3939266423Sjfv		} else
3940266423Sjfv			vsi->hw_filters_del += j;
3941266423Sjfv	}
3942266423Sjfv	free(d, M_DEVBUF);
3943266423Sjfv
3944270346Sjfv	DEBUGOUT("ixl_del_hw_filters: end\n");
3945266423Sjfv	return;
3946266423Sjfv}
3947266423Sjfv
3948279858Sjfvstatic int
3949270346Sjfvixl_enable_rings(struct ixl_vsi *vsi)
3950266423Sjfv{
3951279858Sjfv	struct ixl_pf	*pf = vsi->back;
3952279858Sjfv	struct i40e_hw	*hw = &pf->hw;
3953279858Sjfv	int		index, error;
3954266423Sjfv	u32		reg;
3955266423Sjfv
3956279858Sjfv	error = 0;
3957266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3958279858Sjfv		index = vsi->first_queue + i;
3959279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, TRUE);
3960266423Sjfv
3961279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
3962266423Sjfv		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3963266423Sjfv		    I40E_QTX_ENA_QENA_STAT_MASK;
3964279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
3965266423Sjfv		/* Verify the enable took */
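		/* (up to 100ms total: 10 polls of 10ms each) */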
3966266423Sjfv		for (int j = 0; j < 10; j++) {
3967279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
3968266423Sjfv			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3969266423Sjfv				break;
3970266423Sjfv			i40e_msec_delay(10);
3971266423Sjfv		}
3972279858Sjfv		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3973279858Sjfv			device_printf(pf->dev, "TX queue %d disabled!\n",
3974279858Sjfv			    index);
3975279858Sjfv			error = ETIMEDOUT;
3976279858Sjfv		}
3977266423Sjfv
3978279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
3979266423Sjfv		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3980266423Sjfv		    I40E_QRX_ENA_QENA_STAT_MASK;
3981279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
3982266423Sjfv		/* Verify the enable took */
3983266423Sjfv		for (int j = 0; j < 10; j++) {
3984279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
3985266423Sjfv			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3986266423Sjfv				break;
3987266423Sjfv			i40e_msec_delay(10);
3988266423Sjfv		}
3989279858Sjfv		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3990279858Sjfv			device_printf(pf->dev, "RX queue %d disabled!\n",
3991279858Sjfv			    index);
3992279858Sjfv			error = ETIMEDOUT;
3993279858Sjfv		}
3994266423Sjfv	}
3995279858Sjfv
3996279858Sjfv	return (error);
3997266423Sjfv}
3998266423Sjfv
3999279858Sjfvstatic int
4000270346Sjfvixl_disable_rings(struct ixl_vsi *vsi)
4001266423Sjfv{
4002279858Sjfv	struct ixl_pf	*pf = vsi->back;
4003279858Sjfv	struct i40e_hw	*hw = &pf->hw;
4004279858Sjfv	int		index, error;
4005266423Sjfv	u32		reg;
4006266423Sjfv
4007279858Sjfv	error = 0;
4008266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
4009279858Sjfv		index = vsi->first_queue + i;
4010279858Sjfv
4011279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, FALSE);
4012266423Sjfv		i40e_usec_delay(500);
4013266423Sjfv
4014279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
4015266423Sjfv		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4016279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
4017266423Sjfv		/* Verify the disable took */
4018266423Sjfv		for (int j = 0; j < 10; j++) {
4019279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
4020266423Sjfv			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4021266423Sjfv				break;
4022266423Sjfv			i40e_msec_delay(10);
4023266423Sjfv		}
4024279858Sjfv		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
4025279858Sjfv			device_printf(pf->dev, "TX queue %d still enabled!\n",
4026279858Sjfv			    index);
4027279858Sjfv			error = ETIMEDOUT;
4028279858Sjfv		}
4029266423Sjfv
4030279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
4031266423Sjfv		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4032279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
4033266423Sjfv		/* Verify the disable took */
4034266423Sjfv		for (int j = 0; j < 10; j++) {
4035279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
4036266423Sjfv			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4037266423Sjfv				break;
4038266423Sjfv			i40e_msec_delay(10);
4039266423Sjfv		}
4040279858Sjfv		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
4041279858Sjfv			device_printf(pf->dev, "RX queue %d still enabled!\n",
4042279858Sjfv			    index);
4043279858Sjfv			error = ETIMEDOUT;
4044279858Sjfv		}
4045266423Sjfv	}
4046279858Sjfv
4047279858Sjfv	return (error);
4048266423Sjfv}
4049266423Sjfv
4050269198Sjfv/**
4051270346Sjfv * ixl_handle_mdd_event
4052269198Sjfv *
4053269198Sjfv * Called from interrupt handler to identify possibly malicious vfs
4054269198Sjfv * (but also detects events from the PF)
4055269198Sjfv **/
4056270346Sjfvstatic void ixl_handle_mdd_event(struct ixl_pf *pf)
4057269198Sjfv{
4058269198Sjfv	struct i40e_hw *hw = &pf->hw;
4059269198Sjfv	device_t dev = pf->dev;
4060269198Sjfv	bool mdd_detected = false;
4061269198Sjfv	bool pf_mdd_detected = false;
4062269198Sjfv	u32 reg;
4063269198Sjfv
4064269198Sjfv	/* find what triggered the MDD event */
4065269198Sjfv	reg = rd32(hw, I40E_GL_MDET_TX);
4066269198Sjfv	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4067269198Sjfv		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
4068269198Sjfv				I40E_GL_MDET_TX_PF_NUM_SHIFT;
4069269198Sjfv		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
4070269198Sjfv				I40E_GL_MDET_TX_EVENT_SHIFT;
4071269198Sjfv		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
4072269198Sjfv				I40E_GL_MDET_TX_QUEUE_SHIFT;
4073269198Sjfv		device_printf(dev,
4074269198Sjfv			 "Malicious Driver Detection event 0x%02x"
4075269198Sjfv			 " on TX queue %d, PF number 0x%02x\n",
4076269198Sjfv			 event, queue, pf_num);
4077269198Sjfv		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4078269198Sjfv		mdd_detected = true;
4079269198Sjfv	}
4080269198Sjfv	reg = rd32(hw, I40E_GL_MDET_RX);
4081269198Sjfv	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4082269198Sjfv		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
4083269198Sjfv				I40E_GL_MDET_RX_FUNCTION_SHIFT;
4084269198Sjfv		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
4085269198Sjfv				I40E_GL_MDET_RX_EVENT_SHIFT;
4086269198Sjfv		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
4087269198Sjfv				I40E_GL_MDET_RX_QUEUE_SHIFT;
4088269198Sjfv		device_printf(dev,
4089269198Sjfv			 "Malicious Driver Detection event 0x%02x"
4090269198Sjfv			 " on RX queue %d of function 0x%02x\n",
4091269198Sjfv			 event, queue, func);
4092269198Sjfv		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4093269198Sjfv		mdd_detected = true;
4094269198Sjfv	}
4095269198Sjfv
4096269198Sjfv	if (mdd_detected) {
4097269198Sjfv		reg = rd32(hw, I40E_PF_MDET_TX);
4098269198Sjfv		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4099269198Sjfv			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4100269198Sjfv			device_printf(dev,
4101269198Sjfv				 "MDD TX event is for this function 0x%08x\n",
4102269198Sjfv				 reg);
4103269198Sjfv			pf_mdd_detected = true;
4104269198Sjfv		}
4105269198Sjfv		reg = rd32(hw, I40E_PF_MDET_RX);
4106269198Sjfv		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4107269198Sjfv			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4108269198Sjfv			device_printf(dev,
4109269198Sjfv				 "MDD RX event is for this function 0x%08x\n",
4110269198Sjfv				 reg);
4111269198Sjfv			pf_mdd_detected = true;
4112269198Sjfv		}
4113269198Sjfv	}
4114269198Sjfv
4115269198Sjfv	/* re-enable mdd interrupt cause */
4116269198Sjfv	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4117269198Sjfv	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4118269198Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4119270346Sjfv	ixl_flush(hw);
4120269198Sjfv}
4121269198Sjfv
4122266423Sjfvstatic void
4123270346Sjfvixl_enable_intr(struct ixl_vsi *vsi)
4124266423Sjfv{
4125266423Sjfv	struct i40e_hw		*hw = vsi->hw;
4126270346Sjfv	struct ixl_queue	*que = vsi->queues;
4127266423Sjfv
4128270346Sjfv	if (ixl_enable_msix) {
4129270346Sjfv		ixl_enable_adminq(hw);
4130266423Sjfv		for (int i = 0; i < vsi->num_queues; i++, que++)
4131270346Sjfv			ixl_enable_queue(hw, que->me);
4132266423Sjfv	} else
4133270346Sjfv		ixl_enable_legacy(hw);
4134266423Sjfv}
4135266423Sjfv
4136266423Sjfvstatic void
4137279858Sjfvixl_disable_rings_intr(struct ixl_vsi *vsi)
4138266423Sjfv{
4139266423Sjfv	struct i40e_hw		*hw = vsi->hw;
4140270346Sjfv	struct ixl_queue	*que = vsi->queues;
4141266423Sjfv
4142279858Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++)
4143279858Sjfv		ixl_disable_queue(hw, que->me);
4144279858Sjfv}
4145279858Sjfv
4146279858Sjfvstatic void
4147279858Sjfvixl_disable_intr(struct ixl_vsi *vsi)
4148279858Sjfv{
4149279858Sjfv	struct i40e_hw		*hw = vsi->hw;
4150279858Sjfv
4151279858Sjfv	if (ixl_enable_msix)
4152270346Sjfv		ixl_disable_adminq(hw);
4153279858Sjfv	else
4154270346Sjfv		ixl_disable_legacy(hw);
4155266423Sjfv}
4156266423Sjfv
4157266423Sjfvstatic void
4158270346Sjfvixl_enable_adminq(struct i40e_hw *hw)
4159266423Sjfv{
4160266423Sjfv	u32		reg;
4161266423Sjfv
4162266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4163266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4164270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4165266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4166270346Sjfv	ixl_flush(hw);
4167266423Sjfv}
4168266423Sjfv
4169266423Sjfvstatic void
4170270346Sjfvixl_disable_adminq(struct i40e_hw *hw)
4171266423Sjfv{
4172266423Sjfv	u32		reg;
4173266423Sjfv
4174270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4175266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4176299547Serj	ixl_flush(hw);
4177266423Sjfv}
4178266423Sjfv
4179266423Sjfvstatic void
4180270346Sjfvixl_enable_queue(struct i40e_hw *hw, int id)
4181266423Sjfv{
4182266423Sjfv	u32		reg;
4183266423Sjfv
4184266423Sjfv	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4185266423Sjfv	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4186270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4187266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4188266423Sjfv}
4189266423Sjfv
4190266423Sjfvstatic void
4191270346Sjfvixl_disable_queue(struct i40e_hw *hw, int id)
4192266423Sjfv{
4193266423Sjfv	u32		reg;
4194266423Sjfv
4195270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4196266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4197266423Sjfv}
4198266423Sjfv
4199266423Sjfvstatic void
4200270346Sjfvixl_enable_legacy(struct i40e_hw *hw)
4201266423Sjfv{
4202266423Sjfv	u32		reg;
4203266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4204266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4205270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4206266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4207266423Sjfv}
4208266423Sjfv
4209266423Sjfvstatic void
4210270346Sjfvixl_disable_legacy(struct i40e_hw *hw)
4211266423Sjfv{
4212266423Sjfv	u32		reg;
4213266423Sjfv
4214270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4215266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4216266423Sjfv}
4217266423Sjfv
4218266423Sjfvstatic void
4219270346Sjfvixl_update_stats_counters(struct ixl_pf *pf)
4220266423Sjfv{
4221266423Sjfv	struct i40e_hw	*hw = &pf->hw;
4222279858Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
4223279858Sjfv	struct ixl_vf	*vf;
4224269198Sjfv
4225266423Sjfv	struct i40e_hw_port_stats *nsd = &pf->stats;
4226266423Sjfv	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4227266423Sjfv
4228266423Sjfv	/* Update hw stats */
4229270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4230266423Sjfv			   pf->stat_offsets_loaded,
4231266423Sjfv			   &osd->crc_errors, &nsd->crc_errors);
4232270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4233266423Sjfv			   pf->stat_offsets_loaded,
4234266423Sjfv			   &osd->illegal_bytes, &nsd->illegal_bytes);
4235270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4236266423Sjfv			   I40E_GLPRT_GORCL(hw->port),
4237266423Sjfv			   pf->stat_offsets_loaded,
4238266423Sjfv			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4239270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4240266423Sjfv			   I40E_GLPRT_GOTCL(hw->port),
4241266423Sjfv			   pf->stat_offsets_loaded,
4242266423Sjfv			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4243270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4244266423Sjfv			   pf->stat_offsets_loaded,
4245266423Sjfv			   &osd->eth.rx_discards,
4246266423Sjfv			   &nsd->eth.rx_discards);
4247270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4248266423Sjfv			   I40E_GLPRT_UPRCL(hw->port),
4249266423Sjfv			   pf->stat_offsets_loaded,
4250266423Sjfv			   &osd->eth.rx_unicast,
4251266423Sjfv			   &nsd->eth.rx_unicast);
4252270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4253266423Sjfv			   I40E_GLPRT_UPTCL(hw->port),
4254266423Sjfv			   pf->stat_offsets_loaded,
4255266423Sjfv			   &osd->eth.tx_unicast,
4256266423Sjfv			   &nsd->eth.tx_unicast);
4257270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4258266423Sjfv			   I40E_GLPRT_MPRCL(hw->port),
4259266423Sjfv			   pf->stat_offsets_loaded,
4260266423Sjfv			   &osd->eth.rx_multicast,
4261266423Sjfv			   &nsd->eth.rx_multicast);
4262270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4263266423Sjfv			   I40E_GLPRT_MPTCL(hw->port),
4264266423Sjfv			   pf->stat_offsets_loaded,
4265266423Sjfv			   &osd->eth.tx_multicast,
4266266423Sjfv			   &nsd->eth.tx_multicast);
4267270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4268266423Sjfv			   I40E_GLPRT_BPRCL(hw->port),
4269266423Sjfv			   pf->stat_offsets_loaded,
4270266423Sjfv			   &osd->eth.rx_broadcast,
4271266423Sjfv			   &nsd->eth.rx_broadcast);
4272270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4273266423Sjfv			   I40E_GLPRT_BPTCL(hw->port),
4274266423Sjfv			   pf->stat_offsets_loaded,
4275266423Sjfv			   &osd->eth.tx_broadcast,
4276266423Sjfv			   &nsd->eth.tx_broadcast);
4277266423Sjfv
4278270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4279266423Sjfv			   pf->stat_offsets_loaded,
4280266423Sjfv			   &osd->tx_dropped_link_down,
4281266423Sjfv			   &nsd->tx_dropped_link_down);
4282270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4283266423Sjfv			   pf->stat_offsets_loaded,
4284266423Sjfv			   &osd->mac_local_faults,
4285266423Sjfv			   &nsd->mac_local_faults);
4286270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4287266423Sjfv			   pf->stat_offsets_loaded,
4288266423Sjfv			   &osd->mac_remote_faults,
4289266423Sjfv			   &nsd->mac_remote_faults);
4290270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4291266423Sjfv			   pf->stat_offsets_loaded,
4292266423Sjfv			   &osd->rx_length_errors,
4293266423Sjfv			   &nsd->rx_length_errors);
4294266423Sjfv
4295269198Sjfv	/* Flow control (LFC) stats */
4296270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4297266423Sjfv			   pf->stat_offsets_loaded,
4298266423Sjfv			   &osd->link_xon_rx, &nsd->link_xon_rx);
4299270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4300266423Sjfv			   pf->stat_offsets_loaded,
4301266423Sjfv			   &osd->link_xon_tx, &nsd->link_xon_tx);
4302270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4303266423Sjfv			   pf->stat_offsets_loaded,
4304266423Sjfv			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
4305270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4306266423Sjfv			   pf->stat_offsets_loaded,
4307266423Sjfv			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
4308266423Sjfv
4309269198Sjfv	/* Packet size stats rx */
4310270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4311266423Sjfv			   I40E_GLPRT_PRC64L(hw->port),
4312266423Sjfv			   pf->stat_offsets_loaded,
4313266423Sjfv			   &osd->rx_size_64, &nsd->rx_size_64);
4314270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4315266423Sjfv			   I40E_GLPRT_PRC127L(hw->port),
4316266423Sjfv			   pf->stat_offsets_loaded,
4317266423Sjfv			   &osd->rx_size_127, &nsd->rx_size_127);
4318270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4319266423Sjfv			   I40E_GLPRT_PRC255L(hw->port),
4320266423Sjfv			   pf->stat_offsets_loaded,
4321266423Sjfv			   &osd->rx_size_255, &nsd->rx_size_255);
4322270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4323266423Sjfv			   I40E_GLPRT_PRC511L(hw->port),
4324266423Sjfv			   pf->stat_offsets_loaded,
4325266423Sjfv			   &osd->rx_size_511, &nsd->rx_size_511);
4326270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4327266423Sjfv			   I40E_GLPRT_PRC1023L(hw->port),
4328266423Sjfv			   pf->stat_offsets_loaded,
4329266423Sjfv			   &osd->rx_size_1023, &nsd->rx_size_1023);
4330270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4331266423Sjfv			   I40E_GLPRT_PRC1522L(hw->port),
4332266423Sjfv			   pf->stat_offsets_loaded,
4333266423Sjfv			   &osd->rx_size_1522, &nsd->rx_size_1522);
4334270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4335266423Sjfv			   I40E_GLPRT_PRC9522L(hw->port),
4336266423Sjfv			   pf->stat_offsets_loaded,
4337266423Sjfv			   &osd->rx_size_big, &nsd->rx_size_big);
4338266423Sjfv
4339269198Sjfv	/* Packet size stats tx */
4340270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4341266423Sjfv			   I40E_GLPRT_PTC64L(hw->port),
4342266423Sjfv			   pf->stat_offsets_loaded,
4343266423Sjfv			   &osd->tx_size_64, &nsd->tx_size_64);
4344270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4345266423Sjfv			   I40E_GLPRT_PTC127L(hw->port),
4346266423Sjfv			   pf->stat_offsets_loaded,
4347266423Sjfv			   &osd->tx_size_127, &nsd->tx_size_127);
4348270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4349266423Sjfv			   I40E_GLPRT_PTC255L(hw->port),
4350266423Sjfv			   pf->stat_offsets_loaded,
4351266423Sjfv			   &osd->tx_size_255, &nsd->tx_size_255);
4352270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4353266423Sjfv			   I40E_GLPRT_PTC511L(hw->port),
4354266423Sjfv			   pf->stat_offsets_loaded,
4355266423Sjfv			   &osd->tx_size_511, &nsd->tx_size_511);
4356270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4357266423Sjfv			   I40E_GLPRT_PTC1023L(hw->port),
4358266423Sjfv			   pf->stat_offsets_loaded,
4359266423Sjfv			   &osd->tx_size_1023, &nsd->tx_size_1023);
4360270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4361266423Sjfv			   I40E_GLPRT_PTC1522L(hw->port),
4362266423Sjfv			   pf->stat_offsets_loaded,
4363266423Sjfv			   &osd->tx_size_1522, &nsd->tx_size_1522);
4364270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4365266423Sjfv			   I40E_GLPRT_PTC9522L(hw->port),
4366266423Sjfv			   pf->stat_offsets_loaded,
4367266423Sjfv			   &osd->tx_size_big, &nsd->tx_size_big);
4368266423Sjfv
4369270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4370266423Sjfv			   pf->stat_offsets_loaded,
4371266423Sjfv			   &osd->rx_undersize, &nsd->rx_undersize);
4372270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4373266423Sjfv			   pf->stat_offsets_loaded,
4374266423Sjfv			   &osd->rx_fragments, &nsd->rx_fragments);
4375270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4376266423Sjfv			   pf->stat_offsets_loaded,
4377266423Sjfv			   &osd->rx_oversize, &nsd->rx_oversize);
4378270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4379266423Sjfv			   pf->stat_offsets_loaded,
4380266423Sjfv			   &osd->rx_jabber, &nsd->rx_jabber);
4381266423Sjfv	pf->stat_offsets_loaded = true;
4382269198Sjfv	/* End hw stats */
4383266423Sjfv
4384266423Sjfv	/* Update vsi stats */
4385279858Sjfv	ixl_update_vsi_stats(vsi);
4386266423Sjfv
4387279858Sjfv	for (int i = 0; i < pf->num_vfs; i++) {
4388279858Sjfv		vf = &pf->vfs[i];
4389279858Sjfv		if (vf->vf_flags & VF_FLAG_ENABLED)
4390279858Sjfv			ixl_update_eth_stats(&pf->vfs[i].vsi);
4391279858Sjfv	}
4392266423Sjfv}
4393266423Sjfv
4394266423Sjfv/*
4395266423Sjfv** Task handler for MSIX Admin Queue interrupts
4396266423Sjfv**  - runs outside the interrupt handler since it might sleep
4397266423Sjfv*/
4398266423Sjfvstatic void
4399270346Sjfvixl_do_adminq(void *context, int pending)
4400266423Sjfv{
4401270346Sjfv	struct ixl_pf			*pf = context;
4402266423Sjfv	struct i40e_hw			*hw = &pf->hw;
4403266423Sjfv	struct i40e_arq_event_info	event;
4404266423Sjfv	i40e_status			ret;
4405299547Serj	device_t			dev = pf->dev;
4406299547Serj	u32				loop = 0;
4407266423Sjfv	u16				opcode, result;
4408266423Sjfv
4409274205Sjfv	event.buf_len = IXL_AQ_BUF_SZ;
4410274205Sjfv	event.msg_buf = malloc(event.buf_len,
4411266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
4412266423Sjfv	if (!event.msg_buf) {
4413299547Serj		device_printf(dev, "%s: Unable to allocate memory for Admin"
4414299547Serj		    " Queue event!\n", __func__);
4415266423Sjfv		return;
4416266423Sjfv	}
4417266423Sjfv
4418279858Sjfv	IXL_PF_LOCK(pf);
4419266423Sjfv	/* clean and process any events */
4420266423Sjfv	do {
4421266423Sjfv		ret = i40e_clean_arq_element(hw, &event, &result);
4422266423Sjfv		if (ret)
4423266423Sjfv			break;
4424266423Sjfv		opcode = LE16_TO_CPU(event.desc.opcode);
4425299547Serj#ifdef IXL_DEBUG
4426299547Serj		device_printf(dev, "%s: Admin Queue event: %#06x\n", __func__, opcode);
4427299547Serj#endif
4428266423Sjfv		switch (opcode) {
4429266423Sjfv		case i40e_aqc_opc_get_link_status:
4430279858Sjfv			ixl_link_event(pf, &event);
4431266423Sjfv			break;
4432266423Sjfv		case i40e_aqc_opc_send_msg_to_pf:
4433279858Sjfv#ifdef PCI_IOV
4434279858Sjfv			ixl_handle_vf_msg(pf, &event);
4435279858Sjfv#endif
4436266423Sjfv			break;
4437266423Sjfv		case i40e_aqc_opc_event_lan_overflow:
4438266423Sjfv		default:
4439266423Sjfv			break;
4440266423Sjfv		}
4441266423Sjfv
4442270346Sjfv	} while (result && (loop++ < IXL_ADM_LIMIT));
4443266423Sjfv
4444266423Sjfv	free(event.msg_buf, M_DEVBUF);
4445266423Sjfv
4446279858Sjfv	/*
4447279858Sjfv	 * If there are still messages to process, reschedule ourselves.
4448279858Sjfv	 * Otherwise, re-enable our interrupt and go to sleep.
4449279858Sjfv	 */
4450279858Sjfv	if (result > 0)
4451279858Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
4452266423Sjfv	else
4453299547Serj		ixl_enable_adminq(hw);
4454279858Sjfv
4455279858Sjfv	IXL_PF_UNLOCK(pf);
4456266423Sjfv}
4457266423Sjfv
4458266423Sjfvstatic int
4459270346Sjfvixl_debug_info(SYSCTL_HANDLER_ARGS)
4460266423Sjfv{
4461270346Sjfv	struct ixl_pf	*pf;
4462266423Sjfv	int		error, input = 0;
4463266423Sjfv
4464266423Sjfv	error = sysctl_handle_int(oidp, &input, 0, req);
4465266423Sjfv
4466266423Sjfv	if (error || !req->newptr)
4467266423Sjfv		return (error);
4468266423Sjfv
4469266423Sjfv	if (input == 1) {
4470270346Sjfv		pf = (struct ixl_pf *)arg1;
4471270346Sjfv		ixl_print_debug_info(pf);
4472266423Sjfv	}
4473266423Sjfv
4474266423Sjfv	return (error);
4475266423Sjfv}
4476266423Sjfv
4477266423Sjfvstatic void
4478270346Sjfvixl_print_debug_info(struct ixl_pf *pf)
4479266423Sjfv{
4480266423Sjfv	struct i40e_hw		*hw = &pf->hw;
4481270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
4482270346Sjfv	struct ixl_queue	*que = vsi->queues;
4483266423Sjfv	struct rx_ring		*rxr = &que->rxr;
4484266423Sjfv	struct tx_ring		*txr = &que->txr;
4485266423Sjfv	u32			reg;
4486266423Sjfv
4487266423Sjfv
4488270799Sbz	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4489270799Sbz	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4490266423Sjfv	printf("RX next check = %x\n", rxr->next_check);
4491270799Sbz	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4492270799Sbz	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4493266423Sjfv	printf("TX desc avail = %x\n", txr->avail);
4494266423Sjfv
4495266423Sjfv	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4496266423Sjfv	printf("RX Bytes = %x\n", reg);
4497266423Sjfv	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4498266423Sjfv	printf("Port RX Bytes = %x\n", reg);
4499266423Sjfv	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4500266423Sjfv	printf("RX discard = %x\n", reg);
4501266423Sjfv	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4502266423Sjfv	printf("Port RX discard = %x\n", reg);
4503266423Sjfv
4504266423Sjfv	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4505266423Sjfv	printf("TX errors = %x\n", reg);
4506266423Sjfv	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4507266423Sjfv	printf("TX Bytes = %x\n", reg);
4508266423Sjfv
4509266423Sjfv	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4510266423Sjfv	printf("RX undersize = %x\n", reg);
4511266423Sjfv	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4512266423Sjfv	printf("RX fragments = %x\n", reg);
4513266423Sjfv	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4514266423Sjfv	printf("RX oversize = %x\n", reg);
4515266423Sjfv	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4516266423Sjfv	printf("RX length error = %x\n", reg);
4517266423Sjfv	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4518266423Sjfv	printf("mac remote fault = %x\n", reg);
4519266423Sjfv	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4520266423Sjfv	printf("mac local fault = %x\n", reg);
4521266423Sjfv}
4522266423Sjfv
4523266423Sjfv/**
4524266423Sjfv * Update VSI-specific ethernet statistics counters.
4525266423Sjfv **/
4526270346Sjfvvoid ixl_update_eth_stats(struct ixl_vsi *vsi)
4527266423Sjfv{
4528270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4529266423Sjfv	struct i40e_hw *hw = &pf->hw;
4530266423Sjfv	struct i40e_eth_stats *es;
4531266423Sjfv	struct i40e_eth_stats *oes;
4532272227Sglebius	struct i40e_hw_port_stats *nsd;
4533266423Sjfv	u16 stat_idx = vsi->info.stat_counter_idx;
4534266423Sjfv
4535266423Sjfv	es = &vsi->eth_stats;
4536266423Sjfv	oes = &vsi->eth_stats_offsets;
4537272227Sglebius	nsd = &pf->stats;
4538266423Sjfv
4539266423Sjfv	/* Gather up the stats that the hw collects */
4540270346Sjfv	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4541266423Sjfv			   vsi->stat_offsets_loaded,
4542266423Sjfv			   &oes->tx_errors, &es->tx_errors);
4543270346Sjfv	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4544266423Sjfv			   vsi->stat_offsets_loaded,
4545266423Sjfv			   &oes->rx_discards, &es->rx_discards);
4546266423Sjfv
4547270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4548266423Sjfv			   I40E_GLV_GORCL(stat_idx),
4549266423Sjfv			   vsi->stat_offsets_loaded,
4550266423Sjfv			   &oes->rx_bytes, &es->rx_bytes);
4551270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4552266423Sjfv			   I40E_GLV_UPRCL(stat_idx),
4553266423Sjfv			   vsi->stat_offsets_loaded,
4554266423Sjfv			   &oes->rx_unicast, &es->rx_unicast);
4555270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4556266423Sjfv			   I40E_GLV_MPRCL(stat_idx),
4557266423Sjfv			   vsi->stat_offsets_loaded,
4558266423Sjfv			   &oes->rx_multicast, &es->rx_multicast);
4559270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4560266423Sjfv			   I40E_GLV_BPRCL(stat_idx),
4561266423Sjfv			   vsi->stat_offsets_loaded,
4562266423Sjfv			   &oes->rx_broadcast, &es->rx_broadcast);
4563266423Sjfv
4564270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4565266423Sjfv			   I40E_GLV_GOTCL(stat_idx),
4566266423Sjfv			   vsi->stat_offsets_loaded,
4567266423Sjfv			   &oes->tx_bytes, &es->tx_bytes);
4568270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4569266423Sjfv			   I40E_GLV_UPTCL(stat_idx),
4570266423Sjfv			   vsi->stat_offsets_loaded,
4571266423Sjfv			   &oes->tx_unicast, &es->tx_unicast);
4572270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4573266423Sjfv			   I40E_GLV_MPTCL(stat_idx),
4574266423Sjfv			   vsi->stat_offsets_loaded,
4575266423Sjfv			   &oes->tx_multicast, &es->tx_multicast);
4576270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4577266423Sjfv			   I40E_GLV_BPTCL(stat_idx),
4578266423Sjfv			   vsi->stat_offsets_loaded,
4579266423Sjfv			   &oes->tx_broadcast, &es->tx_broadcast);
4580266423Sjfv	vsi->stat_offsets_loaded = true;
4581279858Sjfv}
4582269198Sjfv
4583279858Sjfvstatic void
4584279858Sjfvixl_update_vsi_stats(struct ixl_vsi *vsi)
4585279858Sjfv{
4586279858Sjfv	struct ixl_pf		*pf;
4587279858Sjfv	struct ifnet		*ifp;
4588279858Sjfv	struct i40e_eth_stats	*es;
4589279858Sjfv	u64			tx_discards;
4590279858Sjfv
4591279858Sjfv	struct i40e_hw_port_stats *nsd;
4592279858Sjfv
4593279858Sjfv	pf = vsi->back;
4594279858Sjfv	ifp = vsi->ifp;
4595279858Sjfv	es = &vsi->eth_stats;
4596279858Sjfv	nsd = &pf->stats;
4597279858Sjfv
4598279858Sjfv	ixl_update_eth_stats(vsi);
4599279858Sjfv
4600272227Sglebius	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4601279858Sjfv	for (int i = 0; i < vsi->num_queues; i++)
4602272227Sglebius		tx_discards += vsi->queues[i].txr.br->br_drops;
4603272227Sglebius
4604269198Sjfv	/* Update ifnet stats */
4605272227Sglebius	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4606269198Sjfv	                   es->rx_multicast +
4607272227Sglebius			   es->rx_broadcast);
4608272227Sglebius	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4609269198Sjfv	                   es->tx_multicast +
4610272227Sglebius			   es->tx_broadcast);
4611272227Sglebius	IXL_SET_IBYTES(vsi, es->rx_bytes);
4612272227Sglebius	IXL_SET_OBYTES(vsi, es->tx_bytes);
4613272227Sglebius	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4614272227Sglebius	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4615269198Sjfv
4616279858Sjfv	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4617279858Sjfv	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4618279858Sjfv	    nsd->rx_jabber);
4619272227Sglebius	IXL_SET_OERRORS(vsi, es->tx_errors);
4620272227Sglebius	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4621272227Sglebius	IXL_SET_OQDROPS(vsi, tx_discards);
4622272227Sglebius	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4623272227Sglebius	IXL_SET_COLLISIONS(vsi, 0);
4624266423Sjfv}
4625266423Sjfv
4626266423Sjfv/**
4627266423Sjfv * Reset all of the stats for the given pf
4628266423Sjfv **/
4629270346Sjfvvoid ixl_pf_reset_stats(struct ixl_pf *pf)
4630266423Sjfv{
4631266423Sjfv	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4632266423Sjfv	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4633266423Sjfv	pf->stat_offsets_loaded = false;
4634266423Sjfv}
4635266423Sjfv
4636266423Sjfv/**
4637266423Sjfv * Resets all stats of the given vsi
4638266423Sjfv **/
4639270346Sjfvvoid ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4640266423Sjfv{
4641266423Sjfv	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4642266423Sjfv	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4643266423Sjfv	vsi->stat_offsets_loaded = false;
4644266423Sjfv}
4645266423Sjfv
4646266423Sjfv/**
4647266423Sjfv * Read and update a 48 bit stat from the hw
4648266423Sjfv *
4649266423Sjfv * Since the device stats are not reset at PFReset, they likely will not
4650266423Sjfv * be zeroed when the driver starts.  We'll save the first values read
4651266423Sjfv * and use them as offsets to be subtracted from the raw values in order
4652266423Sjfv * to report stats that count from zero.
4653266423Sjfv **/
4654266423Sjfvstatic void
4655270346Sjfvixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4656266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4657266423Sjfv{
4658266423Sjfv	u64 new_data;
4659266423Sjfv
4660270799Sbz#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4661266423Sjfv	new_data = rd64(hw, loreg);
4662266423Sjfv#else
4663266423Sjfv	/*
4664269198Sjfv	 * Use two rd32's instead of one rd64; FreeBSD versions before
4665266423Sjfv	 * 10 don't support 8 byte bus reads/writes.
4666266423Sjfv	 */
4667266423Sjfv	new_data = rd32(hw, loreg);
4668266423Sjfv	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4669266423Sjfv#endif
4670266423Sjfv
4671266423Sjfv	if (!offset_loaded)
4672266423Sjfv		*offset = new_data;
4673266423Sjfv	if (new_data >= *offset)
4674266423Sjfv		*stat = new_data - *offset;
4675266423Sjfv	else
4676266423Sjfv		*stat = (new_data + ((u64)1 << 48)) - *offset;
4677266423Sjfv	*stat &= 0xFFFFFFFFFFFFULL;
4678266423Sjfv}
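
/*
** A worked example of the 48-bit rollover handling above, using
** hypothetical register values: with *offset = 0xFFFFFFFFFFF0 and a
** post-wrap read of new_data = 0x10, new_data < *offset, so
** *stat = (0x10 + ((u64)1 << 48)) - 0xFFFFFFFFFFF0 = 0x20; 32 units
** were counted across the wrap, and the final mask keeps the result
** within 48 bits.
*/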
4679266423Sjfv
4680266423Sjfv/**
4681266423Sjfv * Read and update a 32 bit stat from the hw
4682266423Sjfv **/
4683266423Sjfvstatic void
4684270346Sjfvixl_stat_update32(struct i40e_hw *hw, u32 reg,
4685266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4686266423Sjfv{
4687266423Sjfv	u32 new_data;
4688266423Sjfv
4689266423Sjfv	new_data = rd32(hw, reg);
4690266423Sjfv	if (!offset_loaded)
4691266423Sjfv		*offset = new_data;
4692266423Sjfv	if (new_data >= *offset)
4693266423Sjfv		*stat = (u32)(new_data - *offset);
4694266423Sjfv	else
4695266423Sjfv		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4696266423Sjfv}
4697266423Sjfv
4698266423Sjfv/*
4699266423Sjfv** Set flow control using sysctl:
4700266423Sjfv** 	0 - off
4701266423Sjfv**	1 - rx pause
4702266423Sjfv**	2 - tx pause
4703266423Sjfv**	3 - full
4704266423Sjfv*/
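/*
** For example, assuming this handler is attached to a sysctl node named
** "fc" under the adapter's dev.ixl.<unit> tree (the node name here is an
** assumption), full flow control could be requested from userland with:
**
**	sysctl dev.ixl.0.fc=3
**
** and the currently requested mode read back with "sysctl dev.ixl.0.fc".
*/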
4705266423Sjfvstatic int
4706270346Sjfvixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4707266423Sjfv{
4708266423Sjfv	/*
4709266423Sjfv	 * TODO: ensure that hardware TX CRC insertion is enabled
4710266423Sjfv	 * when TX flow control is enabled.
4711299547Serj	 * (Not applicable to 40G ports.)
4712266423Sjfv	 */
4713270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4714266423Sjfv	struct i40e_hw *hw = &pf->hw;
4715266423Sjfv	device_t dev = pf->dev;
4716279033Sjfv	int error = 0;
4717266423Sjfv	enum i40e_status_code aq_error = 0;
4718266423Sjfv	u8 fc_aq_err = 0;
4719266423Sjfv
4720279033Sjfv	/* Get request */
4721279033Sjfv	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4722266423Sjfv	if ((error) || (req->newptr == NULL))
4723269198Sjfv		return (error);
4724279033Sjfv	if (pf->fc < 0 || pf->fc > 3) {
4725266423Sjfv		device_printf(dev,
4726266423Sjfv		    "Invalid fc mode; valid modes are 0 through 3\n");
4727266423Sjfv		return (EINVAL);
4728266423Sjfv	}
4729266423Sjfv
4730269198Sjfv	/*
4731269198Sjfv	** Changing flow control mode currently does not work on
4732269198Sjfv	** 40GBASE-CR4 PHYs
4733269198Sjfv	*/
4734269198Sjfv	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4735269198Sjfv	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4736269198Sjfv		device_printf(dev, "Changing flow control mode unsupported"
4737269198Sjfv		    " on 40GBase-CR4 media.\n");
4738269198Sjfv		return (ENODEV);
4739269198Sjfv	}
4740269198Sjfv
4741266423Sjfv	/* Set fc ability for port */
4742279033Sjfv	hw->fc.requested_mode = pf->fc;
4743269198Sjfv	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4744269198Sjfv	if (aq_error) {
4745269198Sjfv		device_printf(dev,
4746269198Sjfv		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4747269198Sjfv		    __func__, aq_error, fc_aq_err);
4748299547Serj		return (EIO);
4749269198Sjfv	}
4750266423Sjfv
4751299547Serj	/* Get new link state */
4752299547Serj	i40e_msec_delay(250);
4753299547Serj	hw->phy.get_link_info = TRUE;
4754299547Serj	i40e_get_link_status(hw, &pf->link_up);
4755299547Serj
4756269198Sjfv	return (0);
4757269198Sjfv}
4758266423Sjfv
4759270346Sjfvstatic int
4760270346Sjfvixl_current_speed(SYSCTL_HANDLER_ARGS)
4761270346Sjfv{
4762270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4763270346Sjfv	struct i40e_hw *hw = &pf->hw;
4764270346Sjfv	int error = 0, index = 0;
4765270346Sjfv
4766270346Sjfv	char *speeds[] = {
4767270346Sjfv		"Unknown",
4768270346Sjfv		"100M",
4769270346Sjfv		"1G",
4770270346Sjfv		"10G",
4771270346Sjfv		"40G",
4772270346Sjfv		"20G"
4773270346Sjfv	};
4774270346Sjfv
4775270346Sjfv	ixl_update_link_status(pf);
4776270346Sjfv
4777270346Sjfv	switch (hw->phy.link_info.link_speed) {
4778270346Sjfv	case I40E_LINK_SPEED_100MB:
4779270346Sjfv		index = 1;
4780270346Sjfv		break;
4781270346Sjfv	case I40E_LINK_SPEED_1GB:
4782270346Sjfv		index = 2;
4783270346Sjfv		break;
4784270346Sjfv	case I40E_LINK_SPEED_10GB:
4785270346Sjfv		index = 3;
4786270346Sjfv		break;
4787270346Sjfv	case I40E_LINK_SPEED_40GB:
4788270346Sjfv		index = 4;
4789270346Sjfv		break;
4790270346Sjfv	case I40E_LINK_SPEED_20GB:
4791270346Sjfv		index = 5;
4792270346Sjfv		break;
4793270346Sjfv	case I40E_LINK_SPEED_UNKNOWN:
4794270346Sjfv	default:
4795270346Sjfv		index = 0;
4796270346Sjfv		break;
4797270346Sjfv	}
4798270346Sjfv
4799270346Sjfv	error = sysctl_handle_string(oidp, speeds[index],
4800270346Sjfv	    strlen(speeds[index]), req);
4801270346Sjfv	return (error);
4802270346Sjfv}
4803270346Sjfv
4804274205Sjfvstatic int
4805274205Sjfvixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4806274205Sjfv{
4807274205Sjfv	struct i40e_hw *hw = &pf->hw;
4808274205Sjfv	device_t dev = pf->dev;
4809274205Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
4810274205Sjfv	struct i40e_aq_set_phy_config config;
4811274205Sjfv	enum i40e_status_code aq_error = 0;
4812274205Sjfv
4813274205Sjfv	/* Get current capability information */
4814279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
4815279033Sjfv	    FALSE, FALSE, &abilities, NULL);
4816274205Sjfv	if (aq_error) {
4817279033Sjfv		device_printf(dev,
4818279033Sjfv		    "%s: Error getting phy capabilities %d,"
4819274205Sjfv		    " aq error: %d\n", __func__, aq_error,
4820274205Sjfv		    hw->aq.asq_last_status);
4821274205Sjfv		return (EAGAIN);
4822274205Sjfv	}
4823274205Sjfv
4824274205Sjfv	/* Prepare new config */
4825274205Sjfv	bzero(&config, sizeof(config));
4826274205Sjfv	config.phy_type = abilities.phy_type;
4827274205Sjfv	config.abilities = abilities.abilities
4828274205Sjfv	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4829274205Sjfv	config.eee_capability = abilities.eee_capability;
4830274205Sjfv	config.eeer = abilities.eeer_val;
4831274205Sjfv	config.low_power_ctrl = abilities.d3_lpan;
4832274205Sjfv	/* Translate into aq cmd link_speed */
4833279858Sjfv	if (speeds & 0x8)
4834279858Sjfv		config.link_speed |= I40E_LINK_SPEED_20GB;
4835274205Sjfv	if (speeds & 0x4)
4836274205Sjfv		config.link_speed |= I40E_LINK_SPEED_10GB;
4837274205Sjfv	if (speeds & 0x2)
4838274205Sjfv		config.link_speed |= I40E_LINK_SPEED_1GB;
4839274205Sjfv	if (speeds & 0x1)
4840274205Sjfv		config.link_speed |= I40E_LINK_SPEED_100MB;
4841274205Sjfv
4842274205Sjfv	/* Do aq command & restart link */
4843274205Sjfv	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4844274205Sjfv	if (aq_error) {
4845279033Sjfv		device_printf(dev,
4846279033Sjfv		    "%s: Error setting new phy config %d,"
4847274205Sjfv		    " aq error: %d\n", __func__, aq_error,
4848274205Sjfv		    hw->aq.asq_last_status);
4849274205Sjfv		return (EAGAIN);
4850274205Sjfv	}
4851274205Sjfv
4852277084Sjfv	/*
4853277084Sjfv	** This seems a bit heavy-handed, but some devices
4854277084Sjfv	** need a full reinit for the new config to take effect.
4855277084Sjfv	*/
4856277084Sjfv	IXL_PF_LOCK(pf);
4857299547Serj	ixl_stop_locked(pf);
4858277084Sjfv	ixl_init_locked(pf);
4859277084Sjfv	IXL_PF_UNLOCK(pf);
4860277084Sjfv
4861274205Sjfv	return (0);
4862274205Sjfv}
4863274205Sjfv
4864269198Sjfv/*
4865269198Sjfv** Control link advertised speed:
4866270346Sjfv**	Flags:
4867270346Sjfv**	0x1 - advertise 100 Mb
4868270346Sjfv**	0x2 - advertise 1G
4869270346Sjfv**	0x4 - advertise 10G
4870279858Sjfv**	0x8 - advertise 20G
4871269198Sjfv**
4872269198Sjfv** Does not work on 40G devices.
4873269198Sjfv*/
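/*
** For example, a requested value of 0x6 (0x2 | 0x4) advertises 1G and
** 10G only, while 0xE additionally advertises 20G on adapters that
** support it; the per-device checks below reject bits the adapter
** cannot advertise.
*/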
4874269198Sjfvstatic int
4875270346Sjfvixl_set_advertise(SYSCTL_HANDLER_ARGS)
4876269198Sjfv{
4877270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4878269198Sjfv	struct i40e_hw *hw = &pf->hw;
4879269198Sjfv	device_t dev = pf->dev;
4880270346Sjfv	int requested_ls = 0;
4881269198Sjfv	int error = 0;
4882266423Sjfv
4883269198Sjfv	/*
4884269198Sjfv	** FW doesn't support changing advertised speed
4885269198Sjfv	** for 40G devices; speed is always 40G.
4886269198Sjfv	*/
4887269198Sjfv	if (i40e_is_40G_device(hw->device_id))
4888269198Sjfv		return (ENODEV);
4889266423Sjfv
4890269198Sjfv	/* Read in new mode */
4891270346Sjfv	requested_ls = pf->advertised_speed;
4892269198Sjfv	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4893269198Sjfv	if ((error) || (req->newptr == NULL))
4894269198Sjfv		return (error);
4895279858Sjfv	/* Check for sane value */
4896279858Sjfv	if (requested_ls < 0x1 || requested_ls > 0xE) {
4897279858Sjfv		device_printf(dev, "Invalid advertised speed; "
4898279858Sjfv		    "valid modes are 0x1 through 0xE\n");
4899269198Sjfv		return (EINVAL);
4900266423Sjfv	}
4901279858Sjfv	/* Then check for validity based on adapter type */
4902279858Sjfv	switch (hw->device_id) {
4903279858Sjfv	case I40E_DEV_ID_10G_BASE_T:
4904299545Serj	case I40E_DEV_ID_10G_BASE_T4:
4905279858Sjfv		if (requested_ls & 0x8) {
4906279858Sjfv			device_printf(dev,
4907279858Sjfv			    "20Gb/s speed not supported on this device.\n");
4908279858Sjfv			return (EINVAL);
4909279858Sjfv		}
4910279858Sjfv		break;
4911279858Sjfv	case I40E_DEV_ID_20G_KR2:
4912299545Serj	case I40E_DEV_ID_20G_KR2_A:
4913279858Sjfv		if (requested_ls & 0x1) {
4914279858Sjfv			device_printf(dev,
4915279858Sjfv			    "100Mb/s speed not supported on this device.\n");
4916279858Sjfv			return (EINVAL);
4917279858Sjfv		}
4918279858Sjfv		break;
4919279858Sjfv	default:
4920279858Sjfv		if (requested_ls & ~0x6) {
4921279858Sjfv			device_printf(dev,
4922279858Sjfv			    "Only 1/10Gbs speeds are supported on this device.\n");
4923279858Sjfv			return (EINVAL);
4924279858Sjfv		}
4925279858Sjfv		break;
4926279858Sjfv	}
4927269198Sjfv
4928269198Sjfv	/* Exit if no change */
4929270346Sjfv	if (pf->advertised_speed == requested_ls)
4930269198Sjfv		return (0);
4931269198Sjfv
4932274205Sjfv	error = ixl_set_advertised_speeds(pf, requested_ls);
4933274205Sjfv	if (error)
4934274205Sjfv		return (error);
4935270346Sjfv
4936270346Sjfv	pf->advertised_speed = requested_ls;
4937270346Sjfv	ixl_update_link_status(pf);
4938269198Sjfv	return (0);
4939266423Sjfv}
4940266423Sjfv
4941266423Sjfv/*
4942266423Sjfv** Get the width and transaction speed of
4943266423Sjfv** the bus this adapter is plugged into.
4944266423Sjfv*/
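/*
** As a rough illustration of the decoding below (assuming the standard
** PCIe Link Status layout, with the negotiated speed in bits 3:0 and
** the negotiated width in bits 9:4): a register value of 0x0083 would
** decode to an 8.0GT/s (Gen3) x8 link, which is the configuration the
** warning at the end of this function recommends.
*/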
4945266423Sjfvstatic u16
4946270346Sjfvixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4947266423Sjfv{
4948266423Sjfv        u16                     link;
4949266423Sjfv        u32                     offset;
4950266423Sjfv
4951266423Sjfv        /* Get the PCI Express Capabilities offset */
4952266423Sjfv        pci_find_cap(dev, PCIY_EXPRESS, &offset);
4953266423Sjfv
4954266423Sjfv        /* ...and read the Link Status Register */
4955266423Sjfv        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4956266423Sjfv
4957266423Sjfv        switch (link & I40E_PCI_LINK_WIDTH) {
4958266423Sjfv        case I40E_PCI_LINK_WIDTH_1:
4959266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x1;
4960266423Sjfv                break;
4961266423Sjfv        case I40E_PCI_LINK_WIDTH_2:
4962266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x2;
4963266423Sjfv                break;
4964266423Sjfv        case I40E_PCI_LINK_WIDTH_4:
4965266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x4;
4966266423Sjfv                break;
4967266423Sjfv        case I40E_PCI_LINK_WIDTH_8:
4968266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x8;
4969266423Sjfv                break;
4970266423Sjfv        default:
4971266423Sjfv                hw->bus.width = i40e_bus_width_unknown;
4972266423Sjfv                break;
4973266423Sjfv        }
4974266423Sjfv
4975266423Sjfv        switch (link & I40E_PCI_LINK_SPEED) {
4976266423Sjfv        case I40E_PCI_LINK_SPEED_2500:
4977266423Sjfv                hw->bus.speed = i40e_bus_speed_2500;
4978266423Sjfv                break;
4979266423Sjfv        case I40E_PCI_LINK_SPEED_5000:
4980266423Sjfv                hw->bus.speed = i40e_bus_speed_5000;
4981266423Sjfv                break;
4982266423Sjfv        case I40E_PCI_LINK_SPEED_8000:
4983266423Sjfv                hw->bus.speed = i40e_bus_speed_8000;
4984266423Sjfv                break;
4985266423Sjfv        default:
4986266423Sjfv                hw->bus.speed = i40e_bus_speed_unknown;
4987266423Sjfv                break;
4988266423Sjfv        }
4989266423Sjfv
4990266423Sjfv
4991266423Sjfv        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4992266423Sjfv            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4993266423Sjfv            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4994266423Sjfv            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4995266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4996266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4997266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4998266423Sjfv            ("Unknown"));
4999266423Sjfv
5000266423Sjfv        if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
5001266423Sjfv            (hw->bus.speed < i40e_bus_speed_8000)) {
5002266423Sjfv                device_printf(dev, "PCI-Express bandwidth available"
5003279858Sjfv                    " for this device\n     may be insufficient for"
5004279858Sjfv                    " optimal performance.\n");
5005266423Sjfv                device_printf(dev, "For expected performance a x8 "
5006266423Sjfv                    "PCIE Gen3 slot is required.\n");
5007266423Sjfv        }
5008266423Sjfv
5009266423Sjfv        return (link);
5010266423Sjfv}
5011266423Sjfv
5012274205Sjfvstatic int
5013274205Sjfvixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
5014274205Sjfv{
5015274205Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
5016274205Sjfv	struct i40e_hw	*hw = &pf->hw;
5017274205Sjfv	char		buf[32];
5018274205Sjfv
5019274205Sjfv	snprintf(buf, sizeof(buf),
5020274205Sjfv	    "f%d.%d a%d.%d n%02x.%02x e%08x",
5021274205Sjfv	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
5022274205Sjfv	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
5023274205Sjfv	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
5024274205Sjfv	    IXL_NVM_VERSION_HI_SHIFT,
5025274205Sjfv	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
5026274205Sjfv	    IXL_NVM_VERSION_LO_SHIFT,
5027274205Sjfv	    hw->nvm.eetrack);
5028274205Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5029274205Sjfv}
5030274205Sjfv
5031299547Serjstatic int
5032299547Serjixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
5033299547Serj{
5034299547Serj	struct i40e_hw *hw = &pf->hw;
5035299547Serj	struct i40e_nvm_access *nvma;
5036299547Serj	device_t dev = pf->dev;
5037299547Serj	enum i40e_status_code status = 0;
5038299547Serj	int perrno;
5039274205Sjfv
5040299547Serj	DEBUGFUNC("ixl_handle_nvmupd_cmd");
5041299547Serj
5042299547Serj	if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
5043299547Serj	    ifd->ifd_data == NULL) {
5044299547Serj		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", __func__);
5045299547Serj		device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n", __func__,
5046299547Serj		    ifd->ifd_len, sizeof(struct i40e_nvm_access));
5047299547Serj		device_printf(dev, "%s: data pointer: %p\n", __func__, ifd->ifd_data);
5048299547Serj		return (EINVAL);
5049299547Serj	}
5050299547Serj
5051299547Serj	nvma = (struct i40e_nvm_access *)ifd->ifd_data;
5052299547Serj
5053299547Serj	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
5054299547Serj
5055299547Serj	return (status) ? perrno : 0;
5056299547Serj}
5057299547Serj
5058277084Sjfv#ifdef IXL_DEBUG_SYSCTL
5059266423Sjfvstatic int
5060270346Sjfvixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5061266423Sjfv{
5062270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5063266423Sjfv	struct i40e_hw *hw = &pf->hw;
5064266423Sjfv	struct i40e_link_status link_status;
5065266423Sjfv	char buf[512];
5066266423Sjfv
5067266423Sjfv	enum i40e_status_code aq_error = 0;
5068266423Sjfv
5069266423Sjfv	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
5070266423Sjfv	if (aq_error) {
5071266423Sjfv		printf("i40e_aq_get_link_info() error %d\n", aq_error);
5072266423Sjfv		return (EPERM);
5073266423Sjfv	}
5074266423Sjfv
5075266423Sjfv	sprintf(buf, "\n"
5076266423Sjfv	    "PHY Type : %#04x\n"
5077266423Sjfv	    "Speed    : %#04x\n"
5078266423Sjfv	    "Link info: %#04x\n"
5079266423Sjfv	    "AN info  : %#04x\n"
5080266423Sjfv	    "Ext info : %#04x",
5081266423Sjfv	    link_status.phy_type, link_status.link_speed,
5082266423Sjfv	    link_status.link_info, link_status.an_info,
5083266423Sjfv	    link_status.ext_info);
5084266423Sjfv
5085266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5086266423Sjfv}
5087266423Sjfv
5088266423Sjfvstatic int
5089270346Sjfvixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5090266423Sjfv{
5091279858Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
5092279858Sjfv	struct i40e_hw		*hw = &pf->hw;
5093279858Sjfv	char			buf[512];
5094279858Sjfv	enum i40e_status_code	aq_error = 0;
5095266423Sjfv
5096279858Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
5097266423Sjfv
5098279858Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
5099279858Sjfv	    TRUE, FALSE, &abilities, NULL);
5100266423Sjfv	if (aq_error) {
5101266423Sjfv		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
5102266423Sjfv		return (EPERM);
5103266423Sjfv	}
5104266423Sjfv
5105266423Sjfv	sprintf(buf, "\n"
5106266423Sjfv	    "PHY Type : %#010x\n"
5107266423Sjfv	    "Speed    : %#04x\n"
5108266423Sjfv	    "Abilities: %#04x\n"
5109266423Sjfv	    "EEE cap  : %#06x\n"
5110266423Sjfv	    "EEER reg : %#010x\n"
5111266423Sjfv	    "D3 Lpan  : %#04x",
5112279858Sjfv	    abilities.phy_type, abilities.link_speed,
5113279858Sjfv	    abilities.abilities, abilities.eee_capability,
5114279858Sjfv	    abilities.eeer_val, abilities.d3_lpan);
5115266423Sjfv
5116266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5117266423Sjfv}
5118266423Sjfv
5119266423Sjfvstatic int
5120270346Sjfvixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5121266423Sjfv{
5122270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5123270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
5124270346Sjfv	struct ixl_mac_filter *f;
5125266423Sjfv	char *buf, *buf_i;
5126266423Sjfv
5127266423Sjfv	int error = 0;
5128266423Sjfv	int ftl_len = 0;
5129266423Sjfv	int ftl_counter = 0;
5130266423Sjfv	int buf_len = 0;
5131266423Sjfv	int entry_len = 42;
5132266423Sjfv
5133266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
5134266423Sjfv		ftl_len++;
5135266423Sjfv	}
5136266423Sjfv
5137266423Sjfv	if (ftl_len < 1) {
5138266423Sjfv		sysctl_handle_string(oidp, "(none)", 6, req);
5139266423Sjfv		return (0);
5140266423Sjfv	}
5141266423Sjfv
5142266423Sjfv	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5143266423Sjfv	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL) {
		device_printf(pf->dev,
		    "Could not allocate memory for filter list output\n");
		return (ENOMEM);
	}
5144266423Sjfv
5145266423Sjfv	sprintf(buf_i++, "\n");
5146266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
5147266423Sjfv		sprintf(buf_i,
5148266423Sjfv		    MAC_FORMAT ", vlan %4d, flags %#06x",
5149266423Sjfv		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5150266423Sjfv		buf_i += entry_len;
5151266423Sjfv		/* don't print '\n' for last entry */
5152266423Sjfv		if (++ftl_counter != ftl_len) {
5153266423Sjfv			sprintf(buf_i, "\n");
5154266423Sjfv			buf_i++;
5155266423Sjfv		}
5156266423Sjfv	}
5157266423Sjfv
5158266423Sjfv	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5159266423Sjfv	if (error)
5160266423Sjfv		printf("sysctl error: %d\n", error);
5161266423Sjfv	free(buf, M_DEVBUF);
5162266423Sjfv	return error;
5163266423Sjfv}
5164269198Sjfv
5165270346Sjfv#define IXL_SW_RES_SIZE 0x14
5166269198Sjfvstatic int
5167277084Sjfvixl_res_alloc_cmp(const void *a, const void *b)
5168277084Sjfv{
5169277084Sjfv	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5170284049Sjfv	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5171284049Sjfv	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5172277084Sjfv
5173277084Sjfv	return ((int)one->resource_type - (int)two->resource_type);
5174277084Sjfv}
5175277084Sjfv
5176277084Sjfvstatic int
5177274205Sjfvixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5178269198Sjfv{
5179270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5180269198Sjfv	struct i40e_hw *hw = &pf->hw;
5181269198Sjfv	device_t dev = pf->dev;
5182269198Sjfv	struct sbuf *buf;
5183269198Sjfv	int error = 0;
5184269198Sjfv
5185269198Sjfv	u8 num_entries;
5186270346Sjfv	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5187269198Sjfv
5188299546Serj	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5189269198Sjfv	if (!buf) {
5190269198Sjfv		device_printf(dev, "Could not allocate sbuf for output.\n");
5191269198Sjfv		return (ENOMEM);
5192269198Sjfv	}
5193269198Sjfv
5194277084Sjfv	bzero(resp, sizeof(resp));
5195269198Sjfv	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5196269198Sjfv				resp,
5197270346Sjfv				IXL_SW_RES_SIZE,
5198269198Sjfv				NULL);
5199269198Sjfv	if (error) {
5200279858Sjfv		device_printf(dev,
5201279858Sjfv		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5202269198Sjfv		    __func__, error, hw->aq.asq_last_status);
5203269198Sjfv		sbuf_delete(buf);
5204269198Sjfv		return error;
5205269198Sjfv	}
5206269198Sjfv
5207277084Sjfv	/* Sort entries by type for display */
5208277084Sjfv	qsort(resp, num_entries,
5209277084Sjfv	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5210277084Sjfv	    &ixl_res_alloc_cmp);
5211277084Sjfv
5212269198Sjfv	sbuf_cat(buf, "\n");
5213277084Sjfv	sbuf_printf(buf, "# of entries: %d\n", num_entries);
5214269198Sjfv	sbuf_printf(buf,
5215269198Sjfv	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
5216269198Sjfv	    "     | (this)     | (all) | (this) | (all)       \n");
5217269198Sjfv	for (int i = 0; i < num_entries; i++) {
5218269198Sjfv		sbuf_printf(buf,
5219269198Sjfv		    "%#4x | %10d   %5d   %6d   %12d",
5220269198Sjfv		    resp[i].resource_type,
5221269198Sjfv		    resp[i].guaranteed,
5222269198Sjfv		    resp[i].total,
5223269198Sjfv		    resp[i].used,
5224269198Sjfv		    resp[i].total_unalloced);
5225269198Sjfv		if (i < num_entries - 1)
5226269198Sjfv			sbuf_cat(buf, "\n");
5227269198Sjfv	}
5228269198Sjfv
5229269198Sjfv	error = sbuf_finish(buf);
5230299546Serj	if (error)
5231299545Serj		device_printf(dev, "Error finishing sbuf: %d\n", error);
5232299545Serj
5233290708Ssmh	sbuf_delete(buf);
5234299545Serj	return error;
5235274205Sjfv}
5236269198Sjfv
5237274205Sjfv/*
5238274205Sjfv** Caller must init and delete sbuf; this function will clear and
5239274205Sjfv** finish it for caller.
5240274205Sjfv*/
5241274205Sjfvstatic char *
5242274205Sjfvixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
5243274205Sjfv{
5244274205Sjfv	sbuf_clear(s);
5245274205Sjfv
5246274205Sjfv	if (seid == 0 && uplink)
5247274205Sjfv		sbuf_cat(s, "Network");
5248274205Sjfv	else if (seid == 0)
5249274205Sjfv		sbuf_cat(s, "Host");
5250274205Sjfv	else if (seid == 1)
5251274205Sjfv		sbuf_cat(s, "EMP");
5252274205Sjfv	else if (seid <= 5)
5253274205Sjfv		sbuf_printf(s, "MAC %d", seid - 2);
5254274205Sjfv	else if (seid <= 15)
5255274205Sjfv		sbuf_cat(s, "Reserved");
5256274205Sjfv	else if (seid <= 31)
5257274205Sjfv		sbuf_printf(s, "PF %d", seid - 16);
5258274205Sjfv	else if (seid <= 159)
5259274205Sjfv		sbuf_printf(s, "VF %d", seid - 32);
5260274205Sjfv	else if (seid <= 287)
5261274205Sjfv		sbuf_cat(s, "Reserved");
5262274205Sjfv	else if (seid <= 511)
5263274205Sjfv		sbuf_cat(s, "Other"); // for other structures
5264274205Sjfv	else if (seid <= 895)
5265274205Sjfv		sbuf_printf(s, "VSI %d", seid - 512);
5266274205Sjfv	else if (seid <= 1023)
5267274205Sjfv		sbuf_printf(s, "Reserved");
5268274205Sjfv	else
5269274205Sjfv		sbuf_cat(s, "Invalid");
5270274205Sjfv
5271274205Sjfv	sbuf_finish(s);
5272274205Sjfv	return sbuf_data(s);
5273269198Sjfv}
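
/*
** For example, a SEID of 528 falls in the 512-895 range and is printed
** as "VSI 16", while SEID 0 is reported as either "Network" or "Host"
** depending on whether it appears as an uplink.
*/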
5274269198Sjfv
5275274205Sjfvstatic int
5276274205Sjfvixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5277274205Sjfv{
5278274205Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5279274205Sjfv	struct i40e_hw *hw = &pf->hw;
5280274205Sjfv	device_t dev = pf->dev;
5281274205Sjfv	struct sbuf *buf;
5282274205Sjfv	struct sbuf *nmbuf;
5283274205Sjfv	int error = 0;
5284274205Sjfv	u8 aq_buf[I40E_AQ_LARGE_BUF];
5285274205Sjfv
5286274205Sjfv	u16 next = 0;
5287274205Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
5288274205Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5289274205Sjfv
5290299546Serj	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5291274205Sjfv	if (!buf) {
5292274205Sjfv		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5293274205Sjfv		return (ENOMEM);
5294274205Sjfv	}
5295274205Sjfv
5296274205Sjfv	error = i40e_aq_get_switch_config(hw, sw_config,
5297274205Sjfv	    sizeof(aq_buf), &next, NULL);
5298274205Sjfv	if (error) {
5299279858Sjfv		device_printf(dev,
5300279858Sjfv		    "%s: aq_get_switch_config() error %d, aq error %d\n",
5301274205Sjfv		    __func__, error, hw->aq.asq_last_status);
5302274205Sjfv		sbuf_delete(buf);
5303274205Sjfv		return error;
5304274205Sjfv	}
5305274205Sjfv
5306274205Sjfv	nmbuf = sbuf_new_auto();
5307274205Sjfv	if (!nmbuf) {
5308274205Sjfv		device_printf(dev, "Could not allocate sbuf for name output.\n");
5309299546Serj		sbuf_delete(buf);
5310274205Sjfv		return (ENOMEM);
5311274205Sjfv	}
5312274205Sjfv
5313274205Sjfv	sbuf_cat(buf, "\n");
5314274205Sjfv	// Assuming <= 255 elements in switch
5315274205Sjfv	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5316274205Sjfv	/* Exclude:
5317274205Sjfv	** Revision -- all elements are revision 1 for now
5318274205Sjfv	*/
5319274205Sjfv	sbuf_printf(buf,
5320274205Sjfv	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5321274205Sjfv	    "                |          |          | (uplink)\n");
5322274205Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
5323274205Sjfv		// "%4d (%8s) | %8s   %8s   %#8x",
5324274205Sjfv		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5325274205Sjfv		sbuf_cat(buf, " ");
5326279858Sjfv		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5327279858Sjfv		    sw_config->element[i].seid, false));
5328274205Sjfv		sbuf_cat(buf, " | ");
5329279858Sjfv		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5330279858Sjfv		    sw_config->element[i].uplink_seid, true));
5331274205Sjfv		sbuf_cat(buf, "   ");
5332279858Sjfv		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5333279858Sjfv		    sw_config->element[i].downlink_seid, false));
5334274205Sjfv		sbuf_cat(buf, "   ");
5335274205Sjfv		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5336274205Sjfv		if (i < sw_config->header.num_reported - 1)
5337274205Sjfv			sbuf_cat(buf, "\n");
5338274205Sjfv	}
5339274205Sjfv	sbuf_delete(nmbuf);
5340274205Sjfv
5341274205Sjfv	error = sbuf_finish(buf);
5342299546Serj	if (error)
5343299545Serj		device_printf(dev, "Error finishing sbuf: %d\n", error);
5344299545Serj
5345274205Sjfv	sbuf_delete(buf);
5346274205Sjfv
5347274205Sjfv	return (error);
5348274205Sjfv}
5349279858Sjfv#endif /* IXL_DEBUG_SYSCTL */
5350274205Sjfv
5351279858Sjfv#ifdef PCI_IOV
5352269198Sjfvstatic int
5353279858Sjfvixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5354269198Sjfv{
5355279858Sjfv	struct i40e_hw *hw;
5356279858Sjfv	struct ixl_vsi *vsi;
5357279858Sjfv	struct i40e_vsi_context vsi_ctx;
5358279858Sjfv	int i;
5359279858Sjfv	uint16_t first_queue;
5360279858Sjfv	enum i40e_status_code code;
5361269198Sjfv
5362279858Sjfv	hw = &pf->hw;
5363279858Sjfv	vsi = &pf->vsi;
5364269198Sjfv
5365279858Sjfv	vsi_ctx.pf_num = hw->pf_id;
5366279858Sjfv	vsi_ctx.uplink_seid = pf->veb_seid;
5367279858Sjfv	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5368279858Sjfv	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5369279858Sjfv	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5370279858Sjfv
5371279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5372279858Sjfv
5373279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5374279858Sjfv	vsi_ctx.info.switch_id = htole16(0);
5375279858Sjfv
5376279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5377279858Sjfv	vsi_ctx.info.sec_flags = 0;
5378279858Sjfv	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5379279858Sjfv		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5380279858Sjfv
5381279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5382279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5383279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5384279858Sjfv
5385279858Sjfv	vsi_ctx.info.valid_sections |=
5386279858Sjfv	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5387279858Sjfv	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5388279858Sjfv	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5389279858Sjfv	for (i = 0; i < IXLV_MAX_QUEUES; i++)
5390279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
5391279858Sjfv	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5392279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5393279858Sjfv
5394279858Sjfv	vsi_ctx.info.tc_mapping[0] = htole16(
5395279858Sjfv	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5396279858Sjfv	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5397279858Sjfv
5398279858Sjfv	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5399279858Sjfv	if (code != I40E_SUCCESS)
5400279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5401279858Sjfv	vf->vsi.seid = vsi_ctx.seid;
5402279858Sjfv	vf->vsi.vsi_num = vsi_ctx.vsi_number;
5403279858Sjfv	vf->vsi.first_queue = first_queue;
5404279858Sjfv	vf->vsi.num_queues = IXLV_MAX_QUEUES;
5405279858Sjfv
5406279858Sjfv	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5407279858Sjfv	if (code != I40E_SUCCESS)
5408279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5409279858Sjfv
5410279858Sjfv	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5411279858Sjfv	if (code != I40E_SUCCESS) {
5412279858Sjfv		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5413279858Sjfv		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5414279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5415269198Sjfv	}
5416269198Sjfv
5417279858Sjfv	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
5418279858Sjfv	return (0);
5419279858Sjfv}
5420279858Sjfv
5421279858Sjfvstatic int
5422279858Sjfvixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5423279858Sjfv{
5424279858Sjfv	struct i40e_hw *hw;
5425279858Sjfv	int error;
5426279858Sjfv
5427279858Sjfv	hw = &pf->hw;
5428279858Sjfv
5429279858Sjfv	error = ixl_vf_alloc_vsi(pf, vf);
5430279858Sjfv	if (error != 0)
5431269198Sjfv		return (error);
5432279858Sjfv
5433279858Sjfv	vf->vsi.hw_filters_add = 0;
5434279858Sjfv	vf->vsi.hw_filters_del = 0;
5435279858Sjfv	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5436279858Sjfv	ixl_reconfigure_filters(&vf->vsi);
5437279858Sjfv
5438279858Sjfv	return (0);
5439279858Sjfv}
5440279858Sjfv
5441279858Sjfvstatic void
5442279858Sjfvixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5443279858Sjfv    uint32_t val)
5444279858Sjfv{
5445279858Sjfv	uint32_t qtable;
5446279858Sjfv	int index, shift;
5447279858Sjfv
5448279858Sjfv	/*
5449279858Sjfv	 * Two queues are mapped in a single register, so we have to do some
5450279858Sjfv	 * gymnastics to convert the queue number into a register index and
5451279858Sjfv	 * shift.
5452279858Sjfv	 */
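	/*
	 * For example, qnum 4 and qnum 5 share QTABLE register index 2:
	 * qnum 4 occupies the low half of the register (shift 0) and qnum 5
	 * the high half (shift I40E_VSILAN_QTABLE_QINDEX_1_SHIFT).
	 */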
5453279858Sjfv	index = qnum / 2;
5454279858Sjfv	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5455279858Sjfv
5456279858Sjfv	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5457279858Sjfv	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5458279858Sjfv	qtable |= val << shift;
5459279858Sjfv	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5460279858Sjfv}
5461279858Sjfv
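/*
 * Program the queue mapping for a VF: enable the per-VSI queue table,
 * point each VPLAN_QTABLE slot at the globally numbered queue assigned to
 * the VF, mirror that mapping into the VSI queue table, and mark the
 * remaining VSI queue slots unused.
 */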
5462279858Sjfvstatic void
5463279858Sjfvixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5464279858Sjfv{
5465279858Sjfv	struct i40e_hw *hw;
5466279858Sjfv	uint32_t qtable;
5467279858Sjfv	int i;
5468279858Sjfv
5469279858Sjfv	hw = &pf->hw;
5470279858Sjfv
5471279858Sjfv	/*
5472279858Sjfv	 * Contiguous mappings aren't actually supported by the hardware,
5473279858Sjfv	 * so we have to use non-contiguous mappings.
5474279858Sjfv	 */
5475279858Sjfv	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5476279858Sjfv	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5477279858Sjfv
5478279858Sjfv	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5479279858Sjfv	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
5480279858Sjfv
5481279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++) {
5482279858Sjfv		qtable = (vf->vsi.first_queue + i) <<
5483279858Sjfv		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5484279858Sjfv
5485279858Sjfv		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5486279858Sjfv	}
5487279858Sjfv
5488279858Sjfv	/* Map queues allocated to VF to its VSI. */
5489279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++)
5490279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5491279858Sjfv
5492279858Sjfv	/* Set rest of VSI queues as unused. */
5493279858Sjfv	for (; i < IXL_MAX_VSI_QUEUES; i++)
5494279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i,
5495279858Sjfv		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
5496279858Sjfv
5497279858Sjfv	ixl_flush(hw);
5498279858Sjfv}
5499279858Sjfv
5500279858Sjfvstatic void
5501279858Sjfvixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5502279858Sjfv{
5503279858Sjfv	struct i40e_hw *hw;
5504279858Sjfv
5505279858Sjfv	hw = &pf->hw;
5506279858Sjfv
5507279858Sjfv	if (vsi->seid == 0)
5508279858Sjfv		return;
5509279858Sjfv
5510279858Sjfv	i40e_aq_delete_element(hw, vsi->seid, NULL);
5511279858Sjfv}
5512279858Sjfv
5513279858Sjfvstatic void
5514279858Sjfvixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5515279858Sjfv{
5516279858Sjfv
5517279858Sjfv	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
5518279858Sjfv	ixl_flush(hw);
5519279858Sjfv}
5520279858Sjfv
5521279858Sjfvstatic void
5522279858Sjfvixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5523279858Sjfv{
5524279858Sjfv
5525279858Sjfv	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5526279858Sjfv	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
5527279858Sjfv	ixl_flush(hw);
5528279858Sjfv}
5529279858Sjfv
5530279858Sjfvstatic void
5531279858Sjfvixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5532279858Sjfv{
5533279858Sjfv	struct i40e_hw *hw;
5534279858Sjfv	uint32_t vfint_reg, vpint_reg;
5535279858Sjfv	int i;
5536279858Sjfv
5537279858Sjfv	hw = &pf->hw;
5538279858Sjfv
5539279858Sjfv	ixl_vf_vsi_release(pf, &vf->vsi);
5540279858Sjfv
5541279858Sjfv	/* Index 0 has a special register. */
5542279858Sjfv	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5543279858Sjfv
5544279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5545279858Sjfv		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
5546279858Sjfv		ixl_vf_disable_queue_intr(hw, vfint_reg);
5547279858Sjfv	}
5548279858Sjfv
5549279858Sjfv	/* Index 0 has a special register. */
5550279858Sjfv	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5551279858Sjfv
5552279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5553279858Sjfv		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5554279858Sjfv		ixl_vf_unregister_intr(hw, vpint_reg);
5555279858Sjfv	}
5556279858Sjfv
5557279858Sjfv	vf->vsi.num_queues = 0;
5558279858Sjfv}
5559279858Sjfv
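/*
 * Wait for pending PCIe transactions from the VF to drain by selecting
 * the VF's device status through PF_PCI_CIAA and polling PF_PCI_CIAD;
 * gives up with ETIMEDOUT after IXL_VF_RESET_TIMEOUT attempts.
 */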
5560279858Sjfvstatic int
5561279858Sjfvixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5562279858Sjfv{
5563279858Sjfv	struct i40e_hw *hw;
5564279858Sjfv	int i;
5565279858Sjfv	uint16_t global_vf_num;
5566279858Sjfv	uint32_t ciad;
5567279858Sjfv
5568279858Sjfv	hw = &pf->hw;
5569279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5570279858Sjfv
5571279858Sjfv	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5572279858Sjfv	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5573279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5574279858Sjfv		ciad = rd32(hw, I40E_PF_PCI_CIAD);
5575279858Sjfv		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5576279858Sjfv			return (0);
5577279858Sjfv		DELAY(1);
5578279858Sjfv	}
5579279858Sjfv
5580279858Sjfv	return (ETIMEDOUT);
5581279858Sjfv}
5582279858Sjfv
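/*
 * Reset a VF: assert the software reset bit in VPGEN_VFRTRIG, then have
 * ixl_reinit_vf() wait for the reset to complete and rebuild the VF's
 * VSI and queue mappings.
 */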
5583279858Sjfvstatic void
5584279858Sjfvixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5585279858Sjfv{
5586279858Sjfv	struct i40e_hw *hw;
5587279858Sjfv	uint32_t vfrtrig;
5588279858Sjfv
5589279858Sjfv	hw = &pf->hw;
5590279858Sjfv
5591279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5592279858Sjfv	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5593279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5594279858Sjfv	ixl_flush(hw);
5595279858Sjfv
5596279858Sjfv	ixl_reinit_vf(pf, vf);
5597279858Sjfv}
5598279858Sjfv
5599279858Sjfvstatic void
5600279858Sjfvixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5601279858Sjfv{
5602279858Sjfv	struct i40e_hw *hw;
5603279858Sjfv	uint32_t vfrstat, vfrtrig;
5604279858Sjfv	int i, error;
5605279858Sjfv
5606279858Sjfv	hw = &pf->hw;
5607279858Sjfv
5608279858Sjfv	error = ixl_flush_pcie(pf, vf);
5609279858Sjfv	if (error != 0)
5610279858Sjfv		device_printf(pf->dev,
5611279858Sjfv		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
5612279858Sjfv		    vf->vf_num);
5613279858Sjfv
5614279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5615279858Sjfv		DELAY(10);
5616279858Sjfv
5617279858Sjfv		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5618279858Sjfv		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5619279858Sjfv			break;
5620279858Sjfv	}
5621279858Sjfv
5622279858Sjfv	if (i == IXL_VF_RESET_TIMEOUT)
5623279858Sjfv		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5624279858Sjfv
5625279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5626279858Sjfv
5627279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5628279858Sjfv	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5629279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5630279858Sjfv
5631279858Sjfv	if (vf->vsi.seid != 0)
5632279858Sjfv		ixl_disable_rings(&vf->vsi);
5633279858Sjfv
5634279858Sjfv	ixl_vf_release_resources(pf, vf);
5635279858Sjfv	ixl_vf_setup_vsi(pf, vf);
5636279858Sjfv	ixl_vf_map_queues(pf, vf);
5637279858Sjfv
5638279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
5639279858Sjfv	ixl_flush(hw);
5640279858Sjfv}
5641279858Sjfv
5642279858Sjfvstatic const char *
5643279858Sjfvixl_vc_opcode_str(uint16_t op)
5644279858Sjfv{
5645279858Sjfv
5646279858Sjfv	switch (op) {
5647279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
5648279858Sjfv		return ("VERSION");
5649279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
5650279858Sjfv		return ("RESET_VF");
5651279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5652279858Sjfv		return ("GET_VF_RESOURCES");
5653279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5654279858Sjfv		return ("CONFIG_TX_QUEUE");
5655279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5656279858Sjfv		return ("CONFIG_RX_QUEUE");
5657279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5658279858Sjfv		return ("CONFIG_VSI_QUEUES");
5659279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5660279858Sjfv		return ("CONFIG_IRQ_MAP");
5661279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5662279858Sjfv		return ("ENABLE_QUEUES");
5663279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5664279858Sjfv		return ("DISABLE_QUEUES");
5665279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5666279858Sjfv		return ("ADD_ETHER_ADDRESS");
5667279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5668279858Sjfv		return ("DEL_ETHER_ADDRESS");
5669279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
5670279858Sjfv		return ("ADD_VLAN");
5671279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
5672279858Sjfv		return ("DEL_VLAN");
5673279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5674279858Sjfv		return ("CONFIG_PROMISCUOUS_MODE");
5675279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5676279858Sjfv		return ("GET_STATS");
5677279858Sjfv	case I40E_VIRTCHNL_OP_FCOE:
5678279858Sjfv		return ("FCOE");
5679279858Sjfv	case I40E_VIRTCHNL_OP_EVENT:
5680279858Sjfv		return ("EVENT");
5681279858Sjfv	default:
5682279858Sjfv		return ("UNKNOWN");
5683279858Sjfv	}
5684279858Sjfv}
5685279858Sjfv
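/*
 * Debug level at which a given virtchnl opcode is logged.  GET_STATS is
 * reported at a higher level than everything else, presumably so that the
 * periodic stats polling done by VF drivers does not flood the log unless
 * verbose debugging is explicitly enabled.
 */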
5686279858Sjfvstatic int
5687279858Sjfvixl_vc_opcode_level(uint16_t opcode)
5688279858Sjfv{
5689279858Sjfv
5690279858Sjfv	switch (opcode) {
5691279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5692279858Sjfv		return (10);
5693279858Sjfv	default:
5694279858Sjfv		return (5);
5695279858Sjfv	}
5696279858Sjfv}
5697279858Sjfv
5698279858Sjfvstatic void
5699279858Sjfvixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5700279858Sjfv    enum i40e_status_code status, void *msg, uint16_t len)
5701279858Sjfv{
5702279858Sjfv	struct i40e_hw *hw;
5703279858Sjfv	int global_vf_id;
5704279858Sjfv
5705279858Sjfv	hw = &pf->hw;
5706279858Sjfv	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5707279858Sjfv
5708279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5709279858Sjfv	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5710279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
5711279858Sjfv
5712279858Sjfv	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
5713279858Sjfv}
5714279858Sjfv
5715279858Sjfvstatic void
5716279858Sjfvixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5717279858Sjfv{
5718279858Sjfv
5719279858Sjfv	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
5720279858Sjfv}
5721279858Sjfv
5722279858Sjfvstatic void
5723279858Sjfvixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5724279858Sjfv    enum i40e_status_code status, const char *file, int line)
5725279858Sjfv{
5726279858Sjfv
5727279858Sjfv	I40E_VC_DEBUG(pf, 1,
5728279858Sjfv	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5729279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5730279858Sjfv	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
5731279858Sjfv}
5732279858Sjfv
5733279858Sjfvstatic void
5734279858Sjfvixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5735279858Sjfv    uint16_t msg_size)
5736279858Sjfv{
5737279858Sjfv	struct i40e_virtchnl_version_info reply;
5738279858Sjfv
5739279858Sjfv	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5740279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5741279858Sjfv		    I40E_ERR_PARAM);
5742279858Sjfv		return;
5743279858Sjfv	}
5744279858Sjfv
5745279858Sjfv	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5746279858Sjfv	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5747279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
5748279858Sjfv	    sizeof(reply));
5749279858Sjfv}
5750279858Sjfv
5751279858Sjfvstatic void
5752279858Sjfvixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5753279858Sjfv    uint16_t msg_size)
5754279858Sjfv{
5755279858Sjfv
5756279858Sjfv	if (msg_size != 0) {
5757279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5758279858Sjfv		    I40E_ERR_PARAM);
5759279858Sjfv		return;
5760279858Sjfv	}
5761279858Sjfv
5762279858Sjfv	ixl_reset_vf(pf, vf);
5763279858Sjfv
5764279858Sjfv	/* No response to a reset message. */
5765279858Sjfv}
5766279858Sjfv
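/*
 * Answer a GET_VF_RESOURCES request.  The reply describes a single VSI:
 * its VSI number, the number of queue pairs assigned to the VF, the MSI-X
 * vector budget from the function capabilities and the VF's default MAC
 * address.
 */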
5767279858Sjfvstatic void
5768279858Sjfvixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5769279858Sjfv    uint16_t msg_size)
5770279858Sjfv{
5771279858Sjfv	struct i40e_virtchnl_vf_resource reply;
5772279858Sjfv
5773279858Sjfv	if (msg_size != 0) {
5774279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5775279858Sjfv		    I40E_ERR_PARAM);
5776279858Sjfv		return;
5777279858Sjfv	}
5778279858Sjfv
5779279858Sjfv	bzero(&reply, sizeof(reply));
5780279858Sjfv
5781279858Sjfv	reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5782279858Sjfv
5783279858Sjfv	reply.num_vsis = 1;
5784279858Sjfv	reply.num_queue_pairs = vf->vsi.num_queues;
5785279858Sjfv	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5786279858Sjfv	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5787279858Sjfv	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5788279858Sjfv	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5789279858Sjfv	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5790279858Sjfv
5791279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5792279858Sjfv	    I40E_SUCCESS, &reply, sizeof(reply));
5793279858Sjfv}
5794279858Sjfv
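/*
 * Program the HMC transmit queue context for one VF queue.  The
 * VF-relative queue id from the message is translated into a global queue
 * number using the VSI's first_queue, the ring base address is stored in
 * units of IXL_TX_CTX_BASE_UNITS, and QTX_CTL is written so the hardware
 * associates the queue with this VF.
 */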
5795279858Sjfvstatic int
5796279858Sjfvixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5797279858Sjfv    struct i40e_virtchnl_txq_info *info)
5798279858Sjfv{
5799279858Sjfv	struct i40e_hw *hw;
5800279858Sjfv	struct i40e_hmc_obj_txq txq;
5801279858Sjfv	uint16_t global_queue_num, global_vf_num;
5802279858Sjfv	enum i40e_status_code status;
5803279858Sjfv	uint32_t qtx_ctl;
5804279858Sjfv
5805279858Sjfv	hw = &pf->hw;
5806279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
5807279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5808279858Sjfv	bzero(&txq, sizeof(txq));
5809279858Sjfv
5810279858Sjfv	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5811279858Sjfv	if (status != I40E_SUCCESS)
5812269198Sjfv		return (EINVAL);
5813279858Sjfv
5814279858Sjfv	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5815279858Sjfv
5816279858Sjfv	txq.head_wb_ena = info->headwb_enabled;
5817279858Sjfv	txq.head_wb_addr = info->dma_headwb_addr;
5818279858Sjfv	txq.qlen = info->ring_len;
5819279858Sjfv	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5820279858Sjfv	txq.rdylist_act = 0;
5821279858Sjfv
5822279858Sjfv	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5823279858Sjfv	if (status != I40E_SUCCESS)
5824279858Sjfv		return (EINVAL);
5825279858Sjfv
5826279858Sjfv	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5827279858Sjfv	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5828279858Sjfv	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5829279858Sjfv	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
5830279858Sjfv	ixl_flush(hw);
5831279858Sjfv
5832279858Sjfv	return (0);
5833279858Sjfv}
5834279858Sjfv
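/*
 * Program the HMC receive queue context for one VF queue, after checking
 * the requested buffer, frame and (when header split is enabled) header
 * sizes against the IXL_VF_MAX_* limits.
 */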
5835279858Sjfvstatic int
5836279858Sjfvixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5837279858Sjfv    struct i40e_virtchnl_rxq_info *info)
5838279858Sjfv{
5839279858Sjfv	struct i40e_hw *hw;
5840279858Sjfv	struct i40e_hmc_obj_rxq rxq;
5841279858Sjfv	uint16_t global_queue_num;
5842279858Sjfv	enum i40e_status_code status;
5843279858Sjfv
5844279858Sjfv	hw = &pf->hw;
5845279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
5846279858Sjfv	bzero(&rxq, sizeof(rxq));
5847279858Sjfv
5848279858Sjfv	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5849279858Sjfv		return (EINVAL);
5850279858Sjfv
5851279858Sjfv	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5852279858Sjfv	    info->max_pkt_size < ETHER_MIN_LEN)
5853279858Sjfv		return (EINVAL);
5854279858Sjfv
5855279858Sjfv	if (info->splithdr_enabled) {
5856279858Sjfv		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
5857279858Sjfv			return (EINVAL);
5858279858Sjfv
5859279858Sjfv		rxq.hsplit_0 = info->rx_split_pos &
5860279858Sjfv		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5861279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5862279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5863279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5864279858Sjfv		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5865279858Sjfv
5866279858Sjfv		rxq.dtype = 2;
5867269198Sjfv	}
5868269198Sjfv
5869279858Sjfv	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5870279858Sjfv	if (status != I40E_SUCCESS)
5871279858Sjfv		return (EINVAL);
5872269198Sjfv
5873279858Sjfv	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5874279858Sjfv	rxq.qlen = info->ring_len;
5875269198Sjfv
5876279858Sjfv	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5877269198Sjfv
5878279858Sjfv	rxq.dsize = 1;
5879279858Sjfv	rxq.crcstrip = 1;
5880279858Sjfv	rxq.l2tsel = 1;
5881269198Sjfv
5882279858Sjfv	rxq.rxmax = info->max_pkt_size;
5883279858Sjfv	rxq.tphrdesc_ena = 1;
5884279858Sjfv	rxq.tphwdesc_ena = 1;
5885279858Sjfv	rxq.tphdata_ena = 1;
5886279858Sjfv	rxq.tphhead_ena = 1;
5887279858Sjfv	rxq.lrxqthresh = 2;
5888279858Sjfv	rxq.prefena = 1;
5889279858Sjfv
5890279858Sjfv	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5891279858Sjfv	if (status != I40E_SUCCESS)
5892279858Sjfv		return (EINVAL);
5893279858Sjfv
5894279858Sjfv	return (0);
5895279858Sjfv}
5896279858Sjfv
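/*
 * Handle a CONFIG_VSI_QUEUES message.  The message is variable-length: a
 * fixed header followed by num_queue_pairs queue-pair descriptors, so a
 * request configuring two queue pairs, for example, must be exactly
 * sizeof(*info) + 2 * sizeof(*pair) bytes.  Anything else is rejected
 * with a NACK.
 */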
5897279858Sjfvstatic void
5898279858Sjfvixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5899279858Sjfv    uint16_t msg_size)
5900279858Sjfv{
5901279858Sjfv	struct i40e_virtchnl_vsi_queue_config_info *info;
5902279858Sjfv	struct i40e_virtchnl_queue_pair_info *pair;
5903279858Sjfv	int i;
5904279858Sjfv
5905279858Sjfv	if (msg_size < sizeof(*info)) {
5906279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5907279858Sjfv		    I40E_ERR_PARAM);
5908279858Sjfv		return;
5909279858Sjfv	}
5910279858Sjfv
5911279858Sjfv	info = msg;
5912279858Sjfv	if (info->num_queue_pairs == 0) {
5913279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5914279858Sjfv		    I40E_ERR_PARAM);
5915279858Sjfv		return;
5916279858Sjfv	}
5917279858Sjfv
5918279858Sjfv	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5919279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5920279858Sjfv		    I40E_ERR_PARAM);
5921279858Sjfv		return;
5922279858Sjfv	}
5923279858Sjfv
5924279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
5925279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5926279858Sjfv		    I40E_ERR_PARAM);
5927279858Sjfv		return;
5928279858Sjfv	}
5929279858Sjfv
5930279858Sjfv	for (i = 0; i < info->num_queue_pairs; i++) {
5931279858Sjfv		pair = &info->qpair[i];
5932279858Sjfv
5933279858Sjfv		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5934279858Sjfv		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
5935279858Sjfv		    pair->txq.queue_id != pair->rxq.queue_id ||
5936279858Sjfv		    pair->txq.queue_id >= vf->vsi.num_queues) {
5937279858Sjfv
5938279858Sjfv			i40e_send_vf_nack(pf, vf,
5939279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5940279858Sjfv			return;
5941279858Sjfv		}
5942279858Sjfv
5943279858Sjfv		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5944279858Sjfv			i40e_send_vf_nack(pf, vf,
5945279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5946279858Sjfv			return;
5947279858Sjfv		}
5948279858Sjfv
5949279858Sjfv		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5950279858Sjfv			i40e_send_vf_nack(pf, vf,
5951279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5952279858Sjfv			return;
5953279858Sjfv		}
5954279858Sjfv	}
5955279858Sjfv
5956279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
5957279858Sjfv}
5958279858Sjfv
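/*
 * Write one link of a VF interrupt linked list.  The QINT_[RT]QCTL
 * register for the current queue records the previously processed queue
 * as its "next" entry, so the list is built back to front; the last queue
 * handled becomes the head that ixl_vf_config_vector() writes to the
 * vector's VPINT_LNKLST register.
 */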
5959279858Sjfvstatic void
5960279858Sjfvixl_vf_set_qctl(struct ixl_pf *pf,
5961279858Sjfv    const struct i40e_virtchnl_vector_map *vector,
5962279858Sjfv    enum i40e_queue_type cur_type, uint16_t cur_queue,
5963279858Sjfv    enum i40e_queue_type *last_type, uint16_t *last_queue)
5964279858Sjfv{
5965279858Sjfv	uint32_t offset, qctl;
5966279858Sjfv	uint16_t itr_indx;
5967279858Sjfv
5968279858Sjfv	if (cur_type == I40E_QUEUE_TYPE_RX) {
5969279858Sjfv		offset = I40E_QINT_RQCTL(cur_queue);
5970279858Sjfv		itr_indx = vector->rxitr_idx;
5971279858Sjfv	} else {
5972279858Sjfv		offset = I40E_QINT_TQCTL(cur_queue);
5973279858Sjfv		itr_indx = vector->txitr_idx;
5974279858Sjfv	}
5975279858Sjfv
5976279858Sjfv	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5977279858Sjfv	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5978279858Sjfv	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5979279858Sjfv	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5980279858Sjfv	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5981279858Sjfv
5982279858Sjfv	wr32(&pf->hw, offset, qctl);
5983279858Sjfv
5984279858Sjfv	*last_type = cur_type;
5985279858Sjfv	*last_queue = cur_queue;
5986279858Sjfv}
5987279858Sjfv
5988279858Sjfvstatic void
5989279858Sjfvixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5990279858Sjfv    const struct i40e_virtchnl_vector_map *vector)
5991279858Sjfv{
5992279858Sjfv	struct i40e_hw *hw;
5993279858Sjfv	u_int qindex;
5994279858Sjfv	enum i40e_queue_type type, last_type;
5995279858Sjfv	uint32_t lnklst_reg;
5996279858Sjfv	uint16_t rxq_map, txq_map, cur_queue, last_queue;
5997279858Sjfv
5998279858Sjfv	hw = &pf->hw;
5999279858Sjfv
6000279858Sjfv	rxq_map = vector->rxq_map;
6001279858Sjfv	txq_map = vector->txq_map;
6002279858Sjfv
6003279858Sjfv	last_queue = IXL_END_OF_INTR_LNKLST;
6004279858Sjfv	last_type = I40E_QUEUE_TYPE_RX;
6005279858Sjfv
6006279858Sjfv	/*
6007279858Sjfv	 * The datasheet says that to optimize performance, RX and TX queues
6008279858Sjfv	 * should be interleaved in the interrupt linked list, so we process
6009279858Sjfv	 * both at once here.
6010279858Sjfv	 */
6011279858Sjfv	while ((rxq_map != 0) || (txq_map != 0)) {
6012279858Sjfv		if (txq_map != 0) {
6013279858Sjfv			qindex = ffs(txq_map) - 1;
6014279858Sjfv			type = I40E_QUEUE_TYPE_TX;
6015279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
6016279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
6017279858Sjfv			    &last_type, &last_queue);
6018279858Sjfv			txq_map &= ~(1 << qindex);
6019279858Sjfv		}
6020279858Sjfv
6021279858Sjfv		if (rxq_map != 0) {
6022279858Sjfv			qindex = ffs(rxq_map) - 1;
6023279858Sjfv			type = I40E_QUEUE_TYPE_RX;
6024279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
6025279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
6026279858Sjfv			    &last_type, &last_queue);
6027279858Sjfv			rxq_map &= ~(1 << qindex);
6028279858Sjfv		}
6029279858Sjfv	}
6030279858Sjfv
6031279858Sjfv	if (vector->vector_id == 0)
6032279858Sjfv		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
6033279858Sjfv	else
6034279858Sjfv		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
6035279858Sjfv		    vf->vf_num);
6036279858Sjfv	wr32(hw, lnklst_reg,
6037279858Sjfv	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
6038279858Sjfv	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
6039279858Sjfv
6040279858Sjfv	ixl_flush(hw);
6041279858Sjfv}
6042279858Sjfv
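/*
 * Handle a CONFIG_IRQ_MAP message.  Every vector map is validated before
 * it is applied: the vector id must fit within the VF's MSI-X allocation,
 * the RX and TX queue bitmaps must not reference queues beyond the VSI's
 * count, and the ITR indices must not exceed IXL_MAX_ITR_IDX.
 */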
6043279858Sjfvstatic void
6044279858Sjfvixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6045279858Sjfv    uint16_t msg_size)
6046279858Sjfv{
6047279858Sjfv	struct i40e_virtchnl_irq_map_info *map;
6048279858Sjfv	struct i40e_virtchnl_vector_map *vector;
6049279858Sjfv	struct i40e_hw *hw;
6050279858Sjfv	int i, largest_txq, largest_rxq;
6051279858Sjfv
6052279858Sjfv	hw = &pf->hw;
6053279858Sjfv
6054279858Sjfv	if (msg_size < sizeof(*map)) {
6055279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6056279858Sjfv		    I40E_ERR_PARAM);
6057279858Sjfv		return;
6058279858Sjfv	}
6059279858Sjfv
6060279858Sjfv	map = msg;
6061279858Sjfv	if (map->num_vectors == 0) {
6062279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6063279858Sjfv		    I40E_ERR_PARAM);
6064279858Sjfv		return;
6065279858Sjfv	}
6066279858Sjfv
6067279858Sjfv	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
6068279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6069279858Sjfv		    I40E_ERR_PARAM);
6070279858Sjfv		return;
6071279858Sjfv	}
6072279858Sjfv
6073279858Sjfv	for (i = 0; i < map->num_vectors; i++) {
6074279858Sjfv		vector = &map->vecmap[i];
6075279858Sjfv
6076279858Sjfv		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
6077279858Sjfv		    vector->vsi_id != vf->vsi.vsi_num) {
6078279858Sjfv			i40e_send_vf_nack(pf, vf,
6079279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
6080279858Sjfv			return;
6081279858Sjfv		}
6082279858Sjfv
6083279858Sjfv		if (vector->rxq_map != 0) {
6084279858Sjfv			largest_rxq = fls(vector->rxq_map) - 1;
6085279858Sjfv			if (largest_rxq >= vf->vsi.num_queues) {
6086279858Sjfv				i40e_send_vf_nack(pf, vf,
6087279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6088279858Sjfv				    I40E_ERR_PARAM);
6089279858Sjfv				return;
6090279858Sjfv			}
6091279858Sjfv		}
6092279858Sjfv
6093279858Sjfv		if (vector->txq_map != 0) {
6094279858Sjfv			largest_txq = fls(vector->txq_map) - 1;
6095279858Sjfv			if (largest_txq >= vf->vsi.num_queues) {
6096279858Sjfv				i40e_send_vf_nack(pf, vf,
6097279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6098279858Sjfv				    I40E_ERR_PARAM);
6099279858Sjfv				return;
6100279858Sjfv			}
6101279858Sjfv		}
6102279858Sjfv
6103279858Sjfv		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
6104279858Sjfv		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
6105279858Sjfv			i40e_send_vf_nack(pf, vf,
6106279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6107279858Sjfv			    I40E_ERR_PARAM);
6108279858Sjfv			return;
6109279858Sjfv		}
6110279858Sjfv
6111279858Sjfv		ixl_vf_config_vector(pf, vf, vector);
6112279858Sjfv	}
6113279858Sjfv
6114279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
6115279858Sjfv}
6116279858Sjfv
6117279858Sjfvstatic void
6118279858Sjfvixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6119279858Sjfv    uint16_t msg_size)
6120279858Sjfv{
6121279858Sjfv	struct i40e_virtchnl_queue_select *select;
6122279858Sjfv	int error;
6123279858Sjfv
6124279858Sjfv	if (msg_size != sizeof(*select)) {
6125279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6126279858Sjfv		    I40E_ERR_PARAM);
6127279858Sjfv		return;
6128279858Sjfv	}
6129279858Sjfv
6130279858Sjfv	select = msg;
6131279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
6132279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
6133279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6134279858Sjfv		    I40E_ERR_PARAM);
6135279858Sjfv		return;
6136279858Sjfv	}
6137279858Sjfv
6138279858Sjfv	error = ixl_enable_rings(&vf->vsi);
6139269198Sjfv	if (error) {
6140279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6141279858Sjfv		    I40E_ERR_TIMEOUT);
6142279858Sjfv		return;
6143269198Sjfv	}
6144269198Sjfv
6145279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6146269198Sjfv}
6147266423Sjfv
6148279858Sjfvstatic void
6149279858Sjfvixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6150279858Sjfv    void *msg, uint16_t msg_size)
6151279858Sjfv{
6152279858Sjfv	struct i40e_virtchnl_queue_select *select;
6153279858Sjfv	int error;
6154279858Sjfv
6155279858Sjfv	if (msg_size != sizeof(*select)) {
6156279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6157279858Sjfv		    I40E_ERR_PARAM);
6158279858Sjfv		return;
6159279858Sjfv	}
6160279858Sjfv
6161279858Sjfv	select = msg;
6162279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
6163279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
6164279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6165279858Sjfv		    I40E_ERR_PARAM);
6166279858Sjfv		return;
6167279858Sjfv	}
6168279858Sjfv
6169279858Sjfv	error = ixl_disable_rings(&vf->vsi);
6170279858Sjfv	if (error) {
6171279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6172279858Sjfv		    I40E_ERR_TIMEOUT);
6173279858Sjfv		return;
6174279858Sjfv	}
6175279858Sjfv
6176279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6177279858Sjfv}
6178279858Sjfv
6179279858Sjfvstatic boolean_t
6180279858Sjfvixl_zero_mac(const uint8_t *addr)
6181279858Sjfv{
6182279858Sjfv	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6183279858Sjfv
6184279858Sjfv	return (cmp_etheraddr(addr, zero));
6185279858Sjfv}
6186279858Sjfv
6187279858Sjfvstatic boolean_t
6188279858Sjfvixl_bcast_mac(const uint8_t *addr)
6189279858Sjfv{
6190279858Sjfv
6191279858Sjfv	return (cmp_etheraddr(addr, ixl_bcast_addr));
6192279858Sjfv}
6193279858Sjfv
6194279858Sjfvstatic int
6195279858Sjfvixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6196279858Sjfv{
6197279858Sjfv
6198279858Sjfv	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6199279858Sjfv		return (EINVAL);
6200279858Sjfv
6201279858Sjfv	/*
6202279858Sjfv	 * If the VF is not allowed to change its MAC address, don't let it
6203279858Sjfv	 * set a MAC filter for an address that is not a multicast address and
6204279858Sjfv	 * is not its assigned MAC.
6205279858Sjfv	 */
6206279858Sjfv	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6207279858Sjfv	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6208279858Sjfv		return (EPERM);
6209279858Sjfv
6210279858Sjfv	return (0);
6211279858Sjfv}
6212279858Sjfv
6213279858Sjfvstatic void
6214279858Sjfvixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6215279858Sjfv    uint16_t msg_size)
6216279858Sjfv{
6217279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6218279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6219279858Sjfv	struct ixl_vsi *vsi;
6220279858Sjfv	int i;
6221279858Sjfv	size_t expected_size;
6222279858Sjfv
6223279858Sjfv	vsi = &vf->vsi;
6224279858Sjfv
6225279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6226279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6227279858Sjfv		    I40E_ERR_PARAM);
6228279858Sjfv		return;
6229279858Sjfv	}
6230279858Sjfv
6231279858Sjfv	addr_list = msg;
6232279858Sjfv	expected_size = sizeof(*addr_list) +
6233279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6234279858Sjfv
6235279858Sjfv	if (addr_list->num_elements == 0 ||
6236279858Sjfv	    addr_list->vsi_id != vsi->vsi_num ||
6237279858Sjfv	    msg_size != expected_size) {
6238279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6239279858Sjfv		    I40E_ERR_PARAM);
6240279858Sjfv		return;
6241279858Sjfv	}
6242279858Sjfv
6243279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6244279858Sjfv		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6245279858Sjfv			i40e_send_vf_nack(pf, vf,
6246279858Sjfv			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6247279858Sjfv			return;
6248279858Sjfv		}
6249279858Sjfv	}
6250279858Sjfv
6251279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6252279858Sjfv		addr = &addr_list->list[i];
6253279858Sjfv		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6254279858Sjfv	}
6255279858Sjfv
6256279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6257279858Sjfv}
6258279858Sjfv
6259279858Sjfvstatic void
6260279858Sjfvixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6261279858Sjfv    uint16_t msg_size)
6262279858Sjfv{
6263279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6264279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6265279858Sjfv	size_t expected_size;
6266279858Sjfv	int i;
6267279858Sjfv
6268279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6269279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6270279858Sjfv		    I40E_ERR_PARAM);
6271279858Sjfv		return;
6272279858Sjfv	}
6273279858Sjfv
6274279858Sjfv	addr_list = msg;
6275279858Sjfv	expected_size = sizeof(*addr_list) +
6276279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6277279858Sjfv
6278279858Sjfv	if (addr_list->num_elements == 0 ||
6279279858Sjfv	    addr_list->vsi_id != vf->vsi.vsi_num ||
6280279858Sjfv	    msg_size != expected_size) {
6281279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6282279858Sjfv		    I40E_ERR_PARAM);
6283279858Sjfv		return;
6284279858Sjfv	}
6285279858Sjfv
6286279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6287279858Sjfv		addr = &addr_list->list[i];
6288279858Sjfv		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6289279858Sjfv			i40e_send_vf_nack(pf, vf,
6290279858Sjfv			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
6291279858Sjfv			return;
6292279858Sjfv		}
6293279858Sjfv	}
6294279858Sjfv
6295279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6296279858Sjfv		addr = &addr_list->list[i];
6297279858Sjfv		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6298279858Sjfv	}
6299279858Sjfv
6300279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6301279858Sjfv}
6302279858Sjfv
6303279858Sjfvstatic enum i40e_status_code
6304279858Sjfvixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6305279858Sjfv{
6306279858Sjfv	struct i40e_vsi_context vsi_ctx;
6307279858Sjfv
6308279858Sjfv	vsi_ctx.seid = vf->vsi.seid;
6309279858Sjfv
6310279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6311279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6312279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6313279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6314279858Sjfv	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6315279858Sjfv}
6316279858Sjfv
6317279858Sjfvstatic void
6318279858Sjfvixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6319279858Sjfv    uint16_t msg_size)
6320279858Sjfv{
6321279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6322279858Sjfv	enum i40e_status_code code;
6323279858Sjfv	size_t expected_size;
6324279858Sjfv	int i;
6325279858Sjfv
6326279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6327279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6328279858Sjfv		    I40E_ERR_PARAM);
6329279858Sjfv		return;
6330279858Sjfv	}
6331279858Sjfv
6332279858Sjfv	filter_list = msg;
6333279858Sjfv	expected_size = sizeof(*filter_list) +
6334279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6335279858Sjfv	if (filter_list->num_elements == 0 ||
6336279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6337279858Sjfv	    msg_size != expected_size) {
6338279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6339279858Sjfv		    I40E_ERR_PARAM);
6340279858Sjfv		return;
6341279858Sjfv	}
6342279858Sjfv
6343279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6344279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6345279858Sjfv		    I40E_ERR_PARAM);
6346279858Sjfv		return;
6347279858Sjfv	}
6348279858Sjfv
6349279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6350279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6351279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6352279858Sjfv			    I40E_ERR_PARAM);
6353279858Sjfv			return;
6354279858Sjfv		}
6355279858Sjfv	}
6356279858Sjfv
6357279858Sjfv	code = ixl_vf_enable_vlan_strip(pf, vf);
6358279858Sjfv	if (code != I40E_SUCCESS) {
6359279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6360279858Sjfv		    I40E_ERR_PARAM);
		return;
6361279858Sjfv	}
6362279858Sjfv
6363279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6364279858Sjfv		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6365279858Sjfv
6366279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6367279858Sjfv}
6368279858Sjfv
6369279858Sjfvstatic void
6370279858Sjfvixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6371279858Sjfv    uint16_t msg_size)
6372279858Sjfv{
6373279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6374279858Sjfv	int i;
6375279858Sjfv	size_t expected_size;
6376279858Sjfv
6377279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6378279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6379279858Sjfv		    I40E_ERR_PARAM);
6380279858Sjfv		return;
6381279858Sjfv	}
6382279858Sjfv
6383279858Sjfv	filter_list = msg;
6384279858Sjfv	expected_size = sizeof(*filter_list) +
6385279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6386279858Sjfv	if (filter_list->num_elements == 0 ||
6387279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6388279858Sjfv	    msg_size != expected_size) {
6389279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6390279858Sjfv		    I40E_ERR_PARAM);
6391279858Sjfv		return;
6392279858Sjfv	}
6393279858Sjfv
6394279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6395279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6396279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6397279858Sjfv			    I40E_ERR_PARAM);
6398279858Sjfv			return;
6399279858Sjfv		}
6400279858Sjfv	}
6401279858Sjfv
6402279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6403279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6404279858Sjfv		    I40E_ERR_PARAM);
6405279858Sjfv		return;
6406279858Sjfv	}
6407279858Sjfv
6408279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6409279858Sjfv		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6410279858Sjfv
6411279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6412279858Sjfv}
6413279858Sjfv
6414279858Sjfvstatic void
6415279858Sjfvixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6416279858Sjfv    void *msg, uint16_t msg_size)
6417279858Sjfv{
6418279858Sjfv	struct i40e_virtchnl_promisc_info *info;
6419279858Sjfv	enum i40e_status_code code;
6420279858Sjfv
6421279858Sjfv	if (msg_size != sizeof(*info)) {
6422279858Sjfv		i40e_send_vf_nack(pf, vf,
6423279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6424279858Sjfv		return;
6425279858Sjfv	}
6426279858Sjfv
6427295787Skevlo	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
6428279858Sjfv		i40e_send_vf_nack(pf, vf,
6429279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6430279858Sjfv		return;
6431279858Sjfv	}
6432279858Sjfv
6433279858Sjfv	info = msg;
6434279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
6435279858Sjfv		i40e_send_vf_nack(pf, vf,
6436279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6437279858Sjfv		return;
6438279858Sjfv	}
6439279858Sjfv
6440279858Sjfv	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6441279858Sjfv	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6442279858Sjfv	if (code != I40E_SUCCESS) {
6443279858Sjfv		i40e_send_vf_nack(pf, vf,
6444279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6445279858Sjfv		return;
6446279858Sjfv	}
6447279858Sjfv
6448279858Sjfv	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6449279858Sjfv	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6450279858Sjfv	if (code != I40E_SUCCESS) {
6451279858Sjfv		i40e_send_vf_nack(pf, vf,
6452279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6453279858Sjfv		return;
6454279858Sjfv	}
6455279858Sjfv
6456279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6457279858Sjfv}
6458279858Sjfv
6459279858Sjfvstatic void
6460279858Sjfvixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6461279858Sjfv    uint16_t msg_size)
6462279858Sjfv{
6463279858Sjfv	struct i40e_virtchnl_queue_select *queue;
6464279858Sjfv
6465279858Sjfv	if (msg_size != sizeof(*queue)) {
6466279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6467279858Sjfv		    I40E_ERR_PARAM);
6468279858Sjfv		return;
6469279858Sjfv	}
6470279858Sjfv
6471279858Sjfv	queue = msg;
6472279858Sjfv	if (queue->vsi_id != vf->vsi.vsi_num) {
6473279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6474279858Sjfv		    I40E_ERR_PARAM);
6475279858Sjfv		return;
6476279858Sjfv	}
6477279858Sjfv
6478279858Sjfv	ixl_update_eth_stats(&vf->vsi);
6479279858Sjfv
6480279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6481279858Sjfv	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6482279858Sjfv}
6483279858Sjfv
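/*
 * Dispatch one VF mailbox message received on the PF admin queue.  The VF
 * number is recovered from the descriptor's retval field (less the
 * function's vf_base_id) and the virtchnl opcode from cookie_high;
 * unknown or superseded opcodes are NACKed as not implemented.
 */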
6484279858Sjfvstatic void
6485279858Sjfvixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6486279858Sjfv{
6487279858Sjfv	struct ixl_vf *vf;
6488279858Sjfv	void *msg;
6489279858Sjfv	uint16_t vf_num, msg_size;
6490279858Sjfv	uint32_t opcode;
6491279858Sjfv
6492279858Sjfv	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6493279858Sjfv	opcode = le32toh(event->desc.cookie_high);
6494279858Sjfv
6495279858Sjfv	if (vf_num >= pf->num_vfs) {
6496279858Sjfv		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6497279858Sjfv		return;
6498279858Sjfv	}
6499279858Sjfv
6500279858Sjfv	vf = &pf->vfs[vf_num];
6501279858Sjfv	msg = event->msg_buf;
6502279858Sjfv	msg_size = event->msg_len;
6503279858Sjfv
6504279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6505279858Sjfv	    "Got msg %s(%d) from VF-%d of size %d\n",
6506279858Sjfv	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6507279858Sjfv
6508279858Sjfv	switch (opcode) {
6509279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
6510279858Sjfv		ixl_vf_version_msg(pf, vf, msg, msg_size);
6511279858Sjfv		break;
6512279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
6513279858Sjfv		ixl_vf_reset_msg(pf, vf, msg, msg_size);
6514279858Sjfv		break;
6515279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6516279858Sjfv		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6517279858Sjfv		break;
6518279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6519279858Sjfv		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6520279858Sjfv		break;
6521279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6522279858Sjfv		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6523279858Sjfv		break;
6524279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6525279858Sjfv		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6526279858Sjfv		break;
6527279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6528279858Sjfv		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6529279858Sjfv		break;
6530279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6531279858Sjfv		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6532279858Sjfv		break;
6533279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6534279858Sjfv		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6535279858Sjfv		break;
6536279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
6537279858Sjfv		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6538279858Sjfv		break;
6539279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
6540279858Sjfv		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6541279858Sjfv		break;
6542279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6543279858Sjfv		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6544279858Sjfv		break;
6545279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
6546279858Sjfv		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6547279858Sjfv		break;
6548279858Sjfv
6549279858Sjfv	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6550279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6551279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6552279858Sjfv	default:
6553279858Sjfv		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6554279858Sjfv		break;
6555279858Sjfv	}
6556279858Sjfv}
6557279858Sjfv
6558279858Sjfv/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
6559279858Sjfvstatic void
6560279858Sjfvixl_handle_vflr(void *arg, int pending)
6561279858Sjfv{
6562279858Sjfv	struct ixl_pf *pf;
6563279858Sjfv	struct i40e_hw *hw;
6564279858Sjfv	uint16_t global_vf_num;
6565279858Sjfv	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6566279858Sjfv	int i;
6567279858Sjfv
6568279858Sjfv	pf = arg;
6569279858Sjfv	hw = &pf->hw;
6570279858Sjfv
6571279858Sjfv	IXL_PF_LOCK(pf);
6572279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
6573279858Sjfv		global_vf_num = hw->func_caps.vf_base_id + i;
6574279858Sjfv
6575279858Sjfv		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6576279858Sjfv		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6577279858Sjfv		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6578279858Sjfv		if (vflrstat & vflrstat_mask) {
6579279858Sjfv			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6580279858Sjfv			    vflrstat_mask);
6581279858Sjfv
6582279858Sjfv			ixl_reinit_vf(pf, &pf->vfs[i]);
6583279858Sjfv		}
6584279858Sjfv	}
6585279858Sjfv
6586279858Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6587279858Sjfv	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6588279858Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6589279858Sjfv	ixl_flush(hw);
6590279858Sjfv
6591279858Sjfv	IXL_PF_UNLOCK(pf);
6592279858Sjfv}
6593279858Sjfv
6594279858Sjfvstatic int
6595279858Sjfvixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6596279858Sjfv{
6597279858Sjfv
6598279858Sjfv	switch (err) {
6599279858Sjfv	case I40E_AQ_RC_EPERM:
6600279858Sjfv		return (EPERM);
6601279858Sjfv	case I40E_AQ_RC_ENOENT:
6602279858Sjfv		return (ENOENT);
6603279858Sjfv	case I40E_AQ_RC_ESRCH:
6604279858Sjfv		return (ESRCH);
6605279858Sjfv	case I40E_AQ_RC_EINTR:
6606279858Sjfv		return (EINTR);
6607279858Sjfv	case I40E_AQ_RC_EIO:
6608279858Sjfv		return (EIO);
6609279858Sjfv	case I40E_AQ_RC_ENXIO:
6610279858Sjfv		return (ENXIO);
6611279858Sjfv	case I40E_AQ_RC_E2BIG:
6612279858Sjfv		return (E2BIG);
6613279858Sjfv	case I40E_AQ_RC_EAGAIN:
6614279858Sjfv		return (EAGAIN);
6615279858Sjfv	case I40E_AQ_RC_ENOMEM:
6616279858Sjfv		return (ENOMEM);
6617279858Sjfv	case I40E_AQ_RC_EACCES:
6618279858Sjfv		return (EACCES);
6619279858Sjfv	case I40E_AQ_RC_EFAULT:
6620279858Sjfv		return (EFAULT);
6621279858Sjfv	case I40E_AQ_RC_EBUSY:
6622279858Sjfv		return (EBUSY);
6623279858Sjfv	case I40E_AQ_RC_EEXIST:
6624279858Sjfv		return (EEXIST);
6625279858Sjfv	case I40E_AQ_RC_EINVAL:
6626279858Sjfv		return (EINVAL);
6627279858Sjfv	case I40E_AQ_RC_ENOTTY:
6628279858Sjfv		return (ENOTTY);
6629279858Sjfv	case I40E_AQ_RC_ENOSPC:
6630279858Sjfv		return (ENOSPC);
6631279858Sjfv	case I40E_AQ_RC_ENOSYS:
6632279858Sjfv		return (ENOSYS);
6633279858Sjfv	case I40E_AQ_RC_ERANGE:
6634279858Sjfv		return (ERANGE);
6635279858Sjfv	case I40E_AQ_RC_EFLUSHED:
6636279858Sjfv		return (EINVAL);	/* No exact equivalent in errno.h */
6637279858Sjfv	case I40E_AQ_RC_BAD_ADDR:
6638279858Sjfv		return (EFAULT);
6639279858Sjfv	case I40E_AQ_RC_EMODE:
6640279858Sjfv		return (EPERM);
6641279858Sjfv	case I40E_AQ_RC_EFBIG:
6642279858Sjfv		return (EFBIG);
6643279858Sjfv	default:
6644279858Sjfv		return (EINVAL);
6645279858Sjfv	}
6646279858Sjfv}
6647279858Sjfv
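/*
 * SR-IOV initialization callback, invoked when VFs are being created on
 * this PF.  Allocates the per-VF state array and adds a VEB uplinked to
 * the PF's VSI so that the VF VSIs allocated later have a switching
 * element to attach to.
 */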
6648279858Sjfvstatic int
6649299546Serjixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6650279858Sjfv{
6651279858Sjfv	struct ixl_pf *pf;
6652279858Sjfv	struct i40e_hw *hw;
6653279858Sjfv	struct ixl_vsi *pf_vsi;
6654279858Sjfv	enum i40e_status_code ret;
6655279858Sjfv	int i, error;
6656279858Sjfv
6657279858Sjfv	pf = device_get_softc(dev);
6658279858Sjfv	hw = &pf->hw;
6659279858Sjfv	pf_vsi = &pf->vsi;
6660279858Sjfv
6661279858Sjfv	IXL_PF_LOCK(pf);
6662279858Sjfv	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6663279858Sjfv	    M_ZERO);
6664279858Sjfv
6665279858Sjfv	if (pf->vfs == NULL) {
6666279858Sjfv		error = ENOMEM;
6667279858Sjfv		goto fail;
6668279858Sjfv	}
6669279858Sjfv
6670279858Sjfv	for (i = 0; i < num_vfs; i++)
6671279858Sjfv		sysctl_ctx_init(&pf->vfs[i].ctx);
6672279858Sjfv
6673279858Sjfv	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6674279858Sjfv	    1, FALSE, FALSE, &pf->veb_seid, NULL);
6675279858Sjfv	if (ret != I40E_SUCCESS) {
6676279858Sjfv		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6677279858Sjfv		device_printf(dev, "add_veb failed; code=%d error=%d\n", ret,
6678279858Sjfv		    error);
6679279858Sjfv		goto fail;
6680279858Sjfv	}
6681279858Sjfv
6682279858Sjfv	ixl_configure_msix(pf);
6683279858Sjfv	ixl_enable_adminq(hw);
6684279858Sjfv
6685279858Sjfv	pf->num_vfs = num_vfs;
6686279858Sjfv	IXL_PF_UNLOCK(pf);
6687279858Sjfv	return (0);
6688279858Sjfv
6689279858Sjfvfail:
6690279858Sjfv	free(pf->vfs, M_IXL);
6691279858Sjfv	pf->vfs = NULL;
6692279858Sjfv	IXL_PF_UNLOCK(pf);
6693279858Sjfv	return (error);
6694279858Sjfv}
6695279858Sjfv
6696279858Sjfvstatic void
6697299546Serjixl_iov_uninit(device_t dev)
6698279858Sjfv{
6699279858Sjfv	struct ixl_pf *pf;
6700279858Sjfv	struct i40e_hw *hw;
6701279858Sjfv	struct ixl_vsi *vsi;
6702279858Sjfv	struct ifnet *ifp;
6703279858Sjfv	struct ixl_vf *vfs;
6704279858Sjfv	int i, num_vfs;
6705279858Sjfv
6706279858Sjfv	pf = device_get_softc(dev);
6707279858Sjfv	hw = &pf->hw;
6708279858Sjfv	vsi = &pf->vsi;
6709279858Sjfv	ifp = vsi->ifp;
6710279858Sjfv
6711279858Sjfv	IXL_PF_LOCK(pf);
6712279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
6713279858Sjfv		if (pf->vfs[i].vsi.seid != 0)
6714279858Sjfv			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6715279858Sjfv	}
6716279858Sjfv
6717279858Sjfv	if (pf->veb_seid != 0) {
6718279858Sjfv		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6719279858Sjfv		pf->veb_seid = 0;
6720279858Sjfv	}
6721279858Sjfv
6722279858Sjfv	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6723279858Sjfv		ixl_disable_intr(vsi);
6724279858Sjfv
6725279858Sjfv	vfs = pf->vfs;
6726279858Sjfv	num_vfs = pf->num_vfs;
6727279858Sjfv
6728279858Sjfv	pf->vfs = NULL;
6729279858Sjfv	pf->num_vfs = 0;
6730279858Sjfv	IXL_PF_UNLOCK(pf);
6731279858Sjfv
6732279858Sjfv	/* Do this after the unlock as sysctl_ctx_free might sleep. */
6733279858Sjfv	for (i = 0; i < num_vfs; i++)
6734279858Sjfv		sysctl_ctx_free(&vfs[i].ctx);
6735279858Sjfv	free(vfs, M_IXL);
6736279858Sjfv}
6737279858Sjfv
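/*
 * Per-VF creation callback for the SR-IOV framework.  The nvlist carries
 * the configuration the administrator supplied for this VF (for example
 * via a VF section in iovctl.conf(5)): an optional "mac-addr" plus the
 * "allow-set-mac", "mac-anti-spoof" and "allow-promisc" booleans, which
 * are translated into the VF_FLAG_* capability bits below.
 */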
6738279858Sjfvstatic int
6739279858Sjfvixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6740279858Sjfv{
6741279858Sjfv	char sysctl_name[QUEUE_NAME_LEN];
6742279858Sjfv	struct ixl_pf *pf;
6743279858Sjfv	struct ixl_vf *vf;
6744279858Sjfv	const void *mac;
6745279858Sjfv	size_t size;
6746279858Sjfv	int error;
6747279858Sjfv
6748279858Sjfv	pf = device_get_softc(dev);
6749279858Sjfv	vf = &pf->vfs[vfnum];
6750279858Sjfv
6751279858Sjfv	IXL_PF_LOCK(pf);
6752279858Sjfv	vf->vf_num = vfnum;
6753279858Sjfv
6754279858Sjfv	vf->vsi.back = pf;
6755279858Sjfv	vf->vf_flags = VF_FLAG_ENABLED;
6756279858Sjfv	SLIST_INIT(&vf->vsi.ftl);
6757279858Sjfv
6758279858Sjfv	error = ixl_vf_setup_vsi(pf, vf);
6759279858Sjfv	if (error != 0)
6760279858Sjfv		goto out;
6761279858Sjfv
6762279858Sjfv	if (nvlist_exists_binary(params, "mac-addr")) {
6763279858Sjfv		mac = nvlist_get_binary(params, "mac-addr", &size);
6764279858Sjfv		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6765279858Sjfv
6766279858Sjfv		if (nvlist_get_bool(params, "allow-set-mac"))
6767279858Sjfv			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6768279858Sjfv	} else
6769279858Sjfv		/*
6770279858Sjfv		 * If the administrator has not specified a MAC address then
6771279858Sjfv		 * we must allow the VF to choose one.
6772279858Sjfv		 */
6773279858Sjfv		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6774279858Sjfv
6775279858Sjfv	if (nvlist_get_bool(params, "mac-anti-spoof"))
6776279858Sjfv		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6777279858Sjfv
6778279858Sjfv	if (nvlist_get_bool(params, "allow-promisc"))
6779279858Sjfv		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
6780279858Sjfv
6781279858Sjfv	vf->vf_flags |= VF_FLAG_VLAN_CAP;
6782279858Sjfv
6783279858Sjfv	ixl_reset_vf(pf, vf);
6784279858Sjfvout:
6785279858Sjfv	IXL_PF_UNLOCK(pf);
6786279858Sjfv	if (error == 0) {
6787279858Sjfv		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6788279858Sjfv		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6789279858Sjfv	}
6790279858Sjfv
6791279858Sjfv	return (error);
6792279858Sjfv}
6793279858Sjfv#endif /* PCI_IOV */
6794