/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixl/if_ixl.c 279860 2015-03-10 19:55:43Z jfv $*/

#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif

#include "ixl.h"
#include "ixl_pf.h"

#ifdef RSS
#include <net/rss_config.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.4.1";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_switch_config(struct ixl_pf *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);
static int	ixl_set_advertised_speeds(struct ixl_pf *, int);

static int	ixl_enable_rings(struct ixl_vsi *);
static int	ixl_disable_rings(struct ixl_vsi *);
static void	ixl_enable_intr(struct ixl_vsi *);
static void	ixl_disable_intr(struct ixl_vsi *);
static void	ixl_disable_rings_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
static void	ixl_free_mac_filters(struct ixl_vsi *vsi);


/* Sysctl debug interface */
static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);

/* The MSI/X Interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_update_vsi_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);

#ifdef IXL_DEBUG_SYSCTL
static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
#endif

#ifdef PCI_IOV
static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);

static int	ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
static void	ixl_uninit_iov(device_t dev);
static int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);

static void	ixl_handle_vf_msg(struct ixl_pf *,
		    struct i40e_arq_event_info *);
static void	ixl_handle_vflr(void *arg, int pending);

static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_init_iov, ixl_init_iov),
	DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
	DEVMETHOD(pci_add_vf, ixl_add_vf),
#endif
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);

#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */


/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually; if left at 0, the
** number of queues will be calculated based
** on the CPUs and MSI-X vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");
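
/*
** Example (illustrative only): the loader tunables declared above can
** be set from /boot/loader.conf before the module loads, e.g.:
**
**   hw.ixl.enable_msix=1
**   hw.ixl.max_queues=4
**   hw.ixl.dynamic_rx_itr=1
**   hw.ixl.dynamic_tx_itr=1
**
** The values shown are placeholders, not recommendations.
*/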

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi *vsi;
	u16		bus;
	int             error = 0;
#ifdef PCI_IOV
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;
#endif

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI;
	** this could be enhanced later to allocate multiple.
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init*/
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG_SYSCTL
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
#endif

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	pf->vc_debug_lvl = 1;

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Create for initial debugging use */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");


	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev,"PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));

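	/*
	** Compare the firmware API version reported by the NVM with the
	** version this driver expects; warn if it is newer, or more than
	** one minor revision older.
	*/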
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up VSI and queues */
	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		error = ixl_assign_vsi_msix(pf);
	else
		error = ixl_assign_vsi_legacy(pf);
	if (error)
		goto err_late;

	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error)
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
	}

	/* Determine link state */
	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
	pf->link_up = i40e_get_link_status(hw);

	/* Setup OS specific network interface */
	if (ixl_setup_interface(dev, vsi) != 0) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial switch config failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Limit phy interrupts to link and modules failure */
	error = i40e_aq_set_phy_int_mask(hw,
	    I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
        if (error)
		device_printf(dev, "set phy mask failed: %d\n", error);

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize statistics */
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
	/* SR-IOV is only supported when MSI-X is in use. */
	if (pf->msix > 1) {
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);

		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (iov_error != 0)
			device_printf(dev,
			    "Failed to initialize SR-IOV (error=%d)\n",
			    iov_error);
	}
#endif

#ifdef DEV_NETMAP
	ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */

	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	i40e_status		status;
#ifdef PCI_IOV
	int			error;
#endif

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif

	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		IXL_PF_LOCK(pf);
		ixl_stop(pf);
		IXL_PF_UNLOCK(pf);
	}

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->task);
			taskqueue_drain(que->tq, &que->tx_task);
			taskqueue_free(que->tq);
		}
	}

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	callout_drain(&pf->timer);
#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);
	return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	int             error, len;
	u16		needed;
	bool		again = TRUE;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
                return (ENOMEM);
	}

	/* This populates the hw struct */
        error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}

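/*
** Keep the TX checksum and TSO interface capabilities consistent:
** TSO requires TX checksum offload, so changing one may force the
** other, and IXL_FLAGS_KEEP_TSO4/6 remember a TSO setting that is
** restored when checksum offload is turned back on.
*/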
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t 	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = vsi->back;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

        case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else
				ixl_init_locked(pf);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_stop(pf);
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t 	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");
	ixl_stop(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    i40e_validate_mac_addr(tmpaddr)) {
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address "
			    "change failed!\n");
			return;
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/*
	** Prepare the VSI: rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

	/* Set MTU in hardware*/
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
			"aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
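/*
** Deferred task for a queue pair: re-polls the RX and TX rings,
** restarts any queued transmits, and either reschedules itself
** (if RX work remains) or re-enables the queue interrupt.
*/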
static void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Reenable this interrupt - hmmm */
	ixl_enable_queue(hw, que->me);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw =  &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
        u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
	}
#endif

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
1422279858Sjfv	struct ixl_pf	*pf = vsi->back;
1423266423Sjfv	struct i40e_hw  *hw = &pf->hw;
1424266423Sjfv
1425270346Sjfv	INIT_DEBUGOUT("ixl_media_status: begin");
1426270346Sjfv	IXL_PF_LOCK(pf);
1427266423Sjfv
1428279858Sjfv	hw->phy.get_link_info = TRUE;
1429279858Sjfv	pf->link_up = i40e_get_link_status(hw);
1430270346Sjfv	ixl_update_link_status(pf);
1431266423Sjfv
1432266423Sjfv	ifmr->ifm_status = IFM_AVALID;
1433266423Sjfv	ifmr->ifm_active = IFM_ETHER;
1434266423Sjfv
1435279858Sjfv	if (!pf->link_up) {
1436270346Sjfv		IXL_PF_UNLOCK(pf);
1437266423Sjfv		return;
1438266423Sjfv	}
1439266423Sjfv
1440266423Sjfv	ifmr->ifm_status |= IFM_ACTIVE;
1441266423Sjfv	/* Hardware is always full-duplex */
1442266423Sjfv	ifmr->ifm_active |= IFM_FDX;
1443266423Sjfv
1444266423Sjfv	switch (hw->phy.link_info.phy_type) {
1445266423Sjfv		/* 100 M */
1446266423Sjfv		case I40E_PHY_TYPE_100BASE_TX:
1447266423Sjfv			ifmr->ifm_active |= IFM_100_TX;
1448266423Sjfv			break;
1449266423Sjfv		/* 1 G */
1450266423Sjfv		case I40E_PHY_TYPE_1000BASE_T:
1451266423Sjfv			ifmr->ifm_active |= IFM_1000_T;
1452266423Sjfv			break;
1453269198Sjfv		case I40E_PHY_TYPE_1000BASE_SX:
1454269198Sjfv			ifmr->ifm_active |= IFM_1000_SX;
1455269198Sjfv			break;
1456269198Sjfv		case I40E_PHY_TYPE_1000BASE_LX:
1457269198Sjfv			ifmr->ifm_active |= IFM_1000_LX;
1458269198Sjfv			break;
1459266423Sjfv		/* 10 G */
1460279033Sjfv		case I40E_PHY_TYPE_10GBASE_CR1:
1461266423Sjfv		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1462266423Sjfv		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1463279033Sjfv		/* Use this until a real KR media type is available */
1464279033Sjfv		case I40E_PHY_TYPE_10GBASE_KR:
1465279033Sjfv		case I40E_PHY_TYPE_10GBASE_KX4:
1466266423Sjfv			ifmr->ifm_active |= IFM_10G_TWINAX;
1467266423Sjfv			break;
1468266423Sjfv		case I40E_PHY_TYPE_10GBASE_SR:
1469266423Sjfv			ifmr->ifm_active |= IFM_10G_SR;
1470266423Sjfv			break;
1471266423Sjfv		case I40E_PHY_TYPE_10GBASE_LR:
1472266423Sjfv			ifmr->ifm_active |= IFM_10G_LR;
1473266423Sjfv			break;
1474270346Sjfv		case I40E_PHY_TYPE_10GBASE_T:
1475270346Sjfv			ifmr->ifm_active |= IFM_10G_T;
1476270346Sjfv			break;
1477266423Sjfv		/* 40 G */
1478266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4:
1479266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1480266423Sjfv			ifmr->ifm_active |= IFM_40G_CR4;
1481266423Sjfv			break;
1482266423Sjfv		case I40E_PHY_TYPE_40GBASE_SR4:
1483266423Sjfv			ifmr->ifm_active |= IFM_40G_SR4;
1484266423Sjfv			break;
1485266423Sjfv		case I40E_PHY_TYPE_40GBASE_LR4:
1486266423Sjfv			ifmr->ifm_active |= IFM_40G_LR4;
1487266423Sjfv			break;
1488279033Sjfv		/*
1489279033Sjfv		** Set these to CR4 because OS does not
1490279033Sjfv		** have types available yet.
1491279033Sjfv		*/
1492279033Sjfv		case I40E_PHY_TYPE_40GBASE_KR4:
1493279033Sjfv		case I40E_PHY_TYPE_XLAUI:
1494279033Sjfv		case I40E_PHY_TYPE_XLPPI:
1495279033Sjfv		case I40E_PHY_TYPE_40GBASE_AOC:
1496279033Sjfv			ifmr->ifm_active |= IFM_40G_CR4;
1497279033Sjfv			break;
1498266423Sjfv		default:
1499266423Sjfv			ifmr->ifm_active |= IFM_UNKNOWN;
1500266423Sjfv			break;
1501266423Sjfv	}
1502266423Sjfv	/* Report flow control status as well */
1503266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1504266423Sjfv		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1505266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1506266423Sjfv		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1507266423Sjfv
1508270346Sjfv	IXL_PF_UNLOCK(pf);
1509266423Sjfv
1510266423Sjfv	return;
1511266423Sjfv}
1512266423Sjfv
1513266423Sjfv/*********************************************************************
1514266423Sjfv *
1515266423Sjfv *  Media Ioctl callback
1516266423Sjfv *
1517266423Sjfv *  This routine is called when the user changes speed/duplex using
1518266423Sjfv *  media/mediaopt option with ifconfig.
1519266423Sjfv *
1520266423Sjfv **********************************************************************/
1521266423Sjfvstatic int
1522270346Sjfvixl_media_change(struct ifnet * ifp)
1523266423Sjfv{
1524270346Sjfv	struct ixl_vsi *vsi = ifp->if_softc;
1525266423Sjfv	struct ifmedia *ifm = &vsi->media;
1526266423Sjfv
1527270346Sjfv	INIT_DEBUGOUT("ixl_media_change: begin");
1528266423Sjfv
1529266423Sjfv	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1530266423Sjfv		return (EINVAL);
1531266423Sjfv
1532269198Sjfv	if_printf(ifp, "Media change is currently not supported.\n");
1533269198Sjfv
1534269198Sjfv	return (ENODEV);
1535266423Sjfv}
1536266423Sjfv
1537266423Sjfv
1538270346Sjfv#ifdef IXL_FDIR
1539266423Sjfv/*
1540266423Sjfv** ATR: Application Targeted Receive - creates a filter
1541266423Sjfv**	based on TX flow info that will keep the receive
1542266423Sjfv**	portion of the flow on the same queue. Based on the
1543266423Sjfv**	implementation this is only available for TCP connections
1544266423Sjfv*/
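/*
** In this implementation the filter is programmed inline: one slot in
** the TX descriptor ring is consumed and an i40e_filter_program_desc
** is written into it; the DTYPE field marks it as a filter-programming
** descriptor rather than a data descriptor.
*/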
1545266423Sjfvvoid
1546270346Sjfvixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1547266423Sjfv{
1548270346Sjfv	struct ixl_vsi			*vsi = que->vsi;
1549266423Sjfv	struct tx_ring			*txr = &que->txr;
1550266423Sjfv	struct i40e_filter_program_desc	*FDIR;
1551266423Sjfv	u32				ptype, dtype;
1552266423Sjfv	int				idx;
1553266423Sjfv
1554266423Sjfv	/* check if ATR is enabled and sample rate */
1555270346Sjfv	if ((!ixl_enable_fdir) || (!txr->atr_rate))
1556266423Sjfv		return;
1557266423Sjfv	/*
1558266423Sjfv	** We sample all TCP SYN/FIN packets,
1559266423Sjfv	** or at the selected sample rate
1560266423Sjfv	*/
1561266423Sjfv	txr->atr_count++;
1562266423Sjfv	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1563266423Sjfv	    (txr->atr_count < txr->atr_rate))
1564266423Sjfv                return;
1565266423Sjfv	txr->atr_count = 0;
1566266423Sjfv
1567266423Sjfv	/* Get a descriptor to use */
1568266423Sjfv	idx = txr->next_avail;
1569266423Sjfv	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1570266423Sjfv	if (++idx == que->num_desc)
1571266423Sjfv		idx = 0;
1572266423Sjfv	txr->avail--;
1573266423Sjfv	txr->next_avail = idx;
1574266423Sjfv
1575266423Sjfv	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1576266423Sjfv	    I40E_TXD_FLTR_QW0_QINDEX_MASK;
1577266423Sjfv
1578266423Sjfv	ptype |= (etype == ETHERTYPE_IP) ?
1579266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1580266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1581266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1582266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1583266423Sjfv
1584266423Sjfv	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1585266423Sjfv
1586266423Sjfv	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1587266423Sjfv
1588266423Sjfv	/*
1589266423Sjfv	** We use the TCP TH_FIN as a trigger to remove
1590266423Sjfv	** the filter, otherwise it's an update.
1591266423Sjfv	*/
1592266423Sjfv	dtype |= (th->th_flags & TH_FIN) ?
1593266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1594266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1595266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1596266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1597266423Sjfv
1598266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1599266423Sjfv	    I40E_TXD_FLTR_QW1_DEST_SHIFT;
1600266423Sjfv
1601266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1602266423Sjfv	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1603266423Sjfv
1604266423Sjfv	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1605266423Sjfv	FDIR->dtype_cmd_cntindex = htole32(dtype);
1606266423Sjfv	return;
1607266423Sjfv}
1608266423Sjfv#endif
1609266423Sjfv
1610266423Sjfv
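/*
** Set promiscuous modes on the VSI: unicast promiscuous follows
** IFF_PROMISC, and multicast promiscuous is used either when
** IFF_ALLMULTI is set or when the interface has more multicast
** addresses than MAX_MULTICAST_ADDR allows.
*/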
1611266423Sjfvstatic void
1612270346Sjfvixl_set_promisc(struct ixl_vsi *vsi)
1613266423Sjfv{
1614266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1615266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1616266423Sjfv	int		err, mcnt = 0;
1617266423Sjfv	bool		uni = FALSE, multi = FALSE;
1618266423Sjfv
1619266423Sjfv	if (ifp->if_flags & IFF_ALLMULTI)
1620266423Sjfv                multi = TRUE;
1621266423Sjfv	else { /* Need to count the multicast addresses */
1622266423Sjfv		struct  ifmultiaddr *ifma;
1623266423Sjfv		if_maddr_rlock(ifp);
1624266423Sjfv		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1625266423Sjfv                        if (ifma->ifma_addr->sa_family != AF_LINK)
1626266423Sjfv                                continue;
1627266423Sjfv                        if (mcnt == MAX_MULTICAST_ADDR)
1628266423Sjfv                                break;
1629266423Sjfv                        mcnt++;
1630266423Sjfv		}
1631266423Sjfv		if_maddr_runlock(ifp);
1632266423Sjfv	}
1633266423Sjfv
1634266423Sjfv	if (mcnt >= MAX_MULTICAST_ADDR)
1635266423Sjfv                multi = TRUE;
1636266423Sjfv        if (ifp->if_flags & IFF_PROMISC)
1637266423Sjfv		uni = TRUE;
1638266423Sjfv
1639266423Sjfv	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1640266423Sjfv	    vsi->seid, uni, NULL);
1641266423Sjfv	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1642266423Sjfv	    vsi->seid, multi, NULL);
1643266423Sjfv	return;
1644266423Sjfv}
1645266423Sjfv
1646266423Sjfv/*********************************************************************
1647266423Sjfv * 	Filter Routines
1648266423Sjfv *
1649266423Sjfv *	Routines for multicast and vlan filter management.
1650266423Sjfv *
1651266423Sjfv *********************************************************************/
1652266423Sjfvstatic void
1653270346Sjfvixl_add_multi(struct ixl_vsi *vsi)
1654266423Sjfv{
1655266423Sjfv	struct	ifmultiaddr	*ifma;
1656266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1657266423Sjfv	struct i40e_hw		*hw = vsi->hw;
1658266423Sjfv	int			mcnt = 0, flags;
1659266423Sjfv
1660270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: begin");
1661266423Sjfv
1662266423Sjfv	if_maddr_rlock(ifp);
1663266423Sjfv	/*
1664266423Sjfv	** First just get a count, to decide if
1665266423Sjfv	** we simply use multicast promiscuous.
1666266423Sjfv	*/
1667266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1668266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1669266423Sjfv			continue;
1670266423Sjfv		mcnt++;
1671266423Sjfv	}
1672266423Sjfv	if_maddr_runlock(ifp);
1673266423Sjfv
1674266423Sjfv	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1675266423Sjfv		/* delete existing MC filters */
1676270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1677266423Sjfv		i40e_aq_set_vsi_multicast_promiscuous(hw,
1678266423Sjfv		    vsi->seid, TRUE, NULL);
1679266423Sjfv		return;
1680266423Sjfv	}
1681266423Sjfv
1682266423Sjfv	mcnt = 0;
1683266423Sjfv	if_maddr_rlock(ifp);
1684266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1685266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1686266423Sjfv			continue;
1687270346Sjfv		ixl_add_mc_filter(vsi,
1688266423Sjfv		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1689266423Sjfv		mcnt++;
1690266423Sjfv	}
1691266423Sjfv	if_maddr_runlock(ifp);
1692266423Sjfv	if (mcnt > 0) {
1693270346Sjfv		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1694270346Sjfv		ixl_add_hw_filters(vsi, flags, mcnt);
1695266423Sjfv	}
1696266423Sjfv
1697270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: end");
1698266423Sjfv	return;
1699266423Sjfv}
1700266423Sjfv
1701266423Sjfvstatic void
1702270346Sjfvixl_del_multi(struct ixl_vsi *vsi)
1703266423Sjfv{
1704266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1705266423Sjfv	struct ifmultiaddr	*ifma;
1706270346Sjfv	struct ixl_mac_filter	*f;
1707266423Sjfv	int			mcnt = 0;
1708266423Sjfv	bool		match = FALSE;
1709266423Sjfv
1710270346Sjfv	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1711266423Sjfv
1712266423Sjfv	/* Search for removed multicast addresses */
1713266423Sjfv	if_maddr_rlock(ifp);
1714266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
1715270346Sjfv		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1716266423Sjfv			match = FALSE;
1717266423Sjfv			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1718266423Sjfv				if (ifma->ifma_addr->sa_family != AF_LINK)
1719266423Sjfv					continue;
1720266423Sjfv				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1721266423Sjfv				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1722266423Sjfv					match = TRUE;
1723266423Sjfv					break;
1724266423Sjfv				}
1725266423Sjfv			}
1726266423Sjfv			if (match == FALSE) {
1727270346Sjfv				f->flags |= IXL_FILTER_DEL;
1728266423Sjfv				mcnt++;
1729266423Sjfv			}
1730266423Sjfv		}
1731266423Sjfv	}
1732266423Sjfv	if_maddr_runlock(ifp);
1733266423Sjfv
1734266423Sjfv	if (mcnt > 0)
1735270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1736266423Sjfv}
1737266423Sjfv
1738266423Sjfv
1739266423Sjfv/*********************************************************************
1740266423Sjfv *  Timer routine
1741266423Sjfv *
1742266423Sjfv *  This routine checks for link status, updates statistics,
1743266423Sjfv *  and runs the watchdog check.
1744266423Sjfv *
1745266423Sjfv **********************************************************************/
1746266423Sjfv
1747266423Sjfvstatic void
1748270346Sjfvixl_local_timer(void *arg)
1749266423Sjfv{
1750270346Sjfv	struct ixl_pf		*pf = arg;
1751266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1752270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1753270346Sjfv	struct ixl_queue	*que = vsi->queues;
1754266423Sjfv	device_t		dev = pf->dev;
1755266423Sjfv	int			hung = 0;
1756266423Sjfv	u32			mask;
1757266423Sjfv
1758266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1759266423Sjfv
1760266423Sjfv	/* Fire off the adminq task */
1761266423Sjfv	taskqueue_enqueue(pf->tq, &pf->adminq);
1762266423Sjfv
1763266423Sjfv	/* Update stats */
1764270346Sjfv	ixl_update_stats_counters(pf);
1765266423Sjfv
1766266423Sjfv	/*
1767269198Sjfv	** Check status of the queues
1768266423Sjfv	*/
1769266423Sjfv	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1770266423Sjfv		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
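	/*
	** Writing INTENA | SWINT_TRIG to a queue's DYN_CTLN register
	** fires a software-triggered interrupt on that vector, so the
	** queue handler runs even if no new packets have arrived.
	*/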
1771266423Sjfv
1772266423Sjfv	for (int i = 0; i < vsi->num_queues; i++,que++) {
1773266423Sjfv		/* Any queues with outstanding work get a sw irq */
1774266423Sjfv		if (que->busy)
1775266423Sjfv			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1776266423Sjfv		/*
1777266423Sjfv		** Each time txeof runs without cleaning while there
1778266423Sjfv		** are still uncleaned descriptors, it increments busy;
1779266423Sjfv		** once it reaches IXL_MAX_TX_BUSY the queue is marked hung.
1780266423Sjfv		*/
1781270346Sjfv		if (que->busy == IXL_QUEUE_HUNG) {
1782269198Sjfv			++hung;
1783269198Sjfv			/* Mark the queue as inactive */
1784269198Sjfv			vsi->active_queues &= ~((u64)1 << que->me);
1785269198Sjfv			continue;
1786269198Sjfv		} else {
1787269198Sjfv			/* Check if we've come back from hung */
1788269198Sjfv			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1789269198Sjfv				vsi->active_queues |= ((u64)1 << que->me);
1790269198Sjfv		}
1791270346Sjfv		if (que->busy >= IXL_MAX_TX_BUSY) {
1792277084Sjfv#ifdef IXL_DEBUG
1793266423Sjfv			device_printf(dev,"Warning queue %d "
1794269198Sjfv			    "appears to be hung!\n", i);
1795277084Sjfv#endif
1796270346Sjfv			que->busy = IXL_QUEUE_HUNG;
1797266423Sjfv			++hung;
1798266423Sjfv		}
1799266423Sjfv	}
1800266423Sjfv	/* Only reinit if all queues show hung */
1801266423Sjfv	if (hung == vsi->num_queues)
1802266423Sjfv		goto hung;
1803266423Sjfv
1804270346Sjfv	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1805266423Sjfv	return;
1806266423Sjfv
1807266423Sjfvhung:
1808266423Sjfv	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1809270346Sjfv	ixl_init_locked(pf);
1810266423Sjfv}
1811266423Sjfv
1812266423Sjfv/*
1813266423Sjfv** Note: this routine updates the OS on the link state;
1814266423Sjfv**	the real check of the hardware only happens with
1815266423Sjfv**	a link interrupt.
1816266423Sjfv*/
1817266423Sjfvstatic void
1818270346Sjfvixl_update_link_status(struct ixl_pf *pf)
1819266423Sjfv{
1820270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1821266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1822266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1823266423Sjfv	device_t		dev = pf->dev;
1824266423Sjfv
1825279858Sjfv	if (pf->link_up){
1826266423Sjfv		if (vsi->link_active == FALSE) {
1827279033Sjfv			pf->fc = hw->fc.current_mode;
1828266423Sjfv			if (bootverbose) {
1829266423Sjfv				device_printf(dev,"Link is up %d Gbps %s,"
1830266423Sjfv				    " Flow Control: %s\n",
1831279858Sjfv				    ((pf->link_speed ==
1832279858Sjfv				    I40E_LINK_SPEED_40GB)? 40:10),
1833279033Sjfv				    "Full Duplex", ixl_fc_string[pf->fc]);
1834266423Sjfv			}
1835266423Sjfv			vsi->link_active = TRUE;
1836277084Sjfv			/*
1837277084Sjfv			** Warn user if link speed on an NPAR-enabled
1838277084Sjfv			** partition is not at least 10Gbps
1839277084Sjfv			*/
1840277084Sjfv			if (hw->func_caps.npar_enable &&
1841279858Sjfv			   (hw->phy.link_info.link_speed ==
1842279858Sjfv			   I40E_LINK_SPEED_1GB ||
1843279858Sjfv			   hw->phy.link_info.link_speed ==
1844279858Sjfv			   I40E_LINK_SPEED_100MB))
1845279858Sjfv				device_printf(dev, "The partition detected "
1846279858Sjfv				    "link speed that is less than 10Gbps\n");
1847266423Sjfv			if_link_state_change(ifp, LINK_STATE_UP);
1848266423Sjfv		}
1849266423Sjfv	} else { /* Link down */
1850266423Sjfv		if (vsi->link_active == TRUE) {
1851266423Sjfv			if (bootverbose)
1852266423Sjfv				device_printf(dev,"Link is Down\n");
1853266423Sjfv			if_link_state_change(ifp, LINK_STATE_DOWN);
1854266423Sjfv			vsi->link_active = FALSE;
1855266423Sjfv		}
1856266423Sjfv	}
1857266423Sjfv
1858266423Sjfv	return;
1859266423Sjfv}
1860266423Sjfv
1861266423Sjfv/*********************************************************************
1862266423Sjfv *
1863266423Sjfv *  This routine disables all traffic on the adapter by issuing a
1864266423Sjfv *  global reset on the MAC and deallocates TX/RX buffers.
1865266423Sjfv *
1866266423Sjfv **********************************************************************/
1867266423Sjfv
1868266423Sjfvstatic void
1869270346Sjfvixl_stop(struct ixl_pf *pf)
1870266423Sjfv{
1871270346Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
1872266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1873266423Sjfv
1874266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1875266423Sjfv
1876270346Sjfv	INIT_DEBUGOUT("ixl_stop: begin\n");
1877279858Sjfv	if (pf->num_vfs == 0)
1878279858Sjfv		ixl_disable_intr(vsi);
1879279858Sjfv	else
1880279858Sjfv		ixl_disable_rings_intr(vsi);
1881270346Sjfv	ixl_disable_rings(vsi);
1882266423Sjfv
1883266423Sjfv	/* Tell the stack that the interface is no longer active */
1884266423Sjfv	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1885266423Sjfv
1886266423Sjfv	/* Stop the local timer */
1887266423Sjfv	callout_stop(&pf->timer);
1888266423Sjfv
1889266423Sjfv	return;
1890266423Sjfv}
1891266423Sjfv
1892266423Sjfv
1893266423Sjfv/*********************************************************************
1894266423Sjfv *
1895266423Sjfv *  Setup the Legacy or MSI Interrupt handler for the VSI
1896266423Sjfv *
1897266423Sjfv **********************************************************************/
1898266423Sjfvstatic int
1899270346Sjfvixl_assign_vsi_legacy(struct ixl_pf *pf)
1900266423Sjfv{
1901266423Sjfv	device_t        dev = pf->dev;
1902270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
1903270346Sjfv	struct		ixl_queue *que = vsi->queues;
1904266423Sjfv	int 		error, rid = 0;
1905266423Sjfv
1906266423Sjfv	if (pf->msix == 1)
1907266423Sjfv		rid = 1;
1908266423Sjfv	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1909266423Sjfv	    &rid, RF_SHAREABLE | RF_ACTIVE);
1910266423Sjfv	if (pf->res == NULL) {
1911266423Sjfv		device_printf(dev,"Unable to allocate"
1912266423Sjfv		    " bus resource: vsi legacy/msi interrupt\n");
1913266423Sjfv		return (ENXIO);
1914266423Sjfv	}
1915266423Sjfv
1916266423Sjfv	/* Set the handler function */
1917266423Sjfv	error = bus_setup_intr(dev, pf->res,
1918266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1919270346Sjfv	    ixl_intr, pf, &pf->tag);
1920266423Sjfv	if (error) {
1921266423Sjfv		pf->res = NULL;
1922266423Sjfv		device_printf(dev, "Failed to register legacy/msi handler");
1923266423Sjfv		return (error);
1924266423Sjfv	}
1925266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1926270346Sjfv	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1927270346Sjfv	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1928270346Sjfv	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1929266423Sjfv	    taskqueue_thread_enqueue, &que->tq);
1930266423Sjfv	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1931266423Sjfv	    device_get_nameunit(dev));
1932270346Sjfv	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1933279858Sjfv
1934279858Sjfv#ifdef PCI_IOV
1935279858Sjfv	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1936279858Sjfv#endif
1937279858Sjfv
1938270346Sjfv	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1939266423Sjfv	    taskqueue_thread_enqueue, &pf->tq);
1940266423Sjfv	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1941266423Sjfv	    device_get_nameunit(dev));
1942266423Sjfv
1943266423Sjfv	return (0);
1944266423Sjfv}
1945266423Sjfv
1946266423Sjfv
1947266423Sjfv/*********************************************************************
1948266423Sjfv *
1949266423Sjfv *  Setup MSIX Interrupt resources and handlers for the VSI
1950266423Sjfv *
1951266423Sjfv **********************************************************************/
1952266423Sjfvstatic int
1953270346Sjfvixl_assign_vsi_msix(struct ixl_pf *pf)
1954266423Sjfv{
1955266423Sjfv	device_t	dev = pf->dev;
1956270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
1957270346Sjfv	struct 		ixl_queue *que = vsi->queues;
1958266423Sjfv	struct		tx_ring	 *txr;
1959266423Sjfv	int 		error, rid, vector = 0;
1960279255Sadrian#ifdef	RSS
1961279255Sadrian	cpuset_t cpu_mask;
1962279255Sadrian#endif
1963266423Sjfv
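	/*
	** MSI-X layout: vector 0 services the admin queue and vectors
	** 1..num_queues each service one RX/TX queue pair. MSI-X IRQ
	** resource IDs start at 1, so rid = vector + 1 throughout.
	*/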
1964266423Sjfv	/* Admin Queue is vector 0 */
1965266423Sjfv	rid = vector + 1;
1966266423Sjfv	pf->res = bus_alloc_resource_any(dev,
1967266423Sjfv    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1968266423Sjfv	if (!pf->res) {
1969266423Sjfv		device_printf(dev,"Unable to allocate"
1970266423Sjfv    	    " bus resource: Adminq interrupt [%d]\n", rid);
1971266423Sjfv		return (ENXIO);
1972266423Sjfv	}
1973266423Sjfv	/* Set the adminq vector and handler */
1974266423Sjfv	error = bus_setup_intr(dev, pf->res,
1975266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1976270346Sjfv	    ixl_msix_adminq, pf, &pf->tag);
1977266423Sjfv	if (error) {
1978266423Sjfv		pf->res = NULL;
1979266423Sjfv		device_printf(dev, "Failed to register Admin que handler");
1980266423Sjfv		return (error);
1981266423Sjfv	}
1982266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "aq");
1983266423Sjfv	pf->admvec = vector;
1984266423Sjfv	/* Tasklet for Admin Queue */
1985270346Sjfv	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1986279858Sjfv
1987279858Sjfv#ifdef PCI_IOV
1988279858Sjfv	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1989279858Sjfv#endif
1990279858Sjfv
1991270346Sjfv	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1992266423Sjfv	    taskqueue_thread_enqueue, &pf->tq);
1993266423Sjfv	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1994266423Sjfv	    device_get_nameunit(pf->dev));
1995266423Sjfv	++vector;
1996266423Sjfv
1997266423Sjfv	/* Now set up the stations */
1998266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1999277084Sjfv		int cpu_id = i;
2000266423Sjfv		rid = vector + 1;
2001266423Sjfv		txr = &que->txr;
2002266423Sjfv		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2003266423Sjfv		    RF_SHAREABLE | RF_ACTIVE);
2004266423Sjfv		if (que->res == NULL) {
2005266423Sjfv			device_printf(dev,"Unable to allocate"
2006266423Sjfv		    	    " bus resource: que interrupt [%d]\n", vector);
2007266423Sjfv			return (ENXIO);
2008266423Sjfv		}
2009266423Sjfv		/* Set the handler function */
2010266423Sjfv		error = bus_setup_intr(dev, que->res,
2011266423Sjfv		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2012270346Sjfv		    ixl_msix_que, que, &que->tag);
2013266423Sjfv		if (error) {
2014266423Sjfv			que->res = NULL;
2015266423Sjfv			device_printf(dev, "Failed to register que handler");
2016266423Sjfv			return (error);
2017266423Sjfv		}
2018266423Sjfv		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2019266423Sjfv		/* Bind the vector to a CPU */
2020277084Sjfv#ifdef RSS
2021277084Sjfv		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2022277084Sjfv#endif
2023277084Sjfv		bus_bind_intr(dev, que->res, cpu_id);
2024266423Sjfv		que->msix = vector;
2025270346Sjfv		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2026270346Sjfv		TASK_INIT(&que->task, 0, ixl_handle_que, que);
2027270346Sjfv		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2028266423Sjfv		    taskqueue_thread_enqueue, &que->tq);
2029277084Sjfv#ifdef RSS
2030279299Sadrian		CPU_SETOF(cpu_id, &cpu_mask);
2031279255Sadrian		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2032279255Sadrian		    &cpu_mask, "%s (bucket %d)",
2033277084Sjfv		    device_get_nameunit(dev), cpu_id);
2034277084Sjfv#else
2035277084Sjfv		taskqueue_start_threads(&que->tq, 1, PI_NET,
2036277084Sjfv		    "%s que", device_get_nameunit(dev));
2037277084Sjfv#endif
2038266423Sjfv	}
2039266423Sjfv
2040266423Sjfv	return (0);
2041266423Sjfv}
2042266423Sjfv
2043266423Sjfv
2044266423Sjfv/*
2045266423Sjfv * Allocate MSI/X vectors
2046266423Sjfv */
2047266423Sjfvstatic int
2048270346Sjfvixl_init_msix(struct ixl_pf *pf)
2049266423Sjfv{
2050266423Sjfv	device_t dev = pf->dev;
2051266423Sjfv	int rid, want, vectors, queues, available;
2052266423Sjfv
2053266423Sjfv	/* Override by tuneable */
2054270346Sjfv	if (ixl_enable_msix == 0)
2055266423Sjfv		goto msi;
2056266423Sjfv
2057269198Sjfv	/*
2058269198Sjfv	** When used in a virtualized environment
2059269198Sjfv	** PCI BUSMASTER capability may not be set
2060269198Sjfv	** so explicitly set it here and rewrite
2061269198Sjfv	** the ENABLE in the MSIX control register
2062269198Sjfv	** at this point to cause the host to
2063269198Sjfv	** successfully initialize us.
2064269198Sjfv	*/
2065269198Sjfv	{
2066269198Sjfv		u16 pci_cmd_word;
2067269198Sjfv		int msix_ctrl;
2068269198Sjfv		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2069269198Sjfv		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2070269198Sjfv		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2071269198Sjfv		pci_find_cap(dev, PCIY_MSIX, &rid);
2072269198Sjfv		rid += PCIR_MSIX_CTRL;
2073269198Sjfv		msix_ctrl = pci_read_config(dev, rid, 2);
2074269198Sjfv		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2075269198Sjfv		pci_write_config(dev, rid, msix_ctrl, 2);
2076269198Sjfv	}
2077269198Sjfv
2078266423Sjfv	/* First try MSI/X */
2079270346Sjfv	rid = PCIR_BAR(IXL_BAR);
2080266423Sjfv	pf->msix_mem = bus_alloc_resource_any(dev,
2081266423Sjfv	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2082266423Sjfv       	if (!pf->msix_mem) {
2083266423Sjfv		/* May not be enabled */
2084266423Sjfv		device_printf(pf->dev,
2085266423Sjfv		    "Unable to map MSIX table \n");
2086266423Sjfv		goto msi;
2087266423Sjfv	}
2088266423Sjfv
2089266423Sjfv	available = pci_msix_count(dev);
2090266423Sjfv	if (available == 0) { /* system has msix disabled */
2091266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2092266423Sjfv		    rid, pf->msix_mem);
2093266423Sjfv		pf->msix_mem = NULL;
2094266423Sjfv		goto msi;
2095266423Sjfv	}
2096266423Sjfv
2097266423Sjfv	/* Figure out a reasonable auto config value */
2098266423Sjfv	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2099266423Sjfv
2100266423Sjfv	/* Override with hardcoded value if sane */
2101270346Sjfv	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2102270346Sjfv		queues = ixl_max_queues;
2103266423Sjfv
2104277084Sjfv#ifdef  RSS
2105277084Sjfv	/* If we're doing RSS, clamp at the number of RSS buckets */
2106277084Sjfv	if (queues > rss_getnumbuckets())
2107277084Sjfv		queues = rss_getnumbuckets();
2108277084Sjfv#endif
2109277084Sjfv
2110266423Sjfv	/*
2111266423Sjfv	** Want one vector (RX/TX pair) per queue
2112266423Sjfv	** plus an additional for the admin queue.
2113266423Sjfv	*/
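	/*
	** For example, 8 queues need 9 vectors; if fewer than 9 are
	** available we report the problem and fall back to the
	** legacy/MSI setup below.
	*/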
2114266423Sjfv	want = queues + 1;
2115266423Sjfv	if (want <= available)	/* Have enough */
2116266423Sjfv		vectors = want;
2117266423Sjfv	else {
2118266423Sjfv               	device_printf(pf->dev,
2119266423Sjfv		    "MSIX Configuration Problem, "
2120266423Sjfv		    "%d vectors available but %d wanted!\n",
2121266423Sjfv		    available, want);
2122266423Sjfv		return (0); /* Will go to Legacy setup */
2123266423Sjfv	}
2124266423Sjfv
2125266423Sjfv	if (pci_alloc_msix(dev, &vectors) == 0) {
2126266423Sjfv               	device_printf(pf->dev,
2127266423Sjfv		    "Using MSIX interrupts with %d vectors\n", vectors);
2128266423Sjfv		pf->msix = vectors;
2129266423Sjfv		pf->vsi.num_queues = queues;
2130277084Sjfv#ifdef RSS
2131277084Sjfv		/*
2132277084Sjfv		 * If we're doing RSS, the number of queues needs to
2133277084Sjfv		 * match the number of RSS buckets that are configured.
2134277084Sjfv		 *
2135277084Sjfv		 * + If there's more queues than RSS buckets, we'll end
2136277084Sjfv		 *   up with queues that get no traffic.
2137277084Sjfv		 *
2138277084Sjfv		 * + If there's more RSS buckets than queues, we'll end
2139277084Sjfv		 *   up having multiple RSS buckets map to the same queue,
2140277084Sjfv		 *   so there'll be some contention.
2141277084Sjfv		 */
2142277084Sjfv		if (queues != rss_getnumbuckets()) {
2143277084Sjfv			device_printf(dev,
2144277084Sjfv			    "%s: queues (%d) != RSS buckets (%d)"
2145277084Sjfv			    "; performance will be impacted.\n",
2146277084Sjfv			    __func__, queues, rss_getnumbuckets());
2147277084Sjfv		}
2148277084Sjfv#endif
2149266423Sjfv		return (vectors);
2150266423Sjfv	}
2151266423Sjfvmsi:
2152266423Sjfv       	vectors = pci_msi_count(dev);
2153266423Sjfv	pf->vsi.num_queues = 1;
2154266423Sjfv	pf->msix = 1;
2155270346Sjfv	ixl_max_queues = 1;
2156270346Sjfv	ixl_enable_msix = 0;
2157266423Sjfv       	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2158266423Sjfv               	device_printf(pf->dev,"Using an MSI interrupt\n");
2159266423Sjfv	else {
2160266423Sjfv		pf->msix = 0;
2161266423Sjfv               	device_printf(pf->dev,"Using a Legacy interrupt\n");
2162266423Sjfv	}
2163266423Sjfv	return (vectors);
2164266423Sjfv}
2165266423Sjfv
2166266423Sjfv
2167266423Sjfv/*
2168266423Sjfv * Plumb MSI/X vectors
2169266423Sjfv */
2170266423Sjfvstatic void
2171270346Sjfvixl_configure_msix(struct ixl_pf *pf)
2172266423Sjfv{
2173266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2174270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
2175266423Sjfv	u32		reg;
2176266423Sjfv	u16		vector = 1;
2177266423Sjfv
2178266423Sjfv	/* First set up the adminq - vector 0 */
2179266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2180266423Sjfv	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2181266423Sjfv
2182266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2183266423Sjfv	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2184266423Sjfv	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2185266423Sjfv	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2186266423Sjfv	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2187266423Sjfv	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2188266423Sjfv	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2189266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2190266423Sjfv
2191266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2192270346Sjfv	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2193266423Sjfv
2194266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2195266423Sjfv	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2196266423Sjfv	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2197266423Sjfv
2198266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2199266423Sjfv
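	/*
	** Each queue pair is chained into a small interrupt cause list:
	** PFINT_LNKLSTN(i) points at RX queue i, whose RQCTL entry chains
	** to TX queue i, whose TQCTL entry chains to RX queue i+1; the
	** last TX entry is terminated with IXL_QUEUE_EOL.
	*/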
2200266423Sjfv	/* Next configure the queues */
2201266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2202266423Sjfv		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2203266423Sjfv		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2204266423Sjfv
2205266423Sjfv		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2206270346Sjfv		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2207266423Sjfv		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2208266423Sjfv		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2209266423Sjfv		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2210266423Sjfv		wr32(hw, I40E_QINT_RQCTL(i), reg);
2211266423Sjfv
2212266423Sjfv		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2213270346Sjfv		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2214266423Sjfv		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2215266423Sjfv		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2216266423Sjfv		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2217266423Sjfv		if (i == (vsi->num_queues - 1))
2218270346Sjfv			reg |= (IXL_QUEUE_EOL
2219266423Sjfv			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2220266423Sjfv		wr32(hw, I40E_QINT_TQCTL(i), reg);
2221266423Sjfv	}
2222266423Sjfv}
2223266423Sjfv
2224266423Sjfv/*
2225266423Sjfv * Configure for MSI single vector operation
2226266423Sjfv */
2227266423Sjfvstatic void
2228270346Sjfvixl_configure_legacy(struct ixl_pf *pf)
2229266423Sjfv{
2230266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2231266423Sjfv	u32		reg;
2232266423Sjfv
2233266423Sjfv
2234266423Sjfv	wr32(hw, I40E_PFINT_ITR0(0), 0);
2235266423Sjfv	wr32(hw, I40E_PFINT_ITR0(1), 0);
2236266423Sjfv
2237266423Sjfv
2238266423Sjfv	/* Setup "other" causes */
2239266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2240266423Sjfv	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2241266423Sjfv	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2242266423Sjfv	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2243266423Sjfv	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2244266423Sjfv	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2245266423Sjfv	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2246266423Sjfv	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2247266423Sjfv	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2248266423Sjfv	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2249266423Sjfv	    ;
2250266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2251266423Sjfv
2252266423Sjfv	/* SW_ITR_IDX = 0, but don't change INTENA */
2253266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2254266423Sjfv	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2255266423Sjfv	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2256266423Sjfv	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2257266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2258266423Sjfv
2259266423Sjfv	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2260266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0);
2261266423Sjfv
2262266423Sjfv	/* Associate the queue pair to the vector and enable the q int */
2263266423Sjfv	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2264270346Sjfv	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2265266423Sjfv	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2266266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
2267266423Sjfv
2268266423Sjfv	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2269270346Sjfv	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2270270346Sjfv	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2271266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
2272266423Sjfv
2273266423Sjfv	/* Next enable the queue pair */
2274266423Sjfv	reg = rd32(hw, I40E_QTX_ENA(0));
2275266423Sjfv	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2276266423Sjfv	wr32(hw, I40E_QTX_ENA(0), reg);
2277266423Sjfv
2278266423Sjfv	reg = rd32(hw, I40E_QRX_ENA(0));
2279266423Sjfv	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2280266423Sjfv	wr32(hw, I40E_QRX_ENA(0), reg);
2281266423Sjfv}
2282266423Sjfv
2283266423Sjfv
2284266423Sjfv/*
2285266423Sjfv * Set the Initial ITR state
2286266423Sjfv */
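/*
** The ITR (interrupt throttle rate) registers pace how often each
** vector may interrupt; the RX and TX values come from the ixl_rx_itr
** and ixl_tx_itr tunables, and the IXL_ITR_DYNAMIC flag lets the
** per-queue handlers adjust the rate based on observed traffic.
*/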
2287266423Sjfvstatic void
2288270346Sjfvixl_configure_itr(struct ixl_pf *pf)
2289266423Sjfv{
2290266423Sjfv	struct i40e_hw		*hw = &pf->hw;
2291270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2292270346Sjfv	struct ixl_queue	*que = vsi->queues;
2293266423Sjfv
2294270346Sjfv	vsi->rx_itr_setting = ixl_rx_itr;
2295270346Sjfv	if (ixl_dynamic_rx_itr)
2296270346Sjfv		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2297270346Sjfv	vsi->tx_itr_setting = ixl_tx_itr;
2298270346Sjfv	if (ixl_dynamic_tx_itr)
2299270346Sjfv		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2300266423Sjfv
2301266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2302266423Sjfv		struct tx_ring	*txr = &que->txr;
2303266423Sjfv		struct rx_ring 	*rxr = &que->rxr;
2304266423Sjfv
2305270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2306266423Sjfv		    vsi->rx_itr_setting);
2307266423Sjfv		rxr->itr = vsi->rx_itr_setting;
2308270346Sjfv		rxr->latency = IXL_AVE_LATENCY;
2309270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2310266423Sjfv		    vsi->tx_itr_setting);
2311266423Sjfv		txr->itr = vsi->tx_itr_setting;
2312270346Sjfv		txr->latency = IXL_AVE_LATENCY;
2313266423Sjfv	}
2314266423Sjfv}
2315266423Sjfv
2316266423Sjfv
2317266423Sjfvstatic int
2318270346Sjfvixl_allocate_pci_resources(struct ixl_pf *pf)
2319266423Sjfv{
2320266423Sjfv	int             rid;
2321266423Sjfv	device_t        dev = pf->dev;
2322266423Sjfv
2323266423Sjfv	rid = PCIR_BAR(0);
2324266423Sjfv	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2325266423Sjfv	    &rid, RF_ACTIVE);
2326266423Sjfv
2327266423Sjfv	if (!(pf->pci_mem)) {
2328266423Sjfv		device_printf(dev,"Unable to allocate bus resource: memory\n");
2329266423Sjfv		return (ENXIO);
2330266423Sjfv	}
2331266423Sjfv
2332266423Sjfv	pf->osdep.mem_bus_space_tag =
2333266423Sjfv		rman_get_bustag(pf->pci_mem);
2334266423Sjfv	pf->osdep.mem_bus_space_handle =
2335266423Sjfv		rman_get_bushandle(pf->pci_mem);
2336270346Sjfv	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2337272285Srstone	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2338266423Sjfv	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2339266423Sjfv
2340266423Sjfv	pf->hw.back = &pf->osdep;
2341266423Sjfv
2342266423Sjfv	/*
2343266423Sjfv	** Now set up MSI or MSI-X; this should
2344266423Sjfv	** return the number of supported
2345266423Sjfv	** vectors (which will be 1 for MSI).
2346266423Sjfv	*/
2347270346Sjfv	pf->msix = ixl_init_msix(pf);
2348266423Sjfv	return (0);
2349266423Sjfv}
2350266423Sjfv
2351266423Sjfvstatic void
2352270346Sjfvixl_free_pci_resources(struct ixl_pf * pf)
2353266423Sjfv{
2354270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2355270346Sjfv	struct ixl_queue	*que = vsi->queues;
2356266423Sjfv	device_t		dev = pf->dev;
2357266423Sjfv	int			rid, memrid;
2358266423Sjfv
2359270346Sjfv	memrid = PCIR_BAR(IXL_BAR);
2360266423Sjfv
2361266423Sjfv	/* We may get here before stations are setup */
2362270346Sjfv	if ((!ixl_enable_msix) || (que == NULL))
2363266423Sjfv		goto early;
2364266423Sjfv
2365266423Sjfv	/*
2366266423Sjfv	**  Release all msix VSI resources:
2367266423Sjfv	*/
2368266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2369266423Sjfv		rid = que->msix + 1;
2370266423Sjfv		if (que->tag != NULL) {
2371266423Sjfv			bus_teardown_intr(dev, que->res, que->tag);
2372266423Sjfv			que->tag = NULL;
2373266423Sjfv		}
2374266423Sjfv		if (que->res != NULL)
2375266423Sjfv			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2376266423Sjfv	}
2377266423Sjfv
2378266423Sjfvearly:
2379266423Sjfv	/* Clean the AdminQ interrupt last */
2380266423Sjfv	if (pf->admvec) /* we are doing MSIX */
2381266423Sjfv		rid = pf->admvec + 1;
2382266423Sjfv	else
2383266423Sjfv		(pf->msix != 0) ? (rid = 1):(rid = 0);
2384266423Sjfv
2385266423Sjfv	if (pf->tag != NULL) {
2386266423Sjfv		bus_teardown_intr(dev, pf->res, pf->tag);
2387266423Sjfv		pf->tag = NULL;
2388266423Sjfv	}
2389266423Sjfv	if (pf->res != NULL)
2390266423Sjfv		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2391266423Sjfv
2392266423Sjfv	if (pf->msix)
2393266423Sjfv		pci_release_msi(dev);
2394266423Sjfv
2395266423Sjfv	if (pf->msix_mem != NULL)
2396266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2397266423Sjfv		    memrid, pf->msix_mem);
2398266423Sjfv
2399266423Sjfv	if (pf->pci_mem != NULL)
2400266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2401266423Sjfv		    PCIR_BAR(0), pf->pci_mem);
2402266423Sjfv
2403266423Sjfv	return;
2404266423Sjfv}
2405266423Sjfv
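/*
** Translate the PHY capability bits returned by the firmware into
** the corresponding ifmedia entries so ifconfig can report and
** select the supported media types.
*/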
2406274205Sjfvstatic void
2407274205Sjfvixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2408274205Sjfv{
2409274205Sjfv	/* Add supported media types */
2410274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2411274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2412266423Sjfv
2413274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2414274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2415279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2416279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2417279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2418279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2419274205Sjfv
2420274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2421279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4) ||
2422279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR) ||
2423279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2424279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2425279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2426279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_SFI) ||
2427274205Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2428274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2429279033Sjfv
2430274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2431274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2432274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2433274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2434274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2435274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2436274205Sjfv
2437279033Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2438279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2439279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2440279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2441279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XLPPI) ||
2442279033Sjfv	    /* KR4 uses CR4 until the OS has the real media type */
2443279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2444274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2445279033Sjfv
2446274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2447274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2448274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2449274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2450274205Sjfv}
2451274205Sjfv
2452266423Sjfv/*********************************************************************
2453266423Sjfv *
2454266423Sjfv *  Setup networking device structure and register an interface.
2455266423Sjfv *
2456266423Sjfv **********************************************************************/
2457266423Sjfvstatic int
2458270346Sjfvixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2459266423Sjfv{
2460266423Sjfv	struct ifnet		*ifp;
2461266423Sjfv	struct i40e_hw		*hw = vsi->hw;
2462270346Sjfv	struct ixl_queue	*que = vsi->queues;
2463279033Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
2464266423Sjfv	enum i40e_status_code aq_error = 0;
2465266423Sjfv
2466270346Sjfv	INIT_DEBUGOUT("ixl_setup_interface: begin");
2467266423Sjfv
2468266423Sjfv	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2469266423Sjfv	if (ifp == NULL) {
2470266423Sjfv		device_printf(dev, "can not allocate ifnet structure\n");
2471266423Sjfv		return (-1);
2472266423Sjfv	}
2473266423Sjfv	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2474266423Sjfv	ifp->if_mtu = ETHERMTU;
2475266423Sjfv	ifp->if_baudrate = 4000000000;  // ??
2476270346Sjfv	ifp->if_init = ixl_init;
2477266423Sjfv	ifp->if_softc = vsi;
2478266423Sjfv	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2479270346Sjfv	ifp->if_ioctl = ixl_ioctl;
2480266423Sjfv
2481274205Sjfv#if __FreeBSD_version >= 1100036
2482272227Sglebius	if_setgetcounterfn(ifp, ixl_get_counter);
2483272227Sglebius#endif
2484272227Sglebius
2485270346Sjfv	ifp->if_transmit = ixl_mq_start;
2486266423Sjfv
2487270346Sjfv	ifp->if_qflush = ixl_qflush;
2488266423Sjfv
2489266423Sjfv	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2490266423Sjfv
2491266423Sjfv	vsi->max_frame_size =
2492266423Sjfv	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2493266423Sjfv	    + ETHER_VLAN_ENCAP_LEN;
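	/*
	** With the default 1500-byte MTU this works out to 1522 bytes:
	** 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag).
	*/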
2494266423Sjfv
2495266423Sjfv	/*
2496266423Sjfv	 * Tell the upper layer(s) we support long frames.
2497266423Sjfv	 */
2498270856Sglebius	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2499266423Sjfv
2500266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM;
2501266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2502266423Sjfv	ifp->if_capabilities |= IFCAP_TSO;
2503266423Sjfv	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2504266423Sjfv	ifp->if_capabilities |= IFCAP_LRO;
2505266423Sjfv
2506266423Sjfv	/* VLAN capabilities */
2507266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2508266423Sjfv			     |  IFCAP_VLAN_HWTSO
2509266423Sjfv			     |  IFCAP_VLAN_MTU
2510266423Sjfv			     |  IFCAP_VLAN_HWCSUM;
2511266423Sjfv	ifp->if_capenable = ifp->if_capabilities;
2512266423Sjfv
2513266423Sjfv	/*
2514266423Sjfv	** Don't turn this on by default: if vlans are
2515266423Sjfv	** created on another pseudo device (e.g. lagg)
2516266423Sjfv	** then vlan events are not passed through, breaking
2517266423Sjfv	** operation, but with HW FILTER off it works. If
2518266423Sjfv	** you use vlans directly on the ixl driver you can
2519266423Sjfv	** enable this and get full hardware tag filtering.
2520266423Sjfv	*/
2521266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2522266423Sjfv
2523266423Sjfv	/*
2524266423Sjfv	 * Specify the media types supported by this adapter and register
2525266423Sjfv	 * callbacks to update media and link information
2526266423Sjfv	 */
2527270346Sjfv	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2528270346Sjfv		     ixl_media_status);
2529266423Sjfv
2530279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
2531279033Sjfv	    FALSE, TRUE, &abilities, NULL);
2532279033Sjfv	/* May need delay to detect fiber correctly */
2533274205Sjfv	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2534274205Sjfv		i40e_msec_delay(200);
2535277084Sjfv		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2536279033Sjfv		    TRUE, &abilities, NULL);
2537279033Sjfv	}
2538279033Sjfv	if (aq_error) {
2539274205Sjfv		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2540274205Sjfv			device_printf(dev, "Unknown PHY type detected!\n");
2541274205Sjfv		else
2542279033Sjfv			device_printf(dev,
2543279033Sjfv			    "Error getting supported media types, err %d,"
2544279033Sjfv			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2545279033Sjfv		return (0);
2546279033Sjfv	}
2547266423Sjfv
2548279033Sjfv	ixl_add_ifmedia(vsi, abilities.phy_type);
2549279033Sjfv
2550266423Sjfv	/* Use autoselect media by default */
2551266423Sjfv	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2552266423Sjfv	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2553266423Sjfv
2554274205Sjfv	ether_ifattach(ifp, hw->mac.addr);
2555274205Sjfv
2556266423Sjfv	return (0);
2557266423Sjfv}
2558266423Sjfv
2559279858Sjfv/*
2560279858Sjfv** Run when the Admin Queue gets a
2561279858Sjfv** link transition interrupt.
2562279858Sjfv*/
2563279858Sjfvstatic void
2564279858Sjfvixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2565266423Sjfv{
2566279858Sjfv	struct i40e_hw	*hw = &pf->hw;
2567279858Sjfv	struct i40e_aqc_get_link_status *status =
2568279858Sjfv	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2569266423Sjfv	bool check;
2570266423Sjfv
2571279858Sjfv	hw->phy.get_link_info = TRUE;
2572266423Sjfv	check = i40e_get_link_status(hw);
2573279858Sjfv	pf->link_up = check;
2574270346Sjfv#ifdef IXL_DEBUG
2575266423Sjfv	printf("Link is %s\n", check ? "up":"down");
2576266423Sjfv#endif
2577279858Sjfv	/* Report if Unqualified modules are found */
2578279858Sjfv	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2579279858Sjfv	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2580279858Sjfv	    (!(status->link_info & I40E_AQ_LINK_UP)))
2581279858Sjfv		device_printf(pf->dev, "Link failed because "
2582279858Sjfv		    "an unqualified module was detected\n");
2583279858Sjfv
2584279858Sjfv	return;
2585266423Sjfv}
2586266423Sjfv
2587266423Sjfv/*********************************************************************
2588266423Sjfv *
2589279033Sjfv *  Get Firmware Switch configuration
2590279033Sjfv *	- this will need to be more robust when more complex
2591279033Sjfv *	  switch configurations are enabled.
2592266423Sjfv *
2593266423Sjfv **********************************************************************/
2594266423Sjfvstatic int
2595279033Sjfvixl_switch_config(struct ixl_pf *pf)
2596266423Sjfv{
2597279033Sjfv	struct i40e_hw	*hw = &pf->hw;
2598279033Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
2599266423Sjfv	device_t 	dev = vsi->dev;
2600266423Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
2601266423Sjfv	u8	aq_buf[I40E_AQ_LARGE_BUF];
2602279858Sjfv	int	ret;
2603266423Sjfv	u16	next = 0;
2604266423Sjfv
2605279033Sjfv	memset(&aq_buf, 0, sizeof(aq_buf));
2606266423Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2607266423Sjfv	ret = i40e_aq_get_switch_config(hw, sw_config,
2608266423Sjfv	    sizeof(aq_buf), &next, NULL);
2609266423Sjfv	if (ret) {
2610279858Sjfv		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2611279858Sjfv		    ret);
2612266423Sjfv		return (ret);
2613266423Sjfv	}
2614270346Sjfv#ifdef IXL_DEBUG
2615279858Sjfv	device_printf(dev,
2616279858Sjfv	    "Switch config: header reported: %d in structure, %d total\n",
2617266423Sjfv    	    sw_config->header.num_reported, sw_config->header.num_total);
2618279858Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
2619279858Sjfv		device_printf(dev,
2620279858Sjfv		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2621279858Sjfv		    sw_config->element[i].element_type,
2622279858Sjfv		    sw_config->element[i].seid,
2623279858Sjfv		    sw_config->element[i].uplink_seid,
2624279858Sjfv		    sw_config->element[i].downlink_seid);
2625279858Sjfv	}
2626266423Sjfv#endif
2627279033Sjfv	/* Simplified due to a single VSI at the moment */
2628279858Sjfv	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2629279858Sjfv	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2630266423Sjfv	vsi->seid = sw_config->element[0].seid;
2631279033Sjfv	return (ret);
2632279033Sjfv}
2633266423Sjfv
2634279033Sjfv/*********************************************************************
2635279033Sjfv *
2636279033Sjfv *  Initialize the VSI:  this handles contexts, which means things
2637279033Sjfv *  			 like the number of descriptors, buffer size,
2638279033Sjfv *			 plus we init the rings through this function.
2639279033Sjfv *
2640279033Sjfv **********************************************************************/
2641279033Sjfvstatic int
2642279033Sjfvixl_initialize_vsi(struct ixl_vsi *vsi)
2643279033Sjfv{
2644279858Sjfv	struct ixl_pf		*pf = vsi->back;
2645279033Sjfv	struct ixl_queue	*que = vsi->queues;
2646279033Sjfv	device_t		dev = vsi->dev;
2647279033Sjfv	struct i40e_hw		*hw = vsi->hw;
2648279033Sjfv	struct i40e_vsi_context	ctxt;
2649279033Sjfv	int			err = 0;
2650279033Sjfv
2651266423Sjfv	memset(&ctxt, 0, sizeof(ctxt));
2652266423Sjfv	ctxt.seid = vsi->seid;
2653279858Sjfv	if (pf->veb_seid != 0)
2654279858Sjfv		ctxt.uplink_seid = pf->veb_seid;
2655266423Sjfv	ctxt.pf_num = hw->pf_id;
2656279033Sjfv	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2657279033Sjfv	if (err) {
2658279033Sjfv		device_printf(dev,"get vsi params failed %x!!\n", err);
2659279033Sjfv		return (err);
2660266423Sjfv	}
2661270346Sjfv#ifdef IXL_DEBUG
2662266423Sjfv	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2663266423Sjfv	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2664266423Sjfv	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2665266423Sjfv	    ctxt.uplink_seid, ctxt.vsi_number,
2666266423Sjfv	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2667266423Sjfv	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2668266423Sjfv	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2669266423Sjfv#endif
2670266423Sjfv	/*
2671266423Sjfv	** Set the queue and traffic class bits
2672266423Sjfv	**  - when multiple traffic classes are supported
2673266423Sjfv	**    this will need to be more robust.
2674266423Sjfv	*/
2675266423Sjfv	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2676266423Sjfv	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2677266423Sjfv	ctxt.info.queue_mapping[0] = 0;
2678266423Sjfv	ctxt.info.tc_mapping[0] = 0x0800;
2679266423Sjfv
2680266423Sjfv	/* Set VLAN receive stripping mode */
2681266423Sjfv	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2682266423Sjfv	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2683266423Sjfv	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2684266423Sjfv	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2685266423Sjfv	else
2686266423Sjfv	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2687266423Sjfv
2688266423Sjfv	/* Keep copy of VSI info in VSI for statistic counters */
2689266423Sjfv	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2690266423Sjfv
2691266423Sjfv	/* Reset VSI statistics */
2692270346Sjfv	ixl_vsi_reset_stats(vsi);
2693266423Sjfv	vsi->hw_filters_add = 0;
2694266423Sjfv	vsi->hw_filters_del = 0;
2695266423Sjfv
2696279858Sjfv	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2697279858Sjfv
2698279033Sjfv	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2699279033Sjfv	if (err) {
2700266423Sjfv		device_printf(dev,"update vsi params failed %x!!\n",
2701266423Sjfv		   hw->aq.asq_last_status);
2702279033Sjfv		return (err);
2703279033Sjfv	}
2704266423Sjfv
2705266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2706266423Sjfv		struct tx_ring		*txr = &que->txr;
2707266423Sjfv		struct rx_ring 		*rxr = &que->rxr;
2708266423Sjfv		struct i40e_hmc_obj_txq tctx;
2709266423Sjfv		struct i40e_hmc_obj_rxq rctx;
2710266423Sjfv		u32			txctl;
2711266423Sjfv		u16			size;
2712266423Sjfv
2713266423Sjfv
2714266423Sjfv		/* Setup the HMC TX Context  */
2715266423Sjfv		size = que->num_desc * sizeof(struct i40e_tx_desc);
2716266423Sjfv		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2717266423Sjfv		tctx.new_context = 1;
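		/*
		** The HMC context takes the ring base address scaled down to
		** IXL_TX_CTX_BASE_UNITS-byte units (assumed to be 128 bytes).
		*/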
2718279858Sjfv		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2719266423Sjfv		tctx.qlen = que->num_desc;
2720266423Sjfv		tctx.fc_ena = 0;
2721269198Sjfv		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2722269198Sjfv		/* Enable HEAD writeback */
2723269198Sjfv		tctx.head_wb_ena = 1;
2724269198Sjfv		tctx.head_wb_addr = txr->dma.pa +
2725269198Sjfv		    (que->num_desc * sizeof(struct i40e_tx_desc));
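		/*
		** The writeback area sits immediately past the last TX
		** descriptor; ixl_setup_stations() pads the TX DMA
		** allocation with an extra u32 for this purpose.
		*/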
2726266423Sjfv		tctx.rdylist_act = 0;
2727266423Sjfv		err = i40e_clear_lan_tx_queue_context(hw, i);
2728266423Sjfv		if (err) {
2729266423Sjfv			device_printf(dev, "Unable to clear TX context\n");
2730266423Sjfv			break;
2731266423Sjfv		}
2732266423Sjfv		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2733266423Sjfv		if (err) {
2734266423Sjfv			device_printf(dev, "Unable to set TX context\n");
2735266423Sjfv			break;
2736266423Sjfv		}
2737266423Sjfv		/* Associate the ring with this PF */
2738266423Sjfv		txctl = I40E_QTX_CTL_PF_QUEUE;
2739266423Sjfv		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2740266423Sjfv		    I40E_QTX_CTL_PF_INDX_MASK);
2741266423Sjfv		wr32(hw, I40E_QTX_CTL(i), txctl);
2742270346Sjfv		ixl_flush(hw);
2743266423Sjfv
2744266423Sjfv		/* Do ring (re)init */
2745270346Sjfv		ixl_init_tx_ring(que);
2746266423Sjfv
2747266423Sjfv		/* Next setup the HMC RX Context  */
2748279858Sjfv		if (vsi->max_frame_size <= MCLBYTES)
2749266423Sjfv			rxr->mbuf_sz = MCLBYTES;
2750266423Sjfv		else
2751266423Sjfv			rxr->mbuf_sz = MJUMPAGESIZE;
2752266423Sjfv
2753266423Sjfv		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
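		/*
		** Largest frame this queue can accept: the buffer size times
		** the number of RX buffers the hardware can chain per packet;
		** rxmax below is clamped to this value.
		*/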
2754266423Sjfv
2755266423Sjfv		/* Set up an RX context for the HMC */
2756266423Sjfv		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2757266423Sjfv		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2758266423Sjfv		/* ignore header split for now */
2759266423Sjfv		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2760266423Sjfv		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2761266423Sjfv		    vsi->max_frame_size : max_rxmax;
2762266423Sjfv		rctx.dtype = 0;
2763266423Sjfv		rctx.dsize = 1;	/* do 32byte descriptors */
2764266423Sjfv		rctx.hsplit_0 = 0;  /* no HDR split initially */
2765279858Sjfv		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2766266423Sjfv		rctx.qlen = que->num_desc;
2767266423Sjfv		rctx.tphrdesc_ena = 1;
2768266423Sjfv		rctx.tphwdesc_ena = 1;
2769266423Sjfv		rctx.tphdata_ena = 0;
2770266423Sjfv		rctx.tphhead_ena = 0;
2771266423Sjfv		rctx.lrxqthresh = 2;
2772266423Sjfv		rctx.crcstrip = 1;
2773266423Sjfv		rctx.l2tsel = 1;
2774266423Sjfv		rctx.showiv = 1;
2775266423Sjfv		rctx.fc_ena = 0;
2776266423Sjfv		rctx.prefena = 1;
2777266423Sjfv
2778266423Sjfv		err = i40e_clear_lan_rx_queue_context(hw, i);
2779266423Sjfv		if (err) {
2780266423Sjfv			device_printf(dev,
2781266423Sjfv			    "Unable to clear RX context %d\n", i);
2782266423Sjfv			break;
2783266423Sjfv		}
2784266423Sjfv		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2785266423Sjfv		if (err) {
2786266423Sjfv			device_printf(dev, "Unable to set RX context %d\n", i);
2787266423Sjfv			break;
2788266423Sjfv		}
2789270346Sjfv		err = ixl_init_rx_ring(que);
2790266423Sjfv		if (err) {
2791266423Sjfv			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2792266423Sjfv			break;
2793266423Sjfv		}
2794266423Sjfv		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2795279860Sjfv#ifdef DEV_NETMAP
2796279860Sjfv		/* preserve queue */
2797279860Sjfv		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2798279860Sjfv			struct netmap_adapter *na = NA(vsi->ifp);
2799279860Sjfv			struct netmap_kring *kring = &na->rx_rings[i];
2800279860Sjfv			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2801279860Sjfv			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2802279860Sjfv		} else
2803279860Sjfv#endif /* DEV_NETMAP */
2804266423Sjfv		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
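		/*
		** In the normal case the tail is left at the last descriptor
		** so the entire ring is owned by hardware; the netmap path
		** above instead reserves kring space.
		*/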
2805266423Sjfv	}
2806266423Sjfv	return (err);
2807266423Sjfv}
2808266423Sjfv
2809266423Sjfv
2810266423Sjfv/*********************************************************************
2811266423Sjfv *
2812266423Sjfv *  Free all VSI structs.
2813266423Sjfv *
2814266423Sjfv **********************************************************************/
2815266423Sjfvvoid
2816270346Sjfvixl_free_vsi(struct ixl_vsi *vsi)
2817266423Sjfv{
2818270346Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2819270346Sjfv	struct ixl_queue	*que = vsi->queues;
2820266423Sjfv
2821266423Sjfv	/* Free station queues */
2822266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2823266423Sjfv		struct tx_ring *txr = &que->txr;
2824266423Sjfv		struct rx_ring *rxr = &que->rxr;
2825266423Sjfv
2826266423Sjfv		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2827266423Sjfv			continue;
2828270346Sjfv		IXL_TX_LOCK(txr);
2829270346Sjfv		ixl_free_que_tx(que);
2830266423Sjfv		if (txr->base)
2831271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
2832270346Sjfv		IXL_TX_UNLOCK(txr);
2833270346Sjfv		IXL_TX_LOCK_DESTROY(txr);
2834266423Sjfv
2835266423Sjfv		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2836266423Sjfv			continue;
2837270346Sjfv		IXL_RX_LOCK(rxr);
2838270346Sjfv		ixl_free_que_rx(que);
2839266423Sjfv		if (rxr->base)
2840271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2841270346Sjfv		IXL_RX_UNLOCK(rxr);
2842270346Sjfv		IXL_RX_LOCK_DESTROY(rxr);
2843266423Sjfv
2844266423Sjfv	}
2845266423Sjfv	free(vsi->queues, M_DEVBUF);
2846266423Sjfv
2847266423Sjfv	/* Free VSI filter list */
2848279858Sjfv	ixl_free_mac_filters(vsi);
2849279858Sjfv}
2850279858Sjfv
2851279858Sjfvstatic void
2852279858Sjfvixl_free_mac_filters(struct ixl_vsi *vsi)
2853279858Sjfv{
2854279858Sjfv	struct ixl_mac_filter *f;
2855279858Sjfv
2856266423Sjfv	while (!SLIST_EMPTY(&vsi->ftl)) {
2857266423Sjfv		f = SLIST_FIRST(&vsi->ftl);
2858266423Sjfv		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2859266423Sjfv		free(f, M_DEVBUF);
2860266423Sjfv	}
2861266423Sjfv}
2862266423Sjfv
2863266423Sjfv
2864266423Sjfv/*********************************************************************
2865266423Sjfv *
2866266423Sjfv *  Allocate memory for the VSI (virtual station interface) and its
2867266423Sjfv *  associated queues, rings and the descriptors associated with each;
2868266423Sjfv *  called only once at attach.
2869266423Sjfv *
2870266423Sjfv **********************************************************************/
2871266423Sjfvstatic int
2872270346Sjfvixl_setup_stations(struct ixl_pf *pf)
2873266423Sjfv{
2874266423Sjfv	device_t		dev = pf->dev;
2875270346Sjfv	struct ixl_vsi		*vsi;
2876270346Sjfv	struct ixl_queue	*que;
2877266423Sjfv	struct tx_ring		*txr;
2878266423Sjfv	struct rx_ring		*rxr;
2879266423Sjfv	int 			rsize, tsize;
2880266423Sjfv	int			error = I40E_SUCCESS;
2881266423Sjfv
2882266423Sjfv	vsi = &pf->vsi;
2883266423Sjfv	vsi->back = (void *)pf;
2884266423Sjfv	vsi->hw = &pf->hw;
2885266423Sjfv	vsi->id = 0;
2886266423Sjfv	vsi->num_vlans = 0;
2887279858Sjfv	vsi->back = pf;
2888266423Sjfv
2889266423Sjfv	/* Get memory for the station queues */
2890266423Sjfv        if (!(vsi->queues =
2891270346Sjfv            (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2892266423Sjfv            vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2893266423Sjfv                device_printf(dev, "Unable to allocate queue memory\n");
2894266423Sjfv                error = ENOMEM;
2895266423Sjfv                goto early;
2896266423Sjfv        }
2897266423Sjfv
2898266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
2899266423Sjfv		que = &vsi->queues[i];
2900270346Sjfv		que->num_desc = ixl_ringsz;
2901266423Sjfv		que->me = i;
2902266423Sjfv		que->vsi = vsi;
2903269198Sjfv		/* mark the queue as active */
2904269198Sjfv		vsi->active_queues |= (u64)1 << que->me;
2905266423Sjfv		txr = &que->txr;
2906266423Sjfv		txr->que = que;
2907269198Sjfv		txr->tail = I40E_QTX_TAIL(que->me);
2908266423Sjfv
2909266423Sjfv		/* Initialize the TX lock */
2910266423Sjfv		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2911266423Sjfv		    device_get_nameunit(dev), que->me);
2912266423Sjfv		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2913266423Sjfv		/* Create the TX descriptor ring */
2914269198Sjfv		tsize = roundup2((que->num_desc *
2915269198Sjfv		    sizeof(struct i40e_tx_desc)) +
2916269198Sjfv		    sizeof(u32), DBA_ALIGN);
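		/*
		** The extra u32 beyond the descriptors backs the TX HEAD
		** writeback area programmed in ixl_initialize_vsi().
		*/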
2917271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
2918271834Sbz		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2919266423Sjfv			device_printf(dev,
2920266423Sjfv			    "Unable to allocate TX Descriptor memory\n");
2921266423Sjfv			error = ENOMEM;
2922266423Sjfv			goto fail;
2923266423Sjfv		}
2924266423Sjfv		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2925266423Sjfv		bzero((void *)txr->base, tsize);
2926266423Sjfv       		/* Now allocate transmit soft structs for the ring */
2927270346Sjfv       		if (ixl_allocate_tx_data(que)) {
2928266423Sjfv			device_printf(dev,
2929266423Sjfv			    "Critical Failure setting up TX structures\n");
2930266423Sjfv			error = ENOMEM;
2931266423Sjfv			goto fail;
2932266423Sjfv       		}
2933266423Sjfv		/* Allocate a buf ring */
2934266423Sjfv		txr->br = buf_ring_alloc(4096, M_DEVBUF,
2935266423Sjfv		    M_WAITOK, &txr->mtx);
2936266423Sjfv		if (txr->br == NULL) {
2937266423Sjfv			device_printf(dev,
2938266423Sjfv			    "Critical Failure setting up TX buf ring\n");
2939266423Sjfv			error = ENOMEM;
2940266423Sjfv			goto fail;
2941266423Sjfv       		}
2942266423Sjfv
2943266423Sjfv		/*
2944266423Sjfv		 * Next the RX queues...
2945266423Sjfv		 */
2946266423Sjfv		rsize = roundup2(que->num_desc *
2947266423Sjfv		    sizeof(union i40e_rx_desc), DBA_ALIGN);
2948266423Sjfv		rxr = &que->rxr;
2949266423Sjfv		rxr->que = que;
2950269198Sjfv		rxr->tail = I40E_QRX_TAIL(que->me);
2951266423Sjfv
2952266423Sjfv		/* Initialize the RX side lock */
2953266423Sjfv		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2954266423Sjfv		    device_get_nameunit(dev), que->me);
2955266423Sjfv		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2956266423Sjfv
2957271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
2958271834Sbz		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2959266423Sjfv			device_printf(dev,
2960266423Sjfv			    "Unable to allocate RX Descriptor memory\n");
2961266423Sjfv			error = ENOMEM;
2962266423Sjfv			goto fail;
2963266423Sjfv		}
2964266423Sjfv		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2965266423Sjfv		bzero((void *)rxr->base, rsize);
2966266423Sjfv
2967266423Sjfv        	/* Allocate receive soft structs for the ring*/
2968270346Sjfv		if (ixl_allocate_rx_data(que)) {
2969266423Sjfv			device_printf(dev,
2970266423Sjfv			    "Critical Failure setting up receive structs\n");
2971266423Sjfv			error = ENOMEM;
2972266423Sjfv			goto fail;
2973266423Sjfv		}
2974266423Sjfv	}
2975266423Sjfv
2976266423Sjfv	return (0);
2977266423Sjfv
2978266423Sjfvfail:
2979266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
2980266423Sjfv		que = &vsi->queues[i];
2981266423Sjfv		rxr = &que->rxr;
2982266423Sjfv		txr = &que->txr;
2983266423Sjfv		if (rxr->base)
2984271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2985266423Sjfv		if (txr->base)
2986271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
2987266423Sjfv	}
2988266423Sjfv
2989266423Sjfvearly:
2990266423Sjfv	return (error);
2991266423Sjfv}
2992266423Sjfv
2993266423Sjfv/*
2994266423Sjfv** Provide an update to the queue RX
2995266423Sjfv** interrupt moderation value.
2996266423Sjfv*/
2997266423Sjfvstatic void
2998270346Sjfvixl_set_queue_rx_itr(struct ixl_queue *que)
2999266423Sjfv{
3000270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3001266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3002266423Sjfv	struct rx_ring	*rxr = &que->rxr;
3003266423Sjfv	u16		rx_itr;
3004266423Sjfv	u16		rx_latency = 0;
3005266423Sjfv	int		rx_bytes;
3006266423Sjfv
3007266423Sjfv
3008266423Sjfv	/* Idle, do nothing */
3009266423Sjfv	if (rxr->bytes == 0)
3010266423Sjfv		return;
3011266423Sjfv
3012270346Sjfv	if (ixl_dynamic_rx_itr) {
3013266423Sjfv		rx_bytes = rxr->bytes/rxr->itr;
3014266423Sjfv		rx_itr = rxr->itr;
3015266423Sjfv
3016266423Sjfv		/* Adjust latency range */
3017266423Sjfv		switch (rxr->latency) {
3018270346Sjfv		case IXL_LOW_LATENCY:
3019266423Sjfv			if (rx_bytes > 10) {
3020270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3021270346Sjfv				rx_itr = IXL_ITR_20K;
3022266423Sjfv			}
3023266423Sjfv			break;
3024270346Sjfv		case IXL_AVE_LATENCY:
3025266423Sjfv			if (rx_bytes > 20) {
3026270346Sjfv				rx_latency = IXL_BULK_LATENCY;
3027270346Sjfv				rx_itr = IXL_ITR_8K;
3028266423Sjfv			} else if (rx_bytes <= 10) {
3029270346Sjfv				rx_latency = IXL_LOW_LATENCY;
3030270346Sjfv				rx_itr = IXL_ITR_100K;
3031266423Sjfv			}
3032266423Sjfv			break;
3033270346Sjfv		case IXL_BULK_LATENCY:
3034266423Sjfv			if (rx_bytes <= 20) {
3035270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3036270346Sjfv				rx_itr = IXL_ITR_20K;
3037266423Sjfv			}
3038266423Sjfv			break;
3039266423Sjfv		}
3040266423Sjfv
3041266423Sjfv		rxr->latency = rx_latency;
3042266423Sjfv
3043266423Sjfv		if (rx_itr != rxr->itr) {
3044266423Sjfv			/* do an exponential smoothing */
3045266423Sjfv			rx_itr = (10 * rx_itr * rxr->itr) /
3046266423Sjfv			    ((9 * rx_itr) + rxr->itr);
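			/*
			** This blend moves gradually from the previous ITR
			** toward the new target (e.g. old 100, target 40
			** yields ~87); when target == old it is a no-op.
			*/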
3047270346Sjfv			rxr->itr = rx_itr & IXL_MAX_ITR;
3048270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3049266423Sjfv			    que->me), rxr->itr);
3050266423Sjfv		}
3051266423Sjfv	} else { /* We may have toggled to non-dynamic */
3052270346Sjfv		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3053270346Sjfv			vsi->rx_itr_setting = ixl_rx_itr;
3054266423Sjfv		/* Update the hardware if needed */
3055266423Sjfv		if (rxr->itr != vsi->rx_itr_setting) {
3056266423Sjfv			rxr->itr = vsi->rx_itr_setting;
3057270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3058266423Sjfv			    que->me), rxr->itr);
3059266423Sjfv		}
3060266423Sjfv	}
3061266423Sjfv	rxr->bytes = 0;
3062266423Sjfv	rxr->packets = 0;
3063266423Sjfv	return;
3064266423Sjfv}
3065266423Sjfv
3066266423Sjfv
3067266423Sjfv/*
3068266423Sjfv** Provide an update to the queue TX
3069266423Sjfv** interrupt moderation value.
3070266423Sjfv*/
3071266423Sjfvstatic void
3072270346Sjfvixl_set_queue_tx_itr(struct ixl_queue *que)
3073266423Sjfv{
3074270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3075266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3076266423Sjfv	struct tx_ring	*txr = &que->txr;
3077266423Sjfv	u16		tx_itr;
3078266423Sjfv	u16		tx_latency = 0;
3079266423Sjfv	int		tx_bytes;
3080266423Sjfv
3081266423Sjfv
3082266423Sjfv	/* Idle, do nothing */
3083266423Sjfv	if (txr->bytes == 0)
3084266423Sjfv		return;
3085266423Sjfv
3086270346Sjfv	if (ixl_dynamic_tx_itr) {
3087266423Sjfv		tx_bytes = txr->bytes/txr->itr;
3088266423Sjfv		tx_itr = txr->itr;
3089266423Sjfv
3090266423Sjfv		switch (txr->latency) {
3091270346Sjfv		case IXL_LOW_LATENCY:
3092266423Sjfv			if (tx_bytes > 10) {
3093270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3094270346Sjfv				tx_itr = IXL_ITR_20K;
3095266423Sjfv			}
3096266423Sjfv			break;
3097270346Sjfv		case IXL_AVE_LATENCY:
3098266423Sjfv			if (tx_bytes > 20) {
3099270346Sjfv				tx_latency = IXL_BULK_LATENCY;
3100270346Sjfv				tx_itr = IXL_ITR_8K;
3101266423Sjfv			} else if (tx_bytes <= 10) {
3102270346Sjfv				tx_latency = IXL_LOW_LATENCY;
3103270346Sjfv				tx_itr = IXL_ITR_100K;
3104266423Sjfv			}
3105266423Sjfv			break;
3106270346Sjfv		case IXL_BULK_LATENCY:
3107266423Sjfv			if (tx_bytes <= 20) {
3108270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3109270346Sjfv				tx_itr = IXL_ITR_20K;
3110266423Sjfv			}
3111266423Sjfv			break;
3112266423Sjfv		}
3113266423Sjfv
3114266423Sjfv		txr->latency = tx_latency;
3115266423Sjfv
3116266423Sjfv		if (tx_itr != txr->itr) {
3117266423Sjfv			/* do an exponential smoothing */
3118266423Sjfv			tx_itr = (10 * tx_itr * txr->itr) /
3119266423Sjfv			    ((9 * tx_itr) + txr->itr);
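			/* Same smoothing as the RX path above. */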
3120270346Sjfv			txr->itr = tx_itr & IXL_MAX_ITR;
3121270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3122266423Sjfv			    que->me), txr->itr);
3123266423Sjfv		}
3124266423Sjfv
3125266423Sjfv	} else { /* We may have toggled to non-dynamic */
3126270346Sjfv		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3127270346Sjfv			vsi->tx_itr_setting = ixl_tx_itr;
3128266423Sjfv		/* Update the hardware if needed */
3129266423Sjfv		if (txr->itr != vsi->tx_itr_setting) {
3130266423Sjfv			txr->itr = vsi->tx_itr_setting;
3131270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3132266423Sjfv			    que->me), txr->itr);
3133266423Sjfv		}
3134266423Sjfv	}
3135266423Sjfv	txr->bytes = 0;
3136266423Sjfv	txr->packets = 0;
3137266423Sjfv	return;
3138266423Sjfv}
3139266423Sjfv
3140279858Sjfv#define QUEUE_NAME_LEN 32
3141266423Sjfv
3142266423Sjfvstatic void
3143279858Sjfvixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3144279858Sjfv    struct sysctl_ctx_list *ctx, const char *sysctl_name)
3145279858Sjfv{
3146279858Sjfv	struct sysctl_oid *tree;
3147279858Sjfv	struct sysctl_oid_list *child;
3148279858Sjfv	struct sysctl_oid_list *vsi_list;
3149279858Sjfv
3150279858Sjfv	tree = device_get_sysctl_tree(pf->dev);
3151279858Sjfv	child = SYSCTL_CHILDREN(tree);
3152279858Sjfv	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3153279858Sjfv				   CTLFLAG_RD, NULL, "VSI Number");
3154279858Sjfv	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3155279858Sjfv
3156279858Sjfv	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3157279858Sjfv}
3158279858Sjfv
3159279858Sjfvstatic void
3160270346Sjfvixl_add_hw_stats(struct ixl_pf *pf)
3161266423Sjfv{
3162266423Sjfv	device_t dev = pf->dev;
3163270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
3164270346Sjfv	struct ixl_queue *queues = vsi->queues;
3165269198Sjfv	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3166266423Sjfv
3167266423Sjfv	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3168266423Sjfv	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3169266423Sjfv	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3170279858Sjfv	struct sysctl_oid_list *vsi_list;
3171266423Sjfv
3172279858Sjfv	struct sysctl_oid *queue_node;
3173279858Sjfv	struct sysctl_oid_list *queue_list;
3174266423Sjfv
3175269198Sjfv	struct tx_ring *txr;
3176269198Sjfv	struct rx_ring *rxr;
3177279858Sjfv	char queue_namebuf[QUEUE_NAME_LEN];
3178266423Sjfv
3179266423Sjfv	/* Driver statistics */
3180266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3181266423Sjfv			CTLFLAG_RD, &pf->watchdog_events,
3182266423Sjfv			"Watchdog timeouts");
3183266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3184266423Sjfv			CTLFLAG_RD, &pf->admin_irq,
3185266423Sjfv			"Admin Queue IRQ Handled");
3186266423Sjfv
3187279858Sjfv	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "vc_debug_level",
3188279858Sjfv	    CTLFLAG_RW, &pf->vc_debug_lvl, 0,
3189279858Sjfv	    "PF/VF Virtual Channel debug logging level");
3190266423Sjfv
3191279858Sjfv	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3192279858Sjfv	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3193266423Sjfv
3194266423Sjfv	/* Queue statistics */
3195266423Sjfv	for (int q = 0; q < vsi->num_queues; q++) {
3196269198Sjfv		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3197279858Sjfv		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3198279858Sjfv		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3199266423Sjfv		queue_list = SYSCTL_CHILDREN(queue_node);
3200266423Sjfv
3201269198Sjfv		txr = &(queues[q].txr);
3202269198Sjfv		rxr = &(queues[q].rxr);
3203269198Sjfv
3204269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3205266423Sjfv				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3206266423Sjfv				"m_defrag() failed");
3207269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3208266423Sjfv				CTLFLAG_RD, &(queues[q].dropped_pkts),
3209266423Sjfv				"Driver dropped packets");
3210266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3211266423Sjfv				CTLFLAG_RD, &(queues[q].irqs),
3212266423Sjfv				"irqs on this queue");
3213269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3214266423Sjfv				CTLFLAG_RD, &(queues[q].tso),
3215266423Sjfv				"TSO");
3216269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3217266423Sjfv				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3218266423Sjfv				"Driver tx dma failure in xmit");
3219266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3220266423Sjfv				CTLFLAG_RD, &(txr->no_desc),
3221266423Sjfv				"Queue No Descriptor Available");
3222266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3223266423Sjfv				CTLFLAG_RD, &(txr->total_packets),
3224266423Sjfv				"Queue Packets Transmitted");
3225266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3226270346Sjfv				CTLFLAG_RD, &(txr->tx_bytes),
3227266423Sjfv				"Queue Bytes Transmitted");
3228266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3229266423Sjfv				CTLFLAG_RD, &(rxr->rx_packets),
3230266423Sjfv				"Queue Packets Received");
3231266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3232266423Sjfv				CTLFLAG_RD, &(rxr->rx_bytes),
3233266423Sjfv				"Queue Bytes Received");
3234266423Sjfv	}
3235266423Sjfv
3236266423Sjfv	/* MAC stats */
3237270346Sjfv	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3238266423Sjfv}
3239266423Sjfv
3240266423Sjfvstatic void
3241270346Sjfvixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3242266423Sjfv	struct sysctl_oid_list *child,
3243266423Sjfv	struct i40e_eth_stats *eth_stats)
3244266423Sjfv{
3245270346Sjfv	struct ixl_sysctl_info ctls[] =
3246266423Sjfv	{
3247266423Sjfv		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3248266423Sjfv		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3249266423Sjfv			"Unicast Packets Received"},
3250266423Sjfv		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3251266423Sjfv			"Multicast Packets Received"},
3252266423Sjfv		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3253266423Sjfv			"Broadcast Packets Received"},
3254269198Sjfv		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3255266423Sjfv		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3256266423Sjfv		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3257266423Sjfv		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3258266423Sjfv			"Multicast Packets Transmitted"},
3259266423Sjfv		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3260266423Sjfv			"Broadcast Packets Transmitted"},
3261266423Sjfv		// end
3262266423Sjfv		{0,0,0}
3263266423Sjfv	};
3264266423Sjfv
3265270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3266266423Sjfv	while (entry->stat != 0)
3267266423Sjfv	{
3268266423Sjfv		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3269266423Sjfv				CTLFLAG_RD, entry->stat,
3270266423Sjfv				entry->description);
3271266423Sjfv		entry++;
3272266423Sjfv	}
3273266423Sjfv}
3274266423Sjfv
3275266423Sjfvstatic void
3276270346Sjfvixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3277266423Sjfv	struct sysctl_oid_list *child,
3278266423Sjfv	struct i40e_hw_port_stats *stats)
3279266423Sjfv{
3280269198Sjfv	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3281266423Sjfv				    CTLFLAG_RD, NULL, "Mac Statistics");
3282266423Sjfv	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3283266423Sjfv
3284266423Sjfv	struct i40e_eth_stats *eth_stats = &stats->eth;
3285270346Sjfv	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3286266423Sjfv
3287270346Sjfv	struct ixl_sysctl_info ctls[] =
3288266423Sjfv	{
3289266423Sjfv		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3290266423Sjfv		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3291266423Sjfv		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3292266423Sjfv		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3293266423Sjfv		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3294266423Sjfv		/* Packet Reception Stats */
3295266423Sjfv		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3296266423Sjfv		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3297266423Sjfv		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3298266423Sjfv		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3299266423Sjfv		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3300266423Sjfv		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3301266423Sjfv		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3302266423Sjfv		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3303266423Sjfv		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3304266423Sjfv		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3305266423Sjfv		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3306266423Sjfv		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3307266423Sjfv		/* Packet Transmission Stats */
3308266423Sjfv		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3309266423Sjfv		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3310266423Sjfv		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3311266423Sjfv		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3312266423Sjfv		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3313266423Sjfv		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3314266423Sjfv		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3315266423Sjfv		/* Flow control */
3316266423Sjfv		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3317266423Sjfv		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3318266423Sjfv		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3319266423Sjfv		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3320266423Sjfv		/* End */
3321266423Sjfv		{0,0,0}
3322266423Sjfv	};
3323266423Sjfv
3324270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3325266423Sjfv	while (entry->stat != 0)
3326266423Sjfv	{
3327266423Sjfv		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3328266423Sjfv				CTLFLAG_RD, entry->stat,
3329266423Sjfv				entry->description);
3330266423Sjfv		entry++;
3331266423Sjfv	}
3332266423Sjfv}
3333266423Sjfv
3334266423Sjfv/*
3335270346Sjfv** ixl_config_rss - setup RSS
3336266423Sjfv**  - note this is done for the single vsi
3337266423Sjfv*/
3338270346Sjfvstatic void ixl_config_rss(struct ixl_vsi *vsi)
3339266423Sjfv{
3340270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3341266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3342266423Sjfv	u32		lut = 0;
3343277084Sjfv	u64		set_hena = 0, hena;
3344277084Sjfv	int		i, j, que_id;
3345277084Sjfv#ifdef RSS
3346277084Sjfv	u32		rss_hash_config;
3347277084Sjfv	u32		rss_seed[IXL_KEYSZ];
3348277084Sjfv#else
3349277084Sjfv	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3350277084Sjfv			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3351277084Sjfv			    0x35897377, 0x328b25e1, 0x4fa98922,
3352277084Sjfv			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3353277084Sjfv#endif
3354266423Sjfv
3355277084Sjfv#ifdef RSS
3356277084Sjfv        /* Fetch the configured RSS key */
3357277084Sjfv        rss_getkey((uint8_t *) &rss_seed);
3358277084Sjfv#endif
3359266423Sjfv
3360266423Sjfv	/* Fill out hash function seed */
3361277084Sjfv	for (i = 0; i < IXL_KEYSZ; i++)
3362277084Sjfv                wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3363266423Sjfv
3364266423Sjfv	/* Enable PCTYPES for RSS: */
3365277084Sjfv#ifdef RSS
3366277084Sjfv	rss_hash_config = rss_gethashconfig();
3367277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3368277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3369277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3370277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3371277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3372277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3373277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3374277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3375279033Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3376277151Sjfv		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3377277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3378277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3379277084Sjfv        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3380277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3381277084Sjfv#else
3382266423Sjfv	set_hena =
3383266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3384266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3385266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3386266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3387266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3388266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3389266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3390266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3391266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3392266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3393266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3394277084Sjfv#endif
3395266423Sjfv	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3396266423Sjfv	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3397266423Sjfv	hena |= set_hena;
3398266423Sjfv	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3399266423Sjfv	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
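	/*
	** The enabled PCTYPE set is a 64-bit mask split across the two
	** 32-bit HENA registers; the read-modify-write above preserves
	** any types already enabled.
	*/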
3400266423Sjfv
3401266423Sjfv	/* Populate the LUT, spreading all queues across the table in round-robin fashion */
3402266423Sjfv	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3403266423Sjfv		if (j == vsi->num_queues)
3404266423Sjfv			j = 0;
3405277084Sjfv#ifdef RSS
3406277084Sjfv		/*
3407277084Sjfv		 * Fetch the RSS bucket id for the given indirection entry.
3408277084Sjfv		 * Cap it at the number of configured buckets (which is
3409277084Sjfv		 * num_queues.)
3410277084Sjfv		 */
3411277084Sjfv		que_id = rss_get_indirection_to_bucket(i);
3412277262Sjfv		que_id = que_id % vsi->num_queues;
3413277084Sjfv#else
3414277084Sjfv		que_id = j;
3415277084Sjfv#endif
3416266423Sjfv		/* lut = 4-byte sliding window of 4 lut entries */
3417277084Sjfv		lut = (lut << 8) | (que_id &
3418266423Sjfv		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3419266423Sjfv		/* On i = 3, we have 4 entries in lut; write to the register */
3420266423Sjfv		if ((i & 3) == 3)
3421266423Sjfv			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3422266423Sjfv	}
3423270346Sjfv	ixl_flush(hw);
3424266423Sjfv}
3425266423Sjfv
3426266423Sjfv
3427266423Sjfv/*
3428266423Sjfv** This routine is run via a VLAN config EVENT;
3429266423Sjfv** it enables us to use the HW filter table since
3430266423Sjfv** we can get the VLAN id. This just creates the
3431266423Sjfv** entry in the soft version of the VFTA; init will
3432266423Sjfv** repopulate the real table.
3433266423Sjfv*/
3434266423Sjfvstatic void
3435270346Sjfvixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3436266423Sjfv{
3437270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3438266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3439270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3440266423Sjfv
3441266423Sjfv	if (ifp->if_softc !=  arg)   /* Not our event */
3442266423Sjfv		return;
3443266423Sjfv
3444266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3445266423Sjfv		return;
3446266423Sjfv
3447270346Sjfv	IXL_PF_LOCK(pf);
3448266423Sjfv	++vsi->num_vlans;
3449270346Sjfv	ixl_add_filter(vsi, hw->mac.addr, vtag);
3450270346Sjfv	IXL_PF_UNLOCK(pf);
3451266423Sjfv}
3452266423Sjfv
3453266423Sjfv/*
3454266423Sjfv** This routine is run via a VLAN
3455266423Sjfv** unconfig EVENT; it removes our entry
3456266423Sjfv** from the soft VFTA.
3457266423Sjfv*/
3458266423Sjfvstatic void
3459270346Sjfvixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3460266423Sjfv{
3461270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3462266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3463270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3464266423Sjfv
3465266423Sjfv	if (ifp->if_softc !=  arg)
3466266423Sjfv		return;
3467266423Sjfv
3468266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3469266423Sjfv		return;
3470266423Sjfv
3471270346Sjfv	IXL_PF_LOCK(pf);
3472266423Sjfv	--vsi->num_vlans;
3473270346Sjfv	ixl_del_filter(vsi, hw->mac.addr, vtag);
3474270346Sjfv	IXL_PF_UNLOCK(pf);
3475266423Sjfv}
3476266423Sjfv
3477266423Sjfv/*
3478266423Sjfv** This routine updates the VLAN filters; called by init,
3479266423Sjfv** it scans the filter table and then updates the HW
3480266423Sjfv** after a soft reset.
3481266423Sjfv*/
3482266423Sjfvstatic void
3483270346Sjfvixl_setup_vlan_filters(struct ixl_vsi *vsi)
3484266423Sjfv{
3485270346Sjfv	struct ixl_mac_filter	*f;
3486266423Sjfv	int			cnt = 0, flags;
3487266423Sjfv
3488266423Sjfv	if (vsi->num_vlans == 0)
3489266423Sjfv		return;
3490266423Sjfv	/*
3491266423Sjfv	** Scan the filter list for vlan entries,
3492266423Sjfv	** mark them for addition and then call
3493266423Sjfv	** for the AQ update.
3494266423Sjfv	*/
3495266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3496270346Sjfv		if (f->flags & IXL_FILTER_VLAN) {
3497266423Sjfv			f->flags |=
3498270346Sjfv			    (IXL_FILTER_ADD |
3499270346Sjfv			    IXL_FILTER_USED);
3500266423Sjfv			cnt++;
3501266423Sjfv		}
3502266423Sjfv	}
3503266423Sjfv	if (cnt == 0) {
3504266423Sjfv		printf("setup vlan: no filters found!\n");
3505266423Sjfv		return;
3506266423Sjfv	}
3507270346Sjfv	flags = IXL_FILTER_VLAN;
3508270346Sjfv	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3509270346Sjfv	ixl_add_hw_filters(vsi, flags, cnt);
3510266423Sjfv	return;
3511266423Sjfv}
3512266423Sjfv
3513266423Sjfv/*
3514266423Sjfv** Initialize filter list and add filters that the hardware
3515266423Sjfv** needs to know about.
3516266423Sjfv*/
3517266423Sjfvstatic void
3518270346Sjfvixl_init_filters(struct ixl_vsi *vsi)
3519266423Sjfv{
3520269198Sjfv	/* Add broadcast address */
3521279858Sjfv	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3522266423Sjfv}
3523266423Sjfv
3524266423Sjfv/*
3525266423Sjfv** This routine adds multicast filters
3526266423Sjfv*/
3527266423Sjfvstatic void
3528270346Sjfvixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3529266423Sjfv{
3530270346Sjfv	struct ixl_mac_filter *f;
3531266423Sjfv
3532266423Sjfv	/* Does one already exist */
3533270346Sjfv	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3534266423Sjfv	if (f != NULL)
3535266423Sjfv		return;
3536266423Sjfv
3537270346Sjfv	f = ixl_get_filter(vsi);
3538266423Sjfv	if (f == NULL) {
3539266423Sjfv		printf("WARNING: no filter available!!\n");
3540266423Sjfv		return;
3541266423Sjfv	}
3542266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3543270346Sjfv	f->vlan = IXL_VLAN_ANY;
3544270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3545270346Sjfv	    | IXL_FILTER_MC);
3546266423Sjfv
3547266423Sjfv	return;
3548266423Sjfv}
3549266423Sjfv
3550279858Sjfvstatic void
3551279858Sjfvixl_reconfigure_filters(struct ixl_vsi *vsi)
3552279858Sjfv{
3553279858Sjfv
3554279858Sjfv	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3555279858Sjfv}
3556279858Sjfv
3557266423Sjfv/*
3558266423Sjfv** This routine adds macvlan filters
3559266423Sjfv*/
3560266423Sjfvstatic void
3561270346Sjfvixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3562266423Sjfv{
3563270346Sjfv	struct ixl_mac_filter	*f, *tmp;
3564279858Sjfv	struct ixl_pf		*pf;
3565279858Sjfv	device_t		dev;
3566266423Sjfv
3567270346Sjfv	DEBUGOUT("ixl_add_filter: begin");
3568266423Sjfv
3569279858Sjfv	pf = vsi->back;
3570279858Sjfv	dev = pf->dev;
3571279858Sjfv
3572266423Sjfv	/* Does one already exist */
3573270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3574266423Sjfv	if (f != NULL)
3575266423Sjfv		return;
3576266423Sjfv	/*
3577266423Sjfv	** Is this the first vlan being registered? If so, we
3578266423Sjfv	** need to remove the ANY filter that indicates we are
3579266423Sjfv	** not in a vlan, and replace it with a 0 (untagged) filter.
3580266423Sjfv	*/
3581270346Sjfv	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3582270346Sjfv		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3583266423Sjfv		if (tmp != NULL) {
3584270346Sjfv			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3585270346Sjfv			ixl_add_filter(vsi, macaddr, 0);
3586266423Sjfv		}
3587266423Sjfv	}
3588266423Sjfv
3589270346Sjfv	f = ixl_get_filter(vsi);
3590266423Sjfv	if (f == NULL) {
3591266423Sjfv		device_printf(dev, "WARNING: no filter available!!\n");
3592266423Sjfv		return;
3593266423Sjfv	}
3594266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3595266423Sjfv	f->vlan = vlan;
3596270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3597270346Sjfv	if (f->vlan != IXL_VLAN_ANY)
3598270346Sjfv		f->flags |= IXL_FILTER_VLAN;
3599279858Sjfv	else
3600279858Sjfv		vsi->num_macs++;
3601266423Sjfv
3602270346Sjfv	ixl_add_hw_filters(vsi, f->flags, 1);
3603266423Sjfv	return;
3604266423Sjfv}
3605266423Sjfv
3606266423Sjfvstatic void
3607270346Sjfvixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3608266423Sjfv{
3609270346Sjfv	struct ixl_mac_filter *f;
3610266423Sjfv
3611270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3612266423Sjfv	if (f == NULL)
3613266423Sjfv		return;
3614266423Sjfv
3615270346Sjfv	f->flags |= IXL_FILTER_DEL;
3616270346Sjfv	ixl_del_hw_filters(vsi, 1);
3617279858Sjfv	vsi->num_macs--;
3618266423Sjfv
3619266423Sjfv	/* Check if this is the last vlan removal */
3620270346Sjfv	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3621266423Sjfv		/* Switch back to a non-vlan filter */
3622270346Sjfv		ixl_del_filter(vsi, macaddr, 0);
3623270346Sjfv		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3624266423Sjfv	}
3625266423Sjfv	return;
3626266423Sjfv}
3627266423Sjfv
3628266423Sjfv/*
3629266423Sjfv** Find the filter with both matching mac addr and vlan id
3630266423Sjfv*/
3631270346Sjfvstatic struct ixl_mac_filter *
3632270346Sjfvixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3633266423Sjfv{
3634270346Sjfv	struct ixl_mac_filter	*f;
3635266423Sjfv	bool			match = FALSE;
3636266423Sjfv
3637266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3638266423Sjfv		if (!cmp_etheraddr(f->macaddr, macaddr))
3639266423Sjfv			continue;
3640266423Sjfv		if (f->vlan == vlan) {
3641266423Sjfv			match = TRUE;
3642266423Sjfv			break;
3643266423Sjfv		}
3644266423Sjfv	}
3645266423Sjfv
3646266423Sjfv	if (!match)
3647266423Sjfv		f = NULL;
3648266423Sjfv	return (f);
3649266423Sjfv}
3650266423Sjfv
3651266423Sjfv/*
3652266423Sjfv** This routine takes additions to the vsi filter
3653266423Sjfv** table and creates an Admin Queue call to create
3654266423Sjfv** the filters in the hardware.
3655266423Sjfv*/
3656266423Sjfvstatic void
3657270346Sjfvixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3658266423Sjfv{
3659266423Sjfv	struct i40e_aqc_add_macvlan_element_data *a, *b;
3660270346Sjfv	struct ixl_mac_filter	*f;
3661279858Sjfv	struct ixl_pf		*pf;
3662279858Sjfv	struct i40e_hw		*hw;
3663279858Sjfv	device_t		dev;
3664279858Sjfv	int			err, j = 0;
3665266423Sjfv
3666279858Sjfv	pf = vsi->back;
3667279858Sjfv	dev = pf->dev;
3668279858Sjfv	hw = &pf->hw;
3669279858Sjfv	IXL_PF_LOCK_ASSERT(pf);
3670279858Sjfv
3671266423Sjfv	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3672266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
3673266423Sjfv	if (a == NULL) {
3674277084Sjfv		device_printf(dev, "add_hw_filters failed to get memory\n");
3675266423Sjfv		return;
3676266423Sjfv	}
3677266423Sjfv
3678266423Sjfv	/*
3679266423Sjfv	** Scan the filter list; each time we find one
3680266423Sjfv	** we add it to the admin queue array and turn off
3681266423Sjfv	** the add bit.
3682266423Sjfv	*/
3683266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3684266423Sjfv		if (f->flags == flags) {
3685266423Sjfv			b = &a[j]; // a pox on fvl long names :)
3686266423Sjfv			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
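			/*
			** IXL_VLAN_ANY entries are programmed to ignore the
			** VLAN tag; everything else requests an exact
			** MAC + VLAN (perfect) match.
			*/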
3687279858Sjfv			if (f->vlan == IXL_VLAN_ANY) {
3688279858Sjfv				b->vlan_tag = 0;
3689279858Sjfv				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3690279858Sjfv			} else {
3691279858Sjfv				b->vlan_tag = f->vlan;
3692279858Sjfv				b->flags = 0;
3693279858Sjfv			}
3694279858Sjfv			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3695270346Sjfv			f->flags &= ~IXL_FILTER_ADD;
3696266423Sjfv			j++;
3697266423Sjfv		}
3698266423Sjfv		if (j == cnt)
3699266423Sjfv			break;
3700266423Sjfv	}
3701266423Sjfv	if (j > 0) {
3702266423Sjfv		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3703266423Sjfv		if (err)
3704279033Sjfv			device_printf(dev, "aq_add_macvlan err %d, "
3705279033Sjfv			    "aq_error %d\n", err, hw->aq.asq_last_status);
3706266423Sjfv		else
3707266423Sjfv			vsi->hw_filters_add += j;
3708266423Sjfv	}
3709266423Sjfv	free(a, M_DEVBUF);
3710266423Sjfv	return;
3711266423Sjfv}
3712266423Sjfv
3713266423Sjfv/*
3714266423Sjfv** This routine takes removals in the vsi filter
3715266423Sjfv** table and creates an Admin Queue call to delete
3716266423Sjfv** the filters in the hardware.
3717266423Sjfv*/
3718266423Sjfvstatic void
3719270346Sjfvixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3720266423Sjfv{
3721266423Sjfv	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3722279858Sjfv	struct ixl_pf		*pf;
3723279858Sjfv	struct i40e_hw		*hw;
3724279858Sjfv	device_t		dev;
3725270346Sjfv	struct ixl_mac_filter	*f, *f_temp;
3726266423Sjfv	int			err, j = 0;
3727266423Sjfv
3728270346Sjfv	DEBUGOUT("ixl_del_hw_filters: begin\n");
3729266423Sjfv
3730279858Sjfv	pf = vsi->back;
3731279858Sjfv	hw = &pf->hw;
3732279858Sjfv	dev = pf->dev;
3733279858Sjfv
3734266423Sjfv	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3735266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
3736266423Sjfv	if (d == NULL) {
3737266423Sjfv		printf("del hw filter failed to get memory\n");
3738266423Sjfv		return;
3739266423Sjfv	}
3740266423Sjfv
3741266423Sjfv	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3742270346Sjfv		if (f->flags & IXL_FILTER_DEL) {
3743266423Sjfv			e = &d[j]; // a pox on fvl long names :)
3744266423Sjfv			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3745270346Sjfv			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3746266423Sjfv			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3747266423Sjfv			/* delete entry from vsi list */
3748270346Sjfv			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3749266423Sjfv			free(f, M_DEVBUF);
3750266423Sjfv			j++;
3751266423Sjfv		}
3752266423Sjfv		if (j == cnt)
3753266423Sjfv			break;
3754266423Sjfv	}
3755266423Sjfv	if (j > 0) {
3756266423Sjfv		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3757266423Sjfv		/* NOTE: returns ENOENT every time but seems to work fine,
3758266423Sjfv		   so we'll ignore that specific error. */
3759277084Sjfv		// TODO: Does this still occur on current firmwares?
3760266423Sjfv		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3761266423Sjfv			int sc = 0;
3762266423Sjfv			for (int i = 0; i < j; i++)
3763266423Sjfv				sc += (!d[i].error_code);
3764266423Sjfv			vsi->hw_filters_del += sc;
3765266423Sjfv			device_printf(dev,
3766266423Sjfv			    "Failed to remove %d/%d filters, aq error %d\n",
3767266423Sjfv			    j - sc, j, hw->aq.asq_last_status);
3768266423Sjfv		} else
3769266423Sjfv			vsi->hw_filters_del += j;
3770266423Sjfv	}
3771266423Sjfv	free(d, M_DEVBUF);
3772266423Sjfv
3773270346Sjfv	DEBUGOUT("ixl_del_hw_filters: end\n");
3774266423Sjfv	return;
3775266423Sjfv}
3776266423Sjfv
3777279858Sjfvstatic int
3778270346Sjfvixl_enable_rings(struct ixl_vsi *vsi)
3779266423Sjfv{
3780279858Sjfv	struct ixl_pf	*pf = vsi->back;
3781279858Sjfv	struct i40e_hw	*hw = &pf->hw;
3782279858Sjfv	int		index, error;
3783266423Sjfv	u32		reg;
3784266423Sjfv
3785279858Sjfv	error = 0;
3786266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3787279858Sjfv		index = vsi->first_queue + i;
3788279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, TRUE);
3789266423Sjfv
3790279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
3791266423Sjfv		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3792266423Sjfv		    I40E_QTX_ENA_QENA_STAT_MASK;
3793279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
3794266423Sjfv		/* Verify the enable took */
3795266423Sjfv		for (int j = 0; j < 10; j++) {
3796279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
3797266423Sjfv			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3798266423Sjfv				break;
3799266423Sjfv			i40e_msec_delay(10);
3800266423Sjfv		}
3801279858Sjfv		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3802279858Sjfv			device_printf(pf->dev, "TX queue %d disabled!\n",
3803279858Sjfv			    index);
3804279858Sjfv			error = ETIMEDOUT;
3805279858Sjfv		}
3806266423Sjfv
3807279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
3808266423Sjfv		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3809266423Sjfv		    I40E_QRX_ENA_QENA_STAT_MASK;
3810279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
3811266423Sjfv		/* Verify the enable took */
3812266423Sjfv		for (int j = 0; j < 10; j++) {
3813279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
3814266423Sjfv			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3815266423Sjfv				break;
3816266423Sjfv			i40e_msec_delay(10);
3817266423Sjfv		}
3818279858Sjfv		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3819279858Sjfv			device_printf(pf->dev, "RX queue %d disabled!\n",
3820279858Sjfv			    index);
3821279858Sjfv			error = ETIMEDOUT;
3822279858Sjfv		}
3823266423Sjfv	}
3824279858Sjfv
3825279858Sjfv	return (error);
3826266423Sjfv}
3827266423Sjfv
3828279858Sjfvstatic int
3829270346Sjfvixl_disable_rings(struct ixl_vsi *vsi)
3830266423Sjfv{
3831279858Sjfv	struct ixl_pf	*pf = vsi->back;
3832279858Sjfv	struct i40e_hw	*hw = &pf->hw;
3833279858Sjfv	int		index, error;
3834266423Sjfv	u32		reg;
3835266423Sjfv
3836279858Sjfv	error = 0;
3837266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3838279858Sjfv		index = vsi->first_queue + i;
3839279858Sjfv
3840279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, FALSE);
3841266423Sjfv		i40e_usec_delay(500);
3842266423Sjfv
3843279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
3844266423Sjfv		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3845279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
3846266423Sjfv		/* Verify the disable took */
3847266423Sjfv		for (int j = 0; j < 10; j++) {
3848279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
3849266423Sjfv			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3850266423Sjfv				break;
3851266423Sjfv			i40e_msec_delay(10);
3852266423Sjfv		}
3853279858Sjfv		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3854279858Sjfv			device_printf(pf->dev, "TX queue %d still enabled!\n",
3855279858Sjfv			    index);
3856279858Sjfv			error = ETIMEDOUT;
3857279858Sjfv		}
3858266423Sjfv
3859279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
3860266423Sjfv		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3861279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
3862266423Sjfv		/* Verify the disable took */
3863266423Sjfv		for (int j = 0; j < 10; j++) {
3864279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
3865266423Sjfv			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3866266423Sjfv				break;
3867266423Sjfv			i40e_msec_delay(10);
3868266423Sjfv		}
3869279858Sjfv		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3870279858Sjfv			device_printf(pf->dev, "RX queue %d still enabled!\n",
3871279858Sjfv			    index);
3872279858Sjfv			error = ETIMEDOUT;
3873279858Sjfv		}
3874266423Sjfv	}
3875279858Sjfv
3876279858Sjfv	return (error);
3877266423Sjfv}
3878266423Sjfv
3879269198Sjfv/**
3880270346Sjfv * ixl_handle_mdd_event
3881269198Sjfv *
3882269198Sjfv * Called from the interrupt handler to identify possibly malicious VFs
3883269198Sjfv * (it also detects events from the PF itself).
3884269198Sjfv **/
3885270346Sjfvstatic void ixl_handle_mdd_event(struct ixl_pf *pf)
3886269198Sjfv{
3887269198Sjfv	struct i40e_hw *hw = &pf->hw;
3888269198Sjfv	device_t dev = pf->dev;
3889269198Sjfv	bool mdd_detected = false;
3890269198Sjfv	bool pf_mdd_detected = false;
3891269198Sjfv	u32 reg;
3892269198Sjfv
3893269198Sjfv	/* find what triggered the MDD event */
3894269198Sjfv	reg = rd32(hw, I40E_GL_MDET_TX);
3895269198Sjfv	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3896269198Sjfv		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3897269198Sjfv				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3898269198Sjfv		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3899269198Sjfv				I40E_GL_MDET_TX_EVENT_SHIFT;
3900269198Sjfv		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3901269198Sjfv				I40E_GL_MDET_TX_QUEUE_SHIFT;
3902269198Sjfv		device_printf(dev,
3903269198Sjfv			 "Malicious Driver Detection event 0x%02x"
3904269198Sjfv			 " on TX queue %d pf number 0x%02x\n",
3905269198Sjfv			 event, queue, pf_num);
3906269198Sjfv		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3907269198Sjfv		mdd_detected = true;
3908269198Sjfv	}
3909269198Sjfv	reg = rd32(hw, I40E_GL_MDET_RX);
3910269198Sjfv	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3911269198Sjfv		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3912269198Sjfv				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3913269198Sjfv		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3914269198Sjfv				I40E_GL_MDET_RX_EVENT_SHIFT;
3915269198Sjfv		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3916269198Sjfv				I40E_GL_MDET_RX_QUEUE_SHIFT;
3917269198Sjfv		device_printf(dev,
3918269198Sjfv			 "Malicious Driver Detection event 0x%02x"
3919269198Sjfv			 " on RX queue %d of function 0x%02x\n",
3920269198Sjfv			 event, queue, func);
3921269198Sjfv		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3922269198Sjfv		mdd_detected = true;
3923269198Sjfv	}
3924269198Sjfv
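	/*
	** The GL_MDET registers above report events globally; the PF_MDET
	** checks below determine whether this particular PF was the one
	** that triggered the event.
	*/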
3925269198Sjfv	if (mdd_detected) {
3926269198Sjfv		reg = rd32(hw, I40E_PF_MDET_TX);
3927269198Sjfv		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3928269198Sjfv			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3929269198Sjfv			device_printf(dev,
3930269198Sjfv				 "MDD TX event is for this function 0x%08x",
3931269198Sjfv				 reg);
3932269198Sjfv			pf_mdd_detected = true;
3933269198Sjfv		}
3934269198Sjfv		reg = rd32(hw, I40E_PF_MDET_RX);
3935269198Sjfv		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3936269198Sjfv			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3937269198Sjfv			device_printf(dev,
3938269198Sjfv				 "MDD RX event is for this function 0x%08x",
3939269198Sjfv				 reg);
3940269198Sjfv			pf_mdd_detected = true;
3941269198Sjfv		}
3942269198Sjfv	}
3943269198Sjfv
3944269198Sjfv	/* re-enable mdd interrupt cause */
3945269198Sjfv	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3946269198Sjfv	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3947269198Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3948270346Sjfv	ixl_flush(hw);
3949269198Sjfv}
3950269198Sjfv
3951266423Sjfvstatic void
3952270346Sjfvixl_enable_intr(struct ixl_vsi *vsi)
3953266423Sjfv{
3954266423Sjfv	struct i40e_hw		*hw = vsi->hw;
3955270346Sjfv	struct ixl_queue	*que = vsi->queues;
3956266423Sjfv
3957270346Sjfv	if (ixl_enable_msix) {
3958270346Sjfv		ixl_enable_adminq(hw);
3959266423Sjfv		for (int i = 0; i < vsi->num_queues; i++, que++)
3960270346Sjfv			ixl_enable_queue(hw, que->me);
3961266423Sjfv	} else
3962270346Sjfv		ixl_enable_legacy(hw);
3963266423Sjfv}
3964266423Sjfv
3965266423Sjfvstatic void
3966279858Sjfvixl_disable_rings_intr(struct ixl_vsi *vsi)
3967266423Sjfv{
3968266423Sjfv	struct i40e_hw		*hw = vsi->hw;
3969270346Sjfv	struct ixl_queue	*que = vsi->queues;
3970266423Sjfv
3971279858Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++)
3972279858Sjfv		ixl_disable_queue(hw, que->me);
3973279858Sjfv}
3974279858Sjfv
3975279858Sjfvstatic void
3976279858Sjfvixl_disable_intr(struct ixl_vsi *vsi)
3977279858Sjfv{
3978279858Sjfv	struct i40e_hw		*hw = vsi->hw;
3979279858Sjfv
3980279858Sjfv	if (ixl_enable_msix)
3981270346Sjfv		ixl_disable_adminq(hw);
3982279858Sjfv	else
3983270346Sjfv		ixl_disable_legacy(hw);
3984266423Sjfv}
3985266423Sjfv
3986266423Sjfvstatic void
3987270346Sjfvixl_enable_adminq(struct i40e_hw *hw)
3988266423Sjfv{
3989266423Sjfv	u32		reg;
3990266423Sjfv
3991266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3992266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3993270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3994266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3995270346Sjfv	ixl_flush(hw);
3996266423Sjfv	return;
3997266423Sjfv}
3998266423Sjfv
3999266423Sjfvstatic void
4000270346Sjfvixl_disable_adminq(struct i40e_hw *hw)
4001266423Sjfv{
4002266423Sjfv	u32		reg;
4003266423Sjfv
4004270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4005266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4006266423Sjfv
4007266423Sjfv	return;
4008266423Sjfv}
4009266423Sjfv
4010266423Sjfvstatic void
4011270346Sjfvixl_enable_queue(struct i40e_hw *hw, int id)
4012266423Sjfv{
4013266423Sjfv	u32		reg;
4014266423Sjfv
4015266423Sjfv	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4016266423Sjfv	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4017270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4018266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4019266423Sjfv}
4020266423Sjfv
4021266423Sjfvstatic void
4022270346Sjfvixl_disable_queue(struct i40e_hw *hw, int id)
4023266423Sjfv{
4024266423Sjfv	u32		reg;
4025266423Sjfv
4026270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4027266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4028266423Sjfv
4029266423Sjfv	return;
4030266423Sjfv}
4031266423Sjfv
4032266423Sjfvstatic void
4033270346Sjfvixl_enable_legacy(struct i40e_hw *hw)
4034266423Sjfv{
4035266423Sjfv	u32		reg;
4036266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4037266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4038270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4039266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4040266423Sjfv}
4041266423Sjfv
4042266423Sjfvstatic void
4043270346Sjfvixl_disable_legacy(struct i40e_hw *hw)
4044266423Sjfv{
4045266423Sjfv	u32		reg;
4046266423Sjfv
4047270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4048266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4049266423Sjfv
4050266423Sjfv	return;
4051266423Sjfv}
4052266423Sjfv
4053266423Sjfvstatic void
4054270346Sjfvixl_update_stats_counters(struct ixl_pf *pf)
4055266423Sjfv{
4056266423Sjfv	struct i40e_hw	*hw = &pf->hw;
4057279858Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
4058279858Sjfv	struct ixl_vf	*vf;
4059269198Sjfv
4060266423Sjfv	struct i40e_hw_port_stats *nsd = &pf->stats;
4061266423Sjfv	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4062266423Sjfv
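	/*
	** The ixl_stat_update32/48 helpers accumulate the delta between the
	** live register and the snapshot kept in stats_offsets, so totals
	** are relative to driver load (and, it is assumed, tolerant of
	** hardware counter wrap).
	*/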
4063266423Sjfv	/* Update hw stats */
4064270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4065266423Sjfv			   pf->stat_offsets_loaded,
4066266423Sjfv			   &osd->crc_errors, &nsd->crc_errors);
4067270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4068266423Sjfv			   pf->stat_offsets_loaded,
4069266423Sjfv			   &osd->illegal_bytes, &nsd->illegal_bytes);
4070270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4071266423Sjfv			   I40E_GLPRT_GORCL(hw->port),
4072266423Sjfv			   pf->stat_offsets_loaded,
4073266423Sjfv			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4074270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4075266423Sjfv			   I40E_GLPRT_GOTCL(hw->port),
4076266423Sjfv			   pf->stat_offsets_loaded,
4077266423Sjfv			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4078270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4079266423Sjfv			   pf->stat_offsets_loaded,
4080266423Sjfv			   &osd->eth.rx_discards,
4081266423Sjfv			   &nsd->eth.rx_discards);
4082270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4083266423Sjfv			   I40E_GLPRT_UPRCL(hw->port),
4084266423Sjfv			   pf->stat_offsets_loaded,
4085266423Sjfv			   &osd->eth.rx_unicast,
4086266423Sjfv			   &nsd->eth.rx_unicast);
4087270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4088266423Sjfv			   I40E_GLPRT_UPTCL(hw->port),
4089266423Sjfv			   pf->stat_offsets_loaded,
4090266423Sjfv			   &osd->eth.tx_unicast,
4091266423Sjfv			   &nsd->eth.tx_unicast);
4092270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4093266423Sjfv			   I40E_GLPRT_MPRCL(hw->port),
4094266423Sjfv			   pf->stat_offsets_loaded,
4095266423Sjfv			   &osd->eth.rx_multicast,
4096266423Sjfv			   &nsd->eth.rx_multicast);
4097270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4098266423Sjfv			   I40E_GLPRT_MPTCL(hw->port),
4099266423Sjfv			   pf->stat_offsets_loaded,
4100266423Sjfv			   &osd->eth.tx_multicast,
4101266423Sjfv			   &nsd->eth.tx_multicast);
4102270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4103266423Sjfv			   I40E_GLPRT_BPRCL(hw->port),
4104266423Sjfv			   pf->stat_offsets_loaded,
4105266423Sjfv			   &osd->eth.rx_broadcast,
4106266423Sjfv			   &nsd->eth.rx_broadcast);
4107270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4108266423Sjfv			   I40E_GLPRT_BPTCL(hw->port),
4109266423Sjfv			   pf->stat_offsets_loaded,
4110266423Sjfv			   &osd->eth.tx_broadcast,
4111266423Sjfv			   &nsd->eth.tx_broadcast);
4112266423Sjfv
4113270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4114266423Sjfv			   pf->stat_offsets_loaded,
4115266423Sjfv			   &osd->tx_dropped_link_down,
4116266423Sjfv			   &nsd->tx_dropped_link_down);
4117270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4118266423Sjfv			   pf->stat_offsets_loaded,
4119266423Sjfv			   &osd->mac_local_faults,
4120266423Sjfv			   &nsd->mac_local_faults);
4121270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4122266423Sjfv			   pf->stat_offsets_loaded,
4123266423Sjfv			   &osd->mac_remote_faults,
4124266423Sjfv			   &nsd->mac_remote_faults);
4125270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4126266423Sjfv			   pf->stat_offsets_loaded,
4127266423Sjfv			   &osd->rx_length_errors,
4128266423Sjfv			   &nsd->rx_length_errors);
4129266423Sjfv
4130269198Sjfv	/* Flow control (LFC) stats */
4131270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4132266423Sjfv			   pf->stat_offsets_loaded,
4133266423Sjfv			   &osd->link_xon_rx, &nsd->link_xon_rx);
4134270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4135266423Sjfv			   pf->stat_offsets_loaded,
4136266423Sjfv			   &osd->link_xon_tx, &nsd->link_xon_tx);
4137270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4138266423Sjfv			   pf->stat_offsets_loaded,
4139266423Sjfv			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
4140270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4141266423Sjfv			   pf->stat_offsets_loaded,
4142266423Sjfv			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
4143266423Sjfv
4144269198Sjfv	/* Packet size stats rx */
4145270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4146266423Sjfv			   I40E_GLPRT_PRC64L(hw->port),
4147266423Sjfv			   pf->stat_offsets_loaded,
4148266423Sjfv			   &osd->rx_size_64, &nsd->rx_size_64);
4149270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4150266423Sjfv			   I40E_GLPRT_PRC127L(hw->port),
4151266423Sjfv			   pf->stat_offsets_loaded,
4152266423Sjfv			   &osd->rx_size_127, &nsd->rx_size_127);
4153270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4154266423Sjfv			   I40E_GLPRT_PRC255L(hw->port),
4155266423Sjfv			   pf->stat_offsets_loaded,
4156266423Sjfv			   &osd->rx_size_255, &nsd->rx_size_255);
4157270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4158266423Sjfv			   I40E_GLPRT_PRC511L(hw->port),
4159266423Sjfv			   pf->stat_offsets_loaded,
4160266423Sjfv			   &osd->rx_size_511, &nsd->rx_size_511);
4161270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4162266423Sjfv			   I40E_GLPRT_PRC1023L(hw->port),
4163266423Sjfv			   pf->stat_offsets_loaded,
4164266423Sjfv			   &osd->rx_size_1023, &nsd->rx_size_1023);
4165270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4166266423Sjfv			   I40E_GLPRT_PRC1522L(hw->port),
4167266423Sjfv			   pf->stat_offsets_loaded,
4168266423Sjfv			   &osd->rx_size_1522, &nsd->rx_size_1522);
4169270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4170266423Sjfv			   I40E_GLPRT_PRC9522L(hw->port),
4171266423Sjfv			   pf->stat_offsets_loaded,
4172266423Sjfv			   &osd->rx_size_big, &nsd->rx_size_big);
4173266423Sjfv
4174269198Sjfv	/* Packet size stats tx */
4175270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4176266423Sjfv			   I40E_GLPRT_PTC64L(hw->port),
4177266423Sjfv			   pf->stat_offsets_loaded,
4178266423Sjfv			   &osd->tx_size_64, &nsd->tx_size_64);
4179270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4180266423Sjfv			   I40E_GLPRT_PTC127L(hw->port),
4181266423Sjfv			   pf->stat_offsets_loaded,
4182266423Sjfv			   &osd->tx_size_127, &nsd->tx_size_127);
4183270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4184266423Sjfv			   I40E_GLPRT_PTC255L(hw->port),
4185266423Sjfv			   pf->stat_offsets_loaded,
4186266423Sjfv			   &osd->tx_size_255, &nsd->tx_size_255);
4187270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4188266423Sjfv			   I40E_GLPRT_PTC511L(hw->port),
4189266423Sjfv			   pf->stat_offsets_loaded,
4190266423Sjfv			   &osd->tx_size_511, &nsd->tx_size_511);
4191270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4192266423Sjfv			   I40E_GLPRT_PTC1023L(hw->port),
4193266423Sjfv			   pf->stat_offsets_loaded,
4194266423Sjfv			   &osd->tx_size_1023, &nsd->tx_size_1023);
4195270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4196266423Sjfv			   I40E_GLPRT_PTC1522L(hw->port),
4197266423Sjfv			   pf->stat_offsets_loaded,
4198266423Sjfv			   &osd->tx_size_1522, &nsd->tx_size_1522);
4199270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4200266423Sjfv			   I40E_GLPRT_PTC9522L(hw->port),
4201266423Sjfv			   pf->stat_offsets_loaded,
4202266423Sjfv			   &osd->tx_size_big, &nsd->tx_size_big);
4203266423Sjfv
4204270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4205266423Sjfv			   pf->stat_offsets_loaded,
4206266423Sjfv			   &osd->rx_undersize, &nsd->rx_undersize);
4207270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4208266423Sjfv			   pf->stat_offsets_loaded,
4209266423Sjfv			   &osd->rx_fragments, &nsd->rx_fragments);
4210270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4211266423Sjfv			   pf->stat_offsets_loaded,
4212266423Sjfv			   &osd->rx_oversize, &nsd->rx_oversize);
4213270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4214266423Sjfv			   pf->stat_offsets_loaded,
4215266423Sjfv			   &osd->rx_jabber, &nsd->rx_jabber);
4216266423Sjfv	pf->stat_offsets_loaded = true;
4217269198Sjfv	/* End hw stats */
4218266423Sjfv
4219266423Sjfv	/* Update vsi stats */
4220279858Sjfv	ixl_update_vsi_stats(vsi);
4221266423Sjfv
4222279858Sjfv	for (int i = 0; i < pf->num_vfs; i++) {
4223279858Sjfv		vf = &pf->vfs[i];
4224279858Sjfv		if (vf->vf_flags & VF_FLAG_ENABLED)
4225279858Sjfv			ixl_update_eth_stats(&pf->vfs[i].vsi);
4226279858Sjfv	}
4227266423Sjfv}
4228266423Sjfv
4229266423Sjfv/*
4230266423Sjfv** Task handler for the MSIX Admin Queue interrupt
4231266423Sjfv**  - deferred to a taskqueue since processing might sleep
4232266423Sjfv*/
4233266423Sjfvstatic void
4234270346Sjfvixl_do_adminq(void *context, int pending)
4235266423Sjfv{
4236270346Sjfv	struct ixl_pf			*pf = context;
4237266423Sjfv	struct i40e_hw			*hw = &pf->hw;
4238270346Sjfv	struct ixl_vsi			*vsi = &pf->vsi;
4239266423Sjfv	struct i40e_arq_event_info	event;
4240266423Sjfv	i40e_status			ret;
4241266423Sjfv	u32				reg, loop = 0;
4242266423Sjfv	u16				opcode, result;
4243266423Sjfv
4244274205Sjfv	event.buf_len = IXL_AQ_BUF_SZ;
4245274205Sjfv	event.msg_buf = malloc(event.buf_len,
4246266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
4247266423Sjfv	if (!event.msg_buf) {
4248266423Sjfv		device_printf(pf->dev, "Unable to allocate adminq memory\n");
4249266423Sjfv		return;
4250266423Sjfv	}
4251266423Sjfv
4252279858Sjfv	IXL_PF_LOCK(pf);
4253266423Sjfv	/* clean and process any events */
4254266423Sjfv	do {
4255266423Sjfv		ret = i40e_clean_arq_element(hw, &event, &result);
4256266423Sjfv		if (ret)
4257266423Sjfv			break;
4258266423Sjfv		opcode = LE16_TO_CPU(event.desc.opcode);
4259266423Sjfv		switch (opcode) {
4260266423Sjfv		case i40e_aqc_opc_get_link_status:
4261279858Sjfv			ixl_link_event(pf, &event);
4262270346Sjfv			ixl_update_link_status(pf);
4263266423Sjfv			break;
4264266423Sjfv		case i40e_aqc_opc_send_msg_to_pf:
4265279858Sjfv#ifdef PCI_IOV
4266279858Sjfv			ixl_handle_vf_msg(pf, &event);
4267279858Sjfv#endif
4268266423Sjfv			break;
4269266423Sjfv		case i40e_aqc_opc_event_lan_overflow:
4270266423Sjfv			break;
4271266423Sjfv		default:
4272270346Sjfv#ifdef IXL_DEBUG
4273266423Sjfv			printf("AdminQ unknown event %x\n", opcode);
4274266423Sjfv#endif
4275266423Sjfv			break;
4276266423Sjfv		}
4277266423Sjfv
4278270346Sjfv	} while (result && (loop++ < IXL_ADM_LIMIT));
4279266423Sjfv
4280266423Sjfv	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4281269198Sjfv	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4282266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4283266423Sjfv	free(event.msg_buf, M_DEVBUF);
4284266423Sjfv
4285279858Sjfv	/*
4286279858Sjfv	 * If there are still messages to process, reschedule ourselves.
4287279858Sjfv	 * Otherwise, re-enable our interrupt and go to sleep.
4288279858Sjfv	 */
4289279858Sjfv	if (result > 0)
4290279858Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
4291266423Sjfv	else
4292270346Sjfv		ixl_enable_intr(vsi);
4293279858Sjfv
4294279858Sjfv	IXL_PF_UNLOCK(pf);
4295266423Sjfv}
4296266423Sjfv
4297266423Sjfvstatic int
4298270346Sjfvixl_debug_info(SYSCTL_HANDLER_ARGS)
4299266423Sjfv{
4300270346Sjfv	struct ixl_pf	*pf;
4301266423Sjfv	int		error, input = 0;
4302266423Sjfv
4303266423Sjfv	error = sysctl_handle_int(oidp, &input, 0, req);
4304266423Sjfv
4305266423Sjfv	if (error || !req->newptr)
4306266423Sjfv		return (error);
4307266423Sjfv
4308266423Sjfv	if (input == 1) {
4309270346Sjfv		pf = (struct ixl_pf *)arg1;
4310270346Sjfv		ixl_print_debug_info(pf);
4311266423Sjfv	}
4312266423Sjfv
4313266423Sjfv	return (error);
4314266423Sjfv}
4315266423Sjfv
4316266423Sjfvstatic void
4317270346Sjfvixl_print_debug_info(struct ixl_pf *pf)
4318266423Sjfv{
4319266423Sjfv	struct i40e_hw		*hw = &pf->hw;
4320270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
4321270346Sjfv	struct ixl_queue	*que = vsi->queues;
4322266423Sjfv	struct rx_ring		*rxr = &que->rxr;
4323266423Sjfv	struct tx_ring		*txr = &que->txr;
4324266423Sjfv	u32			reg;
4325266423Sjfv
4326266423Sjfv
4327270799Sbz	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4328270799Sbz	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4329266423Sjfv	printf("RX next check = %x\n", rxr->next_check);
4330270799Sbz	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4331270799Sbz	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4332266423Sjfv	printf("TX desc avail = %x\n", txr->avail);
4333266423Sjfv
4334266423Sjfv	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4335266423Sjfv	 printf("RX Bytes = %x\n", reg);
4336266423Sjfv	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4337266423Sjfv	 printf("Port RX Bytes = %x\n", reg);
4338266423Sjfv	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4339266423Sjfv	 printf("RX discard = %x\n", reg);
4340266423Sjfv	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4341266423Sjfv	 printf("Port RX discard = %x\n", reg);
4342266423Sjfv
4343266423Sjfv	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4344266423Sjfv	 printf("TX errors = %x\n", reg);
4345266423Sjfv	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4346266423Sjfv	 printf("TX Bytes = %x\n", reg);
4347266423Sjfv
4348266423Sjfv	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4349266423Sjfv	 printf("RX undersize = %x\n", reg);
4350266423Sjfv	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4351266423Sjfv	 printf("RX fragments = %x\n", reg);
4352266423Sjfv	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4353266423Sjfv	 printf("RX oversize = %x\n", reg);
4354266423Sjfv	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4355266423Sjfv	 printf("RX length error = %x\n", reg);
4356266423Sjfv	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4357266423Sjfv	 printf("mac remote fault = %x\n", reg);
4358266423Sjfv	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4359266423Sjfv	 printf("mac local fault = %x\n", reg);
4360266423Sjfv}
4361266423Sjfv
4362266423Sjfv/**
4363266423Sjfv * Update VSI-specific ethernet statistics counters.
4364266423Sjfv **/
4365270346Sjfvvoid ixl_update_eth_stats(struct ixl_vsi *vsi)
4366266423Sjfv{
4367270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4368266423Sjfv	struct i40e_hw *hw = &pf->hw;
4369266423Sjfv	struct i40e_eth_stats *es;
4370266423Sjfv	struct i40e_eth_stats *oes;
4371272227Sglebius	struct i40e_hw_port_stats *nsd;
4372266423Sjfv	u16 stat_idx = vsi->info.stat_counter_idx;
4373266423Sjfv
4374266423Sjfv	es = &vsi->eth_stats;
4375266423Sjfv	oes = &vsi->eth_stats_offsets;
4376272227Sglebius	nsd = &pf->stats;
4377266423Sjfv
4378266423Sjfv	/* Gather up the stats that the hw collects */
4379270346Sjfv	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4380266423Sjfv			   vsi->stat_offsets_loaded,
4381266423Sjfv			   &oes->tx_errors, &es->tx_errors);
4382270346Sjfv	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4383266423Sjfv			   vsi->stat_offsets_loaded,
4384266423Sjfv			   &oes->rx_discards, &es->rx_discards);
4385266423Sjfv
4386270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4387266423Sjfv			   I40E_GLV_GORCL(stat_idx),
4388266423Sjfv			   vsi->stat_offsets_loaded,
4389266423Sjfv			   &oes->rx_bytes, &es->rx_bytes);
4390270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4391266423Sjfv			   I40E_GLV_UPRCL(stat_idx),
4392266423Sjfv			   vsi->stat_offsets_loaded,
4393266423Sjfv			   &oes->rx_unicast, &es->rx_unicast);
4394270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4395266423Sjfv			   I40E_GLV_MPRCL(stat_idx),
4396266423Sjfv			   vsi->stat_offsets_loaded,
4397266423Sjfv			   &oes->rx_multicast, &es->rx_multicast);
4398270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4399266423Sjfv			   I40E_GLV_BPRCL(stat_idx),
4400266423Sjfv			   vsi->stat_offsets_loaded,
4401266423Sjfv			   &oes->rx_broadcast, &es->rx_broadcast);
4402266423Sjfv
4403270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4404266423Sjfv			   I40E_GLV_GOTCL(stat_idx),
4405266423Sjfv			   vsi->stat_offsets_loaded,
4406266423Sjfv			   &oes->tx_bytes, &es->tx_bytes);
4407270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4408266423Sjfv			   I40E_GLV_UPTCL(stat_idx),
4409266423Sjfv			   vsi->stat_offsets_loaded,
4410266423Sjfv			   &oes->tx_unicast, &es->tx_unicast);
4411270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4412266423Sjfv			   I40E_GLV_MPTCL(stat_idx),
4413266423Sjfv			   vsi->stat_offsets_loaded,
4414266423Sjfv			   &oes->tx_multicast, &es->tx_multicast);
4415270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4416266423Sjfv			   I40E_GLV_BPTCL(stat_idx),
4417266423Sjfv			   vsi->stat_offsets_loaded,
4418266423Sjfv			   &oes->tx_broadcast, &es->tx_broadcast);
4419266423Sjfv	vsi->stat_offsets_loaded = true;
4420279858Sjfv}
4421269198Sjfv
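/*
** ixl_update_vsi_stats:
**	Refresh the VSI ethernet counters and fold them, together with
**	the relevant port-level error counters, into the ifnet
**	statistics reported to userland.
*/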
4422279858Sjfvstatic void
4423279858Sjfvixl_update_vsi_stats(struct ixl_vsi *vsi)
4424279858Sjfv{
4425279858Sjfv	struct ixl_pf		*pf;
4426279858Sjfv	struct ifnet		*ifp;
4427279858Sjfv	struct i40e_eth_stats	*es;
4428279858Sjfv	u64			tx_discards;
4429279858Sjfv
4430279858Sjfv	struct i40e_hw_port_stats *nsd;
4431279858Sjfv
4432279858Sjfv	pf = vsi->back;
4433279858Sjfv	ifp = vsi->ifp;
4434279858Sjfv	es = &vsi->eth_stats;
4435279858Sjfv	nsd = &pf->stats;
4436279858Sjfv
4437279858Sjfv	ixl_update_eth_stats(vsi);
4438279858Sjfv
4439272227Sglebius	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4440279858Sjfv	for (int i = 0; i < vsi->num_queues; i++)
4441272227Sglebius		tx_discards += vsi->queues[i].txr.br->br_drops;
4442272227Sglebius
4443269198Sjfv	/* Update ifnet stats */
4444272227Sglebius	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4445269198Sjfv	                   es->rx_multicast +
4446272227Sglebius			   es->rx_broadcast);
4447272227Sglebius	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4448269198Sjfv	                   es->tx_multicast +
4449272227Sglebius			   es->tx_broadcast);
4450272227Sglebius	IXL_SET_IBYTES(vsi, es->rx_bytes);
4451272227Sglebius	IXL_SET_OBYTES(vsi, es->tx_bytes);
4452272227Sglebius	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4453272227Sglebius	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4454269198Sjfv
4455279858Sjfv	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4456279858Sjfv	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4457279858Sjfv	    nsd->rx_jabber);
4458272227Sglebius	IXL_SET_OERRORS(vsi, es->tx_errors);
4459272227Sglebius	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4460272227Sglebius	IXL_SET_OQDROPS(vsi, tx_discards);
4461272227Sglebius	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4462272227Sglebius	IXL_SET_COLLISIONS(vsi, 0);
4463266423Sjfv}
4464266423Sjfv
4465266423Sjfv/**
4466266423Sjfv * Reset all of the stats for the given pf
4467266423Sjfv **/
4468270346Sjfvvoid ixl_pf_reset_stats(struct ixl_pf *pf)
4469266423Sjfv{
4470266423Sjfv	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4471266423Sjfv	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4472266423Sjfv	pf->stat_offsets_loaded = false;
4473266423Sjfv}
4474266423Sjfv
4475266423Sjfv/**
4476266423Sjfv * Resets all stats of the given vsi
4477266423Sjfv **/
4478270346Sjfvvoid ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4479266423Sjfv{
4480266423Sjfv	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4481266423Sjfv	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4482266423Sjfv	vsi->stat_offsets_loaded = false;
4483266423Sjfv}
4484266423Sjfv
4485266423Sjfv/**
4486266423Sjfv * Read and update a 48 bit stat from the hw
4487266423Sjfv *
4488266423Sjfv * Since the device stats are not reset at PFReset, they likely will not
4489266423Sjfv * be zeroed when the driver starts.  We'll save the first values read
4490266423Sjfv * and use them as offsets to be subtracted from the raw values in order
4491266423Sjfv * to report stats that count from zero.
4492266423Sjfv **/
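/*
** Illustrative example of the wrap handling below (values made up):
** with a saved offset of 0xFFFFFFFFFF00 and a new raw reading of
** 0x80, the 48-bit counter has wrapped, so the reported value is
** (0x80 + 2^48) - 0xFFFFFFFFFF00 = 0x180.
*/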
4493266423Sjfvstatic void
4494270346Sjfvixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4495266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4496266423Sjfv{
4497266423Sjfv	u64 new_data;
4498266423Sjfv
4499270799Sbz#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4500266423Sjfv	new_data = rd64(hw, loreg);
4501266423Sjfv#else
4502266423Sjfv	/*
4503269198Sjfv	 * Use two rd32's instead of one rd64; FreeBSD versions before
4504266423Sjfv	 * 10 don't support 8 byte bus reads/writes.
4505266423Sjfv	 */
4506266423Sjfv	new_data = rd32(hw, loreg);
4507266423Sjfv	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4508266423Sjfv#endif
4509266423Sjfv
4510266423Sjfv	if (!offset_loaded)
4511266423Sjfv		*offset = new_data;
4512266423Sjfv	if (new_data >= *offset)
4513266423Sjfv		*stat = new_data - *offset;
4514266423Sjfv	else
4515266423Sjfv		*stat = (new_data + ((u64)1 << 48)) - *offset;
4516266423Sjfv	*stat &= 0xFFFFFFFFFFFFULL;
4517266423Sjfv}
4518266423Sjfv
4519266423Sjfv/**
4520266423Sjfv * Read and update a 32 bit stat from the hw
4521266423Sjfv **/
4522266423Sjfvstatic void
4523270346Sjfvixl_stat_update32(struct i40e_hw *hw, u32 reg,
4524266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4525266423Sjfv{
4526266423Sjfv	u32 new_data;
4527266423Sjfv
4528266423Sjfv	new_data = rd32(hw, reg);
4529266423Sjfv	if (!offset_loaded)
4530266423Sjfv		*offset = new_data;
4531266423Sjfv	if (new_data >= *offset)
4532266423Sjfv		*stat = (u32)(new_data - *offset);
4533266423Sjfv	else
4534266423Sjfv		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4535266423Sjfv}
4536266423Sjfv
4537266423Sjfv/*
4538266423Sjfv** Set flow control using sysctl:
4539266423Sjfv** 	0 - off
4540266423Sjfv**	1 - rx pause
4541266423Sjfv**	2 - tx pause
4542266423Sjfv**	3 - full
4543266423Sjfv*/
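/*
** Example (assuming this handler is attached to the device's sysctl
** tree as a node named "fc"):
**	sysctl dev.ixl.0.fc=3	# request full (rx + tx) flow control
*/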
4544266423Sjfvstatic int
4545270346Sjfvixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4546266423Sjfv{
4547266423Sjfv	/*
4548266423Sjfv	 * TODO: ensure flow control is disabled if
4549266423Sjfv	 * priority flow control is enabled
4550266423Sjfv	 *
4551266423Sjfv	 * TODO: ensure tx CRC insertion by hardware is enabled
4552266423Sjfv	 * when tx flow control is enabled.
4553266423Sjfv	 */
4554270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4555266423Sjfv	struct i40e_hw *hw = &pf->hw;
4556266423Sjfv	device_t dev = pf->dev;
4557279033Sjfv	int error = 0;
4558266423Sjfv	enum i40e_status_code aq_error = 0;
4559266423Sjfv	u8 fc_aq_err = 0;
4560266423Sjfv
4561279033Sjfv	/* Get request */
4562279033Sjfv	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4563266423Sjfv	if ((error) || (req->newptr == NULL))
4564269198Sjfv		return (error);
4565279033Sjfv	if (pf->fc < 0 || pf->fc > 3) {
4566266423Sjfv		device_printf(dev,
4567266423Sjfv		    "Invalid fc mode; valid modes are 0 through 3\n");
4568266423Sjfv		return (EINVAL);
4569266423Sjfv	}
4570266423Sjfv
4571269198Sjfv	/*
4572269198Sjfv	** Changing flow control mode currently does not work on
4573269198Sjfv	** 40GBASE-CR4 PHYs
4574269198Sjfv	*/
4575269198Sjfv	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4576269198Sjfv	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4577269198Sjfv		device_printf(dev, "Changing flow control mode unsupported"
4578269198Sjfv		    " on 40GBase-CR4 media.\n");
4579269198Sjfv		return (ENODEV);
4580269198Sjfv	}
4581269198Sjfv
4582266423Sjfv	/* Set fc ability for port */
4583279033Sjfv	hw->fc.requested_mode = pf->fc;
4584269198Sjfv	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4585269198Sjfv	if (aq_error) {
4586269198Sjfv		device_printf(dev,
4587269198Sjfv		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4588269198Sjfv		    __func__, aq_error, fc_aq_err);
4589269198Sjfv		return (EAGAIN);
4590269198Sjfv	}
4591266423Sjfv
4592269198Sjfv	return (0);
4593269198Sjfv}
4594266423Sjfv
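/*
** Sysctl handler that reports the current link speed as a string;
** the link status is refreshed before the speed is read.
*/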
4595270346Sjfvstatic int
4596270346Sjfvixl_current_speed(SYSCTL_HANDLER_ARGS)
4597270346Sjfv{
4598270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4599270346Sjfv	struct i40e_hw *hw = &pf->hw;
4600270346Sjfv	int error = 0, index = 0;
4601270346Sjfv
4602270346Sjfv	char *speeds[] = {
4603270346Sjfv		"Unknown",
4604270346Sjfv		"100M",
4605270346Sjfv		"1G",
4606270346Sjfv		"10G",
4607270346Sjfv		"40G",
4608270346Sjfv		"20G"
4609270346Sjfv	};
4610270346Sjfv
4611270346Sjfv	ixl_update_link_status(pf);
4612270346Sjfv
4613270346Sjfv	switch (hw->phy.link_info.link_speed) {
4614270346Sjfv	case I40E_LINK_SPEED_100MB:
4615270346Sjfv		index = 1;
4616270346Sjfv		break;
4617270346Sjfv	case I40E_LINK_SPEED_1GB:
4618270346Sjfv		index = 2;
4619270346Sjfv		break;
4620270346Sjfv	case I40E_LINK_SPEED_10GB:
4621270346Sjfv		index = 3;
4622270346Sjfv		break;
4623270346Sjfv	case I40E_LINK_SPEED_40GB:
4624270346Sjfv		index = 4;
4625270346Sjfv		break;
4626270346Sjfv	case I40E_LINK_SPEED_20GB:
4627270346Sjfv		index = 5;
4628270346Sjfv		break;
4629270346Sjfv	case I40E_LINK_SPEED_UNKNOWN:
4630270346Sjfv	default:
4631270346Sjfv		index = 0;
4632270346Sjfv		break;
4633270346Sjfv	}
4634270346Sjfv
4635270346Sjfv	error = sysctl_handle_string(oidp, speeds[index],
4636270346Sjfv	    strlen(speeds[index]), req);
4637270346Sjfv	return (error);
4638270346Sjfv}
4639270346Sjfv
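/*
** Push a new set of advertised speeds to the firmware: read the
** current PHY abilities, merge in the requested link speeds, issue
** set_phy_config, and then reinitialize the interface so the new
** advertisement takes effect.
*/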
4640274205Sjfvstatic int
4641274205Sjfvixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4642274205Sjfv{
4643274205Sjfv	struct i40e_hw *hw = &pf->hw;
4644274205Sjfv	device_t dev = pf->dev;
4645274205Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
4646274205Sjfv	struct i40e_aq_set_phy_config config;
4647274205Sjfv	enum i40e_status_code aq_error = 0;
4648274205Sjfv
4649274205Sjfv	/* Get current capability information */
4650279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
4651279033Sjfv	    FALSE, FALSE, &abilities, NULL);
4652274205Sjfv	if (aq_error) {
4653279033Sjfv		device_printf(dev,
4654279033Sjfv		    "%s: Error getting phy capabilities %d,"
4655274205Sjfv		    " aq error: %d\n", __func__, aq_error,
4656274205Sjfv		    hw->aq.asq_last_status);
4657274205Sjfv		return (EAGAIN);
4658274205Sjfv	}
4659274205Sjfv
4660274205Sjfv	/* Prepare new config */
4661274205Sjfv	bzero(&config, sizeof(config));
4662274205Sjfv	config.phy_type = abilities.phy_type;
4663274205Sjfv	config.abilities = abilities.abilities
4664274205Sjfv	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4665274205Sjfv	config.eee_capability = abilities.eee_capability;
4666274205Sjfv	config.eeer = abilities.eeer_val;
4667274205Sjfv	config.low_power_ctrl = abilities.d3_lpan;
4668274205Sjfv	/* Translate into aq cmd link_speed */
4669279858Sjfv	if (speeds & 0x8)
4670279858Sjfv		config.link_speed |= I40E_LINK_SPEED_20GB;
4671274205Sjfv	if (speeds & 0x4)
4672274205Sjfv		config.link_speed |= I40E_LINK_SPEED_10GB;
4673274205Sjfv	if (speeds & 0x2)
4674274205Sjfv		config.link_speed |= I40E_LINK_SPEED_1GB;
4675274205Sjfv	if (speeds & 0x1)
4676274205Sjfv		config.link_speed |= I40E_LINK_SPEED_100MB;
4677274205Sjfv
4678274205Sjfv	/* Do aq command & restart link */
4679274205Sjfv	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4680274205Sjfv	if (aq_error) {
4681279033Sjfv		device_printf(dev,
4682279033Sjfv		    "%s: Error setting new phy config %d,"
4683274205Sjfv		    " aq error: %d\n", __func__, aq_error,
4684274205Sjfv		    hw->aq.asq_last_status);
4685274205Sjfv		return (EAGAIN);
4686274205Sjfv	}
4687274205Sjfv
4688277084Sjfv	/*
4689277084Sjfv	** This seems a bit heavy-handed, but we
4690277084Sjfv	** need to get a reinit on some devices
4691277084Sjfv	*/
4692277084Sjfv	IXL_PF_LOCK(pf);
4693277084Sjfv	ixl_stop(pf);
4694277084Sjfv	ixl_init_locked(pf);
4695277084Sjfv	IXL_PF_UNLOCK(pf);
4696277084Sjfv
4697274205Sjfv	return (0);
4698274205Sjfv}
4699274205Sjfv
4700269198Sjfv/*
4701269198Sjfv** Control the link's advertised speeds:
4702270346Sjfv**	Flags:
4703270346Sjfv**	0x1 - advertise 100 Mb
4704270346Sjfv**	0x2 - advertise 1G
4705270346Sjfv**	0x4 - advertise 10G
4706279858Sjfv**	0x8 - advertise 20G
4707269198Sjfv**
4708269198Sjfv** Does not work on 40G devices.
4709269198Sjfv*/
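/*
** For example, writing 0x6 (0x2 | 0x4) to this node advertises both
** 1G and 10G, while 0x1 advertises 100 Mb only.
*/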
4710269198Sjfvstatic int
4711270346Sjfvixl_set_advertise(SYSCTL_HANDLER_ARGS)
4712269198Sjfv{
4713270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4714269198Sjfv	struct i40e_hw *hw = &pf->hw;
4715269198Sjfv	device_t dev = pf->dev;
4716270346Sjfv	int requested_ls = 0;
4717269198Sjfv	int error = 0;
4718266423Sjfv
4719269198Sjfv	/*
4720269198Sjfv	** FW doesn't support changing advertised speed
4721269198Sjfv	** for 40G devices; speed is always 40G.
4722269198Sjfv	*/
4723269198Sjfv	if (i40e_is_40G_device(hw->device_id))
4724269198Sjfv		return (ENODEV);
4725266423Sjfv
4726269198Sjfv	/* Read in new mode */
4727270346Sjfv	requested_ls = pf->advertised_speed;
4728269198Sjfv	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4729269198Sjfv	if ((error) || (req->newptr == NULL))
4730269198Sjfv		return (error);
4731279858Sjfv	/* Check for sane value */
4732279858Sjfv	if (requested_ls < 0x1 || requested_ls > 0xE) {
4733279858Sjfv		device_printf(dev, "Invalid advertised speed; "
4734279858Sjfv		    "valid modes are 0x1 through 0xE\n");
4735269198Sjfv		return (EINVAL);
4736266423Sjfv	}
4737279858Sjfv	/* Then check for validity based on adapter type */
4738279858Sjfv	switch (hw->device_id) {
4739279858Sjfv	case I40E_DEV_ID_10G_BASE_T:
4740279858Sjfv		if (requested_ls & 0x8) {
4741279858Sjfv			device_printf(dev,
4742279858Sjfv			    "20Gbs speed not supported on this device.\n");
4743279858Sjfv			return (EINVAL);
4744279858Sjfv		}
4745279858Sjfv		break;
4746279858Sjfv	case I40E_DEV_ID_20G_KR2:
4747279858Sjfv		if (requested_ls & 0x1) {
4748279858Sjfv			device_printf(dev,
4749279858Sjfv			    "100Mbs speed not supported on this device.\n");
4750279858Sjfv			return (EINVAL);
4751279858Sjfv		}
4752279858Sjfv		break;
4753279858Sjfv	default:
4754279858Sjfv		if (requested_ls & ~0x6) {
4755279858Sjfv			device_printf(dev,
4756279858Sjfv			    "Only 1/10Gbs speeds are supported on this device.\n");
4757279858Sjfv			return (EINVAL);
4758279858Sjfv		}
4759279858Sjfv		break;
4760279858Sjfv	}
4761269198Sjfv
4762269198Sjfv	/* Exit if no change */
4763270346Sjfv	if (pf->advertised_speed == requested_ls)
4764269198Sjfv		return (0);
4765269198Sjfv
4766274205Sjfv	error = ixl_set_advertised_speeds(pf, requested_ls);
4767274205Sjfv	if (error)
4768274205Sjfv		return (error);
4769270346Sjfv
4770270346Sjfv	pf->advertised_speed = requested_ls;
4771270346Sjfv	ixl_update_link_status(pf);
4772269198Sjfv	return (0);
4773266423Sjfv}
4774266423Sjfv
4775266423Sjfv/*
4776266423Sjfv** Get the width and transaction speed of
4777266423Sjfv** the bus this adapter is plugged into.
4778266423Sjfv*/
4779266423Sjfvstatic u16
4780270346Sjfvixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4781266423Sjfv{
4782266423Sjfv        u16                     link;
4783266423Sjfv        u32                     offset;
4784266423Sjfv
4785266423Sjfv
4786266423Sjfv        /* Get the PCI Express Capabilities offset */
4787266423Sjfv        pci_find_cap(dev, PCIY_EXPRESS, &offset);
4788266423Sjfv
4789266423Sjfv        /* ...and read the Link Status Register */
4790266423Sjfv        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4791266423Sjfv
4792266423Sjfv        switch (link & I40E_PCI_LINK_WIDTH) {
4793266423Sjfv        case I40E_PCI_LINK_WIDTH_1:
4794266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x1;
4795266423Sjfv                break;
4796266423Sjfv        case I40E_PCI_LINK_WIDTH_2:
4797266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x2;
4798266423Sjfv                break;
4799266423Sjfv        case I40E_PCI_LINK_WIDTH_4:
4800266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x4;
4801266423Sjfv                break;
4802266423Sjfv        case I40E_PCI_LINK_WIDTH_8:
4803266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x8;
4804266423Sjfv                break;
4805266423Sjfv        default:
4806266423Sjfv                hw->bus.width = i40e_bus_width_unknown;
4807266423Sjfv                break;
4808266423Sjfv        }
4809266423Sjfv
4810266423Sjfv        switch (link & I40E_PCI_LINK_SPEED) {
4811266423Sjfv        case I40E_PCI_LINK_SPEED_2500:
4812266423Sjfv                hw->bus.speed = i40e_bus_speed_2500;
4813266423Sjfv                break;
4814266423Sjfv        case I40E_PCI_LINK_SPEED_5000:
4815266423Sjfv                hw->bus.speed = i40e_bus_speed_5000;
4816266423Sjfv                break;
4817266423Sjfv        case I40E_PCI_LINK_SPEED_8000:
4818266423Sjfv                hw->bus.speed = i40e_bus_speed_8000;
4819266423Sjfv                break;
4820266423Sjfv        default:
4821266423Sjfv                hw->bus.speed = i40e_bus_speed_unknown;
4822266423Sjfv                break;
4823266423Sjfv        }
4824266423Sjfv
4825266423Sjfv
4826266423Sjfv        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4827266423Sjfv            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4828266423Sjfv            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4829266423Sjfv            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4830266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4831266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4832266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4833266423Sjfv            ("Unknown"));
4834266423Sjfv
4835266423Sjfv        if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4836266423Sjfv            (hw->bus.speed < i40e_bus_speed_8000)) {
4837266423Sjfv                device_printf(dev, "PCI-Express bandwidth available"
4838279858Sjfv                    " for this device\n     may be insufficient for"
4839279858Sjfv                    " optimal performance.\n");
4840266423Sjfv                device_printf(dev, "For expected performance a x8 "
4841266423Sjfv                    "PCIE Gen3 slot is required.\n");
4842266423Sjfv        }
4843266423Sjfv
4844266423Sjfv        return (link);
4845266423Sjfv}
4846266423Sjfv
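/*
** Sysctl handler reporting firmware, AdminQ API and NVM versions as
** a single string: "f<fw> a<api> n<nvm> e<eetrack>".
*/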
4847274205Sjfvstatic int
4848274205Sjfvixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4849274205Sjfv{
4850274205Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4851274205Sjfv	struct i40e_hw	*hw = &pf->hw;
4852274205Sjfv	char		buf[32];
4853274205Sjfv
4854274205Sjfv	snprintf(buf, sizeof(buf),
4855274205Sjfv	    "f%d.%d a%d.%d n%02x.%02x e%08x",
4856274205Sjfv	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4857274205Sjfv	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
4858274205Sjfv	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4859274205Sjfv	    IXL_NVM_VERSION_HI_SHIFT,
4860274205Sjfv	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4861274205Sjfv	    IXL_NVM_VERSION_LO_SHIFT,
4862274205Sjfv	    hw->nvm.eetrack);
4863274205Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4864274205Sjfv}
4865274205Sjfv
4866274205Sjfv
4867277084Sjfv#ifdef IXL_DEBUG_SYSCTL
4868266423Sjfvstatic int
4869270346Sjfvixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4870266423Sjfv{
4871270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4872266423Sjfv	struct i40e_hw *hw = &pf->hw;
4873266423Sjfv	struct i40e_link_status link_status;
4874266423Sjfv	char buf[512];
4875266423Sjfv
4876266423Sjfv	enum i40e_status_code aq_error = 0;
4877266423Sjfv
4878266423Sjfv	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4879266423Sjfv	if (aq_error) {
4880266423Sjfv		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4881266423Sjfv		return (EPERM);
4882266423Sjfv	}
4883266423Sjfv
4884266423Sjfv	sprintf(buf, "\n"
4885266423Sjfv	    "PHY Type : %#04x\n"
4886266423Sjfv	    "Speed    : %#04x\n"
4887266423Sjfv	    "Link info: %#04x\n"
4888266423Sjfv	    "AN info  : %#04x\n"
4889266423Sjfv	    "Ext info : %#04x",
4890266423Sjfv	    link_status.phy_type, link_status.link_speed,
4891266423Sjfv	    link_status.link_info, link_status.an_info,
4892266423Sjfv	    link_status.ext_info);
4893266423Sjfv
4894266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4895266423Sjfv}
4896266423Sjfv
4897266423Sjfvstatic int
4898270346Sjfvixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4899266423Sjfv{
4900279858Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
4901279858Sjfv	struct i40e_hw		*hw = &pf->hw;
4902279858Sjfv	char			buf[512];
4903279858Sjfv	enum i40e_status_code	aq_error = 0;
4904266423Sjfv
4905279858Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
4906266423Sjfv
4907279858Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
4908279858Sjfv	    TRUE, FALSE, &abilities, NULL);
4909266423Sjfv	if (aq_error) {
4910266423Sjfv		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4911266423Sjfv		return (EPERM);
4912266423Sjfv	}
4913266423Sjfv
4914266423Sjfv	sprintf(buf, "\n"
4915266423Sjfv	    "PHY Type : %#010x\n"
4916266423Sjfv	    "Speed    : %#04x\n"
4917266423Sjfv	    "Abilities: %#04x\n"
4918266423Sjfv	    "EEE cap  : %#06x\n"
4919266423Sjfv	    "EEER reg : %#010x\n"
4920266423Sjfv	    "D3 Lpan  : %#04x",
4921279858Sjfv	    abilities.phy_type, abilities.link_speed,
4922279858Sjfv	    abilities.abilities, abilities.eee_capability,
4923279858Sjfv	    abilities.eeer_val, abilities.d3_lpan);
4924266423Sjfv
4925266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4926266423Sjfv}
4927266423Sjfv
4928266423Sjfvstatic int
4929270346Sjfvixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4930266423Sjfv{
4931270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4932270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
4933270346Sjfv	struct ixl_mac_filter *f;
4934266423Sjfv	char *buf, *buf_i;
4935266423Sjfv
4936266423Sjfv	int error = 0;
4937266423Sjfv	int ftl_len = 0;
4938266423Sjfv	int ftl_counter = 0;
4939266423Sjfv	int buf_len = 0;
4940266423Sjfv	int entry_len = 42;
4941266423Sjfv
4942266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
4943266423Sjfv		ftl_len++;
4944266423Sjfv	}
4945266423Sjfv
4946266423Sjfv	if (ftl_len < 1) {
4947266423Sjfv		sysctl_handle_string(oidp, "(none)", 6, req);
4948266423Sjfv		return (0);
4949266423Sjfv	}
4950266423Sjfv
4951266423Sjfv	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4952266423Sjfv	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);
4953266423Sjfv
4954266423Sjfv	sprintf(buf_i++, "\n");
4955266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
4956266423Sjfv		sprintf(buf_i,
4957266423Sjfv		    MAC_FORMAT ", vlan %4d, flags %#06x",
4958266423Sjfv		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4959266423Sjfv		buf_i += entry_len;
4960266423Sjfv		/* don't print '\n' for last entry */
4961266423Sjfv		if (++ftl_counter != ftl_len) {
4962266423Sjfv			sprintf(buf_i, "\n");
4963266423Sjfv			buf_i++;
4964266423Sjfv		}
4965266423Sjfv	}
4966266423Sjfv
4967266423Sjfv	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4968266423Sjfv	if (error)
4969266423Sjfv		printf("sysctl error: %d\n", error);
4970266423Sjfv	free(buf, M_DEVBUF);
4971266423Sjfv	return error;
4972266423Sjfv}
4973269198Sjfv
4974270346Sjfv#define IXL_SW_RES_SIZE 0x14
4975269198Sjfvstatic int
4976277084Sjfvixl_res_alloc_cmp(const void *a, const void *b)
4977277084Sjfv{
4978277084Sjfv	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
4979277084Sjfv	one = (struct i40e_aqc_switch_resource_alloc_element_resp *)a;
4980277084Sjfv	two = (struct i40e_aqc_switch_resource_alloc_element_resp *)b;
4981277084Sjfv
4982277084Sjfv	return ((int)one->resource_type - (int)two->resource_type);
4983277084Sjfv}
4984277084Sjfv
4985277084Sjfvstatic int
4986274205Sjfvixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
4987269198Sjfv{
4988270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4989269198Sjfv	struct i40e_hw *hw = &pf->hw;
4990269198Sjfv	device_t dev = pf->dev;
4991269198Sjfv	struct sbuf *buf;
4992269198Sjfv	int error = 0;
4993269198Sjfv
4994269198Sjfv	u8 num_entries;
4995270346Sjfv	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4996269198Sjfv
4997269198Sjfv	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4998269198Sjfv	if (!buf) {
4999269198Sjfv		device_printf(dev, "Could not allocate sbuf for output.\n");
5000269198Sjfv		return (ENOMEM);
5001269198Sjfv	}
5002269198Sjfv
5003277084Sjfv	bzero(resp, sizeof(resp));
5004269198Sjfv	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5005269198Sjfv				resp,
5006270346Sjfv				IXL_SW_RES_SIZE,
5007269198Sjfv				NULL);
5008269198Sjfv	if (error) {
5009279858Sjfv		device_printf(dev,
5010279858Sjfv		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5011269198Sjfv		    __func__, error, hw->aq.asq_last_status);
5012269198Sjfv		sbuf_delete(buf);
5013269198Sjfv		return error;
5014269198Sjfv	}
5015269198Sjfv
5016277084Sjfv	/* Sort entries by type for display */
5017277084Sjfv	qsort(resp, num_entries,
5018277084Sjfv	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5019277084Sjfv	    &ixl_res_alloc_cmp);
5020277084Sjfv
5021269198Sjfv	sbuf_cat(buf, "\n");
5022277084Sjfv	sbuf_printf(buf, "# of entries: %d\n", num_entries);
5023269198Sjfv	sbuf_printf(buf,
5024269198Sjfv	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
5025269198Sjfv	    "     | (this)     | (all) | (this) | (all)       \n");
5026269198Sjfv	for (int i = 0; i < num_entries; i++) {
5027269198Sjfv		sbuf_printf(buf,
5028269198Sjfv		    "%#4x | %10d   %5d   %6d   %12d",
5029269198Sjfv		    resp[i].resource_type,
5030269198Sjfv		    resp[i].guaranteed,
5031269198Sjfv		    resp[i].total,
5032269198Sjfv		    resp[i].used,
5033269198Sjfv		    resp[i].total_unalloced);
5034269198Sjfv		if (i < num_entries - 1)
5035269198Sjfv			sbuf_cat(buf, "\n");
5036269198Sjfv	}
5037269198Sjfv
5038269198Sjfv	error = sbuf_finish(buf);
5039269198Sjfv	if (error) {
5040269198Sjfv		device_printf(dev, "Error finishing sbuf: %d\n", error);
5041269198Sjfv		sbuf_delete(buf);
5042269198Sjfv		return error;
5043269198Sjfv	}
5044269198Sjfv
5045269198Sjfv	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5046269198Sjfv	if (error)
5047269198Sjfv		device_printf(dev, "sysctl error: %d\n", error);
5048269198Sjfv	sbuf_delete(buf);
5049269198Sjfv	return error;
5050274205Sjfv}
5051269198Sjfv
5052274205Sjfv/*
5053274205Sjfv** Caller must init and delete sbuf; this function will clear and
5054274205Sjfv** finish it for caller.
5055274205Sjfv*/
5056274205Sjfvstatic char *
5057274205Sjfvixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
5058274205Sjfv{
5059274205Sjfv	sbuf_clear(s);
5060274205Sjfv
5061274205Sjfv	if (seid == 0 && uplink)
5062274205Sjfv		sbuf_cat(s, "Network");
5063274205Sjfv	else if (seid == 0)
5064274205Sjfv		sbuf_cat(s, "Host");
5065274205Sjfv	else if (seid == 1)
5066274205Sjfv		sbuf_cat(s, "EMP");
5067274205Sjfv	else if (seid <= 5)
5068274205Sjfv		sbuf_printf(s, "MAC %d", seid - 2);
5069274205Sjfv	else if (seid <= 15)
5070274205Sjfv		sbuf_cat(s, "Reserved");
5071274205Sjfv	else if (seid <= 31)
5072274205Sjfv		sbuf_printf(s, "PF %d", seid - 16);
5073274205Sjfv	else if (seid <= 159)
5074274205Sjfv		sbuf_printf(s, "VF %d", seid - 32);
5075274205Sjfv	else if (seid <= 287)
5076274205Sjfv		sbuf_cat(s, "Reserved");
5077274205Sjfv	else if (seid <= 511)
5078274205Sjfv		sbuf_cat(s, "Other"); // for other structures
5079274205Sjfv	else if (seid <= 895)
5080274205Sjfv		sbuf_printf(s, "VSI %d", seid - 512);
5081274205Sjfv	else if (seid <= 1023)
5082274205Sjfv		sbuf_printf(s, "Reserved");
5083274205Sjfv	else
5084274205Sjfv		sbuf_cat(s, "Invalid");
5085274205Sjfv
5086274205Sjfv	sbuf_finish(s);
5087274205Sjfv	return sbuf_data(s);
5088269198Sjfv}
5089269198Sjfv
5090274205Sjfvstatic int
5091274205Sjfvixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5092274205Sjfv{
5093274205Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5094274205Sjfv	struct i40e_hw *hw = &pf->hw;
5095274205Sjfv	device_t dev = pf->dev;
5096274205Sjfv	struct sbuf *buf;
5097274205Sjfv	struct sbuf *nmbuf;
5098274205Sjfv	int error = 0;
5099274205Sjfv	u8 aq_buf[I40E_AQ_LARGE_BUF];
5100274205Sjfv
5101274205Sjfv	u16 next = 0;
5102274205Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
5103274205Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5104274205Sjfv
5105274205Sjfv	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5106274205Sjfv	if (!buf) {
5107274205Sjfv		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5108274205Sjfv		return (ENOMEM);
5109274205Sjfv	}
5110274205Sjfv
5111274205Sjfv	error = i40e_aq_get_switch_config(hw, sw_config,
5112274205Sjfv	    sizeof(aq_buf), &next, NULL);
5113274205Sjfv	if (error) {
5114279858Sjfv		device_printf(dev,
5115279858Sjfv		    "%s: aq_get_switch_config() error %d, aq error %d\n",
5116274205Sjfv		    __func__, error, hw->aq.asq_last_status);
5117274205Sjfv		sbuf_delete(buf);
5118274205Sjfv		return error;
5119274205Sjfv	}
5120274205Sjfv
5121274205Sjfv	nmbuf = sbuf_new_auto();
5122274205Sjfv	if (!nmbuf) {
5123274205Sjfv		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
5124274205Sjfv		return (ENOMEM);
5125274205Sjfv	}
5126274205Sjfv
5127274205Sjfv	sbuf_cat(buf, "\n");
5128274205Sjfv	// Assuming <= 255 elements in switch
5129274205Sjfv	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5130274205Sjfv	/* Exclude:
5131274205Sjfv	** Revision -- all elements are revision 1 for now
5132274205Sjfv	*/
5133274205Sjfv	sbuf_printf(buf,
5134274205Sjfv	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5135274205Sjfv	    "                |          |          | (uplink)\n");
5136274205Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
5137274205Sjfv		// "%4d (%8s) | %8s   %8s   %#8x",
5138274205Sjfv		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5139274205Sjfv		sbuf_cat(buf, " ");
5140279858Sjfv		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5141279858Sjfv		    sw_config->element[i].seid, false));
5142274205Sjfv		sbuf_cat(buf, " | ");
5143279858Sjfv		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5144279858Sjfv		    sw_config->element[i].uplink_seid, true));
5145274205Sjfv		sbuf_cat(buf, "   ");
5146279858Sjfv		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5147279858Sjfv		    sw_config->element[i].downlink_seid, false));
5148274205Sjfv		sbuf_cat(buf, "   ");
5149274205Sjfv		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5150274205Sjfv		if (i < sw_config->header.num_reported - 1)
5151274205Sjfv			sbuf_cat(buf, "\n");
5152274205Sjfv	}
5153274205Sjfv	sbuf_delete(nmbuf);
5154274205Sjfv
5155274205Sjfv	error = sbuf_finish(buf);
5156274205Sjfv	if (error) {
5157274205Sjfv		device_printf(dev, "Error finishing sbuf: %d\n", error);
5158274205Sjfv		sbuf_delete(buf);
5159274205Sjfv		return error;
5160274205Sjfv	}
5161274205Sjfv
5162274205Sjfv	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5163274205Sjfv	if (error)
5164274205Sjfv		device_printf(dev, "sysctl error: %d\n", error);
5165274205Sjfv	sbuf_delete(buf);
5166274205Sjfv
5167274205Sjfv	return (error);
5168274205Sjfv}
5169279858Sjfv#endif /* IXL_DEBUG_SYSCTL */
5170274205Sjfv
5171279858Sjfv
5172279858Sjfv#ifdef PCI_IOV
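/*
** Allocate and configure a VSI for the given VF: attach it to the
** VEB, enable MAC anti-spoof if requested, map a non-contiguous
** block of PF queues to it, and remove any BW limit.
*/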
5173269198Sjfvstatic int
5174279858Sjfvixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5175269198Sjfv{
5176279858Sjfv	struct i40e_hw *hw;
5177279858Sjfv	struct ixl_vsi *vsi;
5178279858Sjfv	struct i40e_vsi_context vsi_ctx;
5179279858Sjfv	int i;
5180279858Sjfv	uint16_t first_queue;
5181279858Sjfv	enum i40e_status_code code;
5182269198Sjfv
5183279858Sjfv	hw = &pf->hw;
5184279858Sjfv	vsi = &pf->vsi;
5185269198Sjfv
5186279858Sjfv	vsi_ctx.pf_num = hw->pf_id;
5187279858Sjfv	vsi_ctx.uplink_seid = pf->veb_seid;
5188279858Sjfv	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5189279858Sjfv	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5190279858Sjfv	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5191279858Sjfv
5192279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5193279858Sjfv
5194279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5195279858Sjfv	vsi_ctx.info.switch_id = htole16(0);
5196279858Sjfv
5197279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5198279858Sjfv	vsi_ctx.info.sec_flags = 0;
5199279858Sjfv	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5200279858Sjfv		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5201279858Sjfv
5202279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5203279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5204279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5205279858Sjfv
5206279858Sjfv	vsi_ctx.info.valid_sections |=
5207279858Sjfv	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5208279858Sjfv	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5209279858Sjfv	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5210279858Sjfv	for (i = 0; i < IXLV_MAX_QUEUES; i++)
5211279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
5212279858Sjfv	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5213279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5214279858Sjfv
5215279858Sjfv	vsi_ctx.info.tc_mapping[0] = htole16(
5216279858Sjfv	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5217279858Sjfv	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5218279858Sjfv
5219279858Sjfv	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5220279858Sjfv	if (code != I40E_SUCCESS)
5221279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5222279858Sjfv	vf->vsi.seid = vsi_ctx.seid;
5223279858Sjfv	vf->vsi.vsi_num = vsi_ctx.vsi_number;
5224279858Sjfv	vf->vsi.first_queue = first_queue;
5225279858Sjfv	vf->vsi.num_queues = IXLV_MAX_QUEUES;
5226279858Sjfv
5227279858Sjfv	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5228279858Sjfv	if (code != I40E_SUCCESS)
5229279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5230279858Sjfv
5231279858Sjfv	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5232279858Sjfv	if (code != I40E_SUCCESS) {
5233279858Sjfv		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5234279858Sjfv		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5235279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5236269198Sjfv	}
5237269198Sjfv
5238279858Sjfv	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
5239279858Sjfv	return (0);
5240279858Sjfv}
5241279858Sjfv
5242279858Sjfvstatic int
5243279858Sjfvixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5244279858Sjfv{
5245279858Sjfv	struct i40e_hw *hw;
5246279858Sjfv	int error;
5247279858Sjfv
5248279858Sjfv	hw = &pf->hw;
5249279858Sjfv
5250279858Sjfv	error = ixl_vf_alloc_vsi(pf, vf);
5251279858Sjfv	if (error != 0)
5252269198Sjfv		return (error);
5253279858Sjfv
5254279858Sjfv	vf->vsi.hw_filters_add = 0;
5255279858Sjfv	vf->vsi.hw_filters_del = 0;
5256279858Sjfv	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5257279858Sjfv	ixl_reconfigure_filters(&vf->vsi);
5258279858Sjfv
5259279858Sjfv	return (0);
5260279858Sjfv}
5261279858Sjfv
5262279858Sjfvstatic void
5263279858Sjfvixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5264279858Sjfv    uint32_t val)
5265279858Sjfv{
5266279858Sjfv	uint32_t qtable;
5267279858Sjfv	int index, shift;
5268279858Sjfv
5269279858Sjfv	/*
5270279858Sjfv	 * Two queues are mapped in a single register, so we have to do some
5271279858Sjfv	 * gymnastics to convert the queue number into a register index and
5272279858Sjfv	 * shift.
5273279858Sjfv	 */
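	/*
	 * For example, queue 5 lands in register index 2 at the QINDEX_1
	 * position (5 / 2 == 2, 5 % 2 == 1), while queue 4 shares that
	 * register at the QINDEX_0 position.
	 */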
5274279858Sjfv	index = qnum / 2;
5275279858Sjfv	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5276279858Sjfv
5277279858Sjfv	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5278279858Sjfv	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5279279858Sjfv	qtable |= val << shift;
5280279858Sjfv	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5281279858Sjfv}
5282279858Sjfv
5283279858Sjfvstatic void
5284279858Sjfvixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5285279858Sjfv{
5286279858Sjfv	struct i40e_hw *hw;
5287279858Sjfv	uint32_t qtable;
5288279858Sjfv	int i;
5289279858Sjfv
5290279858Sjfv	hw = &pf->hw;
5291279858Sjfv
5292279858Sjfv	/*
5293279858Sjfv	 * Contiguous mappings aren't actually supported by the hardware,
5294279858Sjfv	 * so we have to use non-contiguous mappings.
5295279858Sjfv	 */
5296279858Sjfv	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5297279858Sjfv	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5298279858Sjfv
5299279858Sjfv	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5300279858Sjfv	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
5301279858Sjfv
5302279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++) {
5303279858Sjfv		qtable = (vf->vsi.first_queue + i) <<
5304279858Sjfv		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5305279858Sjfv
5306279858Sjfv		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5307279858Sjfv	}
5308279858Sjfv
5309279858Sjfv	/* Map queues allocated to VF to its VSI. */
5310279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++)
5311279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5312279858Sjfv
5313279858Sjfv	/* Set rest of VSI queues as unused. */
5314279858Sjfv	for (; i < IXL_MAX_VSI_QUEUES; i++)
5315279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i,
5316279858Sjfv		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
5317279858Sjfv
5318279858Sjfv	ixl_flush(hw);
5319279858Sjfv}
5320279858Sjfv
5321279858Sjfvstatic void
5322279858Sjfvixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5323279858Sjfv{
5324279858Sjfv	struct i40e_hw *hw;
5325279858Sjfv
5326279858Sjfv	hw = &pf->hw;
5327279858Sjfv
5328279858Sjfv	if (vsi->seid == 0)
5329279858Sjfv		return;
5330279858Sjfv
5331279858Sjfv	i40e_aq_delete_element(hw, vsi->seid, NULL);
5332279858Sjfv}
5333279858Sjfv
5334279858Sjfvstatic void
5335279858Sjfvixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5336279858Sjfv{
5337279858Sjfv
5338279858Sjfv	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
5339279858Sjfv	ixl_flush(hw);
5340279858Sjfv}
5341279858Sjfv
5342279858Sjfvstatic void
5343279858Sjfvixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5344279858Sjfv{
5345279858Sjfv
5346279858Sjfv	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5347279858Sjfv	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
5348279858Sjfv	ixl_flush(hw);
5349279858Sjfv}
5350279858Sjfv
5351279858Sjfvstatic void
5352279858Sjfvixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5353279858Sjfv{
5354279858Sjfv	struct i40e_hw *hw;
5355279858Sjfv	uint32_t vfint_reg, vpint_reg;
5356279858Sjfv	int i;
5357279858Sjfv
5358279858Sjfv	hw = &pf->hw;
5359279858Sjfv
5360279858Sjfv	ixl_vf_vsi_release(pf, &vf->vsi);
5361279858Sjfv
5362279858Sjfv	/* Index 0 has a special register. */
5363279858Sjfv	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5364279858Sjfv
5365279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5366279858Sjfv		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
5367279858Sjfv		ixl_vf_disable_queue_intr(hw, vfint_reg);
5368279858Sjfv	}
5369279858Sjfv
5370279858Sjfv	/* Index 0 has a special register. */
5371279858Sjfv	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5372279858Sjfv
5373279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5374279858Sjfv		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5375279858Sjfv		ixl_vf_unregister_intr(hw, vpint_reg);
5376279858Sjfv	}
5377279858Sjfv
5378279858Sjfv	vf->vsi.num_queues = 0;
5379279858Sjfv}
5380279858Sjfv
5381279858Sjfvstatic int
5382279858Sjfvixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5383279858Sjfv{
5384279858Sjfv	struct i40e_hw *hw;
5385279858Sjfv	int i;
5386279858Sjfv	uint16_t global_vf_num;
5387279858Sjfv	uint32_t ciad;
5388279858Sjfv
5389279858Sjfv	hw = &pf->hw;
5390279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5391279858Sjfv
5392279858Sjfv	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5393279858Sjfv	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5394279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5395279858Sjfv		ciad = rd32(hw, I40E_PF_PCI_CIAD);
5396279858Sjfv		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5397279858Sjfv			return (0);
5398279858Sjfv		DELAY(1);
5399279858Sjfv	}
5400279858Sjfv
5401279858Sjfv	return (ETIMEDOUT);
5402279858Sjfv}
5403279858Sjfv
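/*
** Trigger a software reset (VFSWR) of the given VF, then rebuild its
** resources via ixl_reinit_vf().
*/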
5404279858Sjfvstatic void
5405279858Sjfvixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5406279858Sjfv{
5407279858Sjfv	struct i40e_hw *hw;
5408279858Sjfv	uint32_t vfrtrig;
5409279858Sjfv
5410279858Sjfv	hw = &pf->hw;
5411279858Sjfv
5412279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5413279858Sjfv	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5414279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5415279858Sjfv	ixl_flush(hw);
5416279858Sjfv
5417279858Sjfv	ixl_reinit_vf(pf, vf);
5418279858Sjfv}
5419279858Sjfv
5420279858Sjfvstatic void
5421279858Sjfvixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5422279858Sjfv{
5423279858Sjfv	struct i40e_hw *hw;
5424279858Sjfv	uint32_t vfrstat, vfrtrig;
5425279858Sjfv	int i, error;
5426279858Sjfv
5427279858Sjfv	hw = &pf->hw;
5428279858Sjfv
5429279858Sjfv	error = ixl_flush_pcie(pf, vf);
5430279858Sjfv	if (error != 0)
5431279858Sjfv		device_printf(pf->dev,
5432279858Sjfv		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
5433279858Sjfv		    vf->vf_num);
5434279858Sjfv
5435279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5436279858Sjfv		DELAY(10);
5437279858Sjfv
5438279858Sjfv		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5439279858Sjfv		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5440279858Sjfv			break;
5441279858Sjfv	}
5442279858Sjfv
5443279858Sjfv	if (i == IXL_VF_RESET_TIMEOUT)
5444279858Sjfv		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5445279858Sjfv
5446279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5447279858Sjfv
5448279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5449279858Sjfv	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5450279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5451279858Sjfv
5452279858Sjfv	if (vf->vsi.seid != 0)
5453279858Sjfv		ixl_disable_rings(&vf->vsi);
5454279858Sjfv
5455279858Sjfv	ixl_vf_release_resources(pf, vf);
5456279858Sjfv	ixl_vf_setup_vsi(pf, vf);
5457279858Sjfv	ixl_vf_map_queues(pf, vf);
5458279858Sjfv
5459279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
5460279858Sjfv	ixl_flush(hw);
5461279858Sjfv}
5462279858Sjfv
5463279858Sjfvstatic const char *
5464279858Sjfvixl_vc_opcode_str(uint16_t op)
5465279858Sjfv{
5466279858Sjfv
5467279858Sjfv	switch (op) {
5468279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
5469279858Sjfv		return ("VERSION");
5470279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
5471279858Sjfv		return ("RESET_VF");
5472279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5473279858Sjfv		return ("GET_VF_RESOURCES");
5474279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5475279858Sjfv		return ("CONFIG_TX_QUEUE");
5476279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5477279858Sjfv		return ("CONFIG_RX_QUEUE");
5478279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5479279858Sjfv		return ("CONFIG_VSI_QUEUES");
5480279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5481279858Sjfv		return ("CONFIG_IRQ_MAP");
5482279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5483279858Sjfv		return ("ENABLE_QUEUES");
5484279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5485279858Sjfv		return ("DISABLE_QUEUES");
5486279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5487279858Sjfv		return ("ADD_ETHER_ADDRESS");
5488279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5489279858Sjfv		return ("DEL_ETHER_ADDRESS");
5490279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
5491279858Sjfv		return ("ADD_VLAN");
5492279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
5493279858Sjfv		return ("DEL_VLAN");
5494279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5495279858Sjfv		return ("CONFIG_PROMISCUOUS_MODE");
5496279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5497279858Sjfv		return ("GET_STATS");
5498279858Sjfv	case I40E_VIRTCHNL_OP_FCOE:
5499279858Sjfv		return ("FCOE");
5500279858Sjfv	case I40E_VIRTCHNL_OP_EVENT:
5501279858Sjfv		return ("EVENT");
5502279858Sjfv	default:
5503279858Sjfv		return ("UNKNOWN");
5504279858Sjfv	}
5505279858Sjfv}
5506279858Sjfv
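/*
 * Debug verbosity level used when logging a virtchnl opcode; GET_STATS
 * messages are expected to be frequent, so they are only logged at a
 * noisier level.
 */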
5507279858Sjfvstatic int
5508279858Sjfvixl_vc_opcode_level(uint16_t opcode)
5509279858Sjfv{
5510279858Sjfv
5511279858Sjfv	switch (opcode) {
5512279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5513279858Sjfv		return (10);
5514279858Sjfv	default:
5515279858Sjfv		return (5);
5516279858Sjfv	}
5517279858Sjfv}
5518279858Sjfv
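/* Send a virtchnl message to the given VF over the admin queue. */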
5519279858Sjfvstatic void
5520279858Sjfvixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5521279858Sjfv    enum i40e_status_code status, void *msg, uint16_t len)
5522279858Sjfv{
5523279858Sjfv	struct i40e_hw *hw;
5524279858Sjfv	int global_vf_id;
5525279858Sjfv
5526279858Sjfv	hw = &pf->hw;
5527279858Sjfv	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5528279858Sjfv
5529279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5530279858Sjfv	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5531279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
5532279858Sjfv
5533279858Sjfv	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
5534279858Sjfv}
5535279858Sjfv
5536279858Sjfvstatic void
5537279858Sjfvixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5538279858Sjfv{
5539279858Sjfv
5540279858Sjfv	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
5541279858Sjfv}
5542279858Sjfv
5543279858Sjfvstatic void
5544279858Sjfvixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5545279858Sjfv    enum i40e_status_code status, const char *file, int line)
5546279858Sjfv{
5547279858Sjfv
5548279858Sjfv	I40E_VC_DEBUG(pf, 1,
5549279858Sjfv	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5550279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5551279858Sjfv	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
5552279858Sjfv}
5553279858Sjfv
5554279858Sjfvstatic void
5555279858Sjfvixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5556279858Sjfv    uint16_t msg_size)
5557279858Sjfv{
5558279858Sjfv	struct i40e_virtchnl_version_info reply;
5559279858Sjfv
5560279858Sjfv	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5561279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5562279858Sjfv		    I40E_ERR_PARAM);
5563279858Sjfv		return;
5564279858Sjfv	}
5565279858Sjfv
5566279858Sjfv	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5567279858Sjfv	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5568279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
5569279858Sjfv	    sizeof(reply));
5570279858Sjfv}
5571279858Sjfv
5572279858Sjfvstatic void
5573279858Sjfvixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5574279858Sjfv    uint16_t msg_size)
5575279858Sjfv{
5576279858Sjfv
5577279858Sjfv	if (msg_size != 0) {
5578279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5579279858Sjfv		    I40E_ERR_PARAM);
5580279858Sjfv		return;
5581279858Sjfv	}
5582279858Sjfv
5583279858Sjfv	ixl_reset_vf(pf, vf);
5584279858Sjfv
5585279858Sjfv	/* No response to a reset message. */
5586279858Sjfv}
5587279858Sjfv
5588279858Sjfvstatic void
5589279858Sjfvixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5590279858Sjfv    uint16_t msg_size)
5591279858Sjfv{
5592279858Sjfv	struct i40e_virtchnl_vf_resource reply;
5593279858Sjfv
5594279858Sjfv	if (msg_size != 0) {
5595279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5596279858Sjfv		    I40E_ERR_PARAM);
5597279858Sjfv		return;
5598279858Sjfv	}
5599279858Sjfv
5600279858Sjfv	bzero(&reply, sizeof(reply));
5601279858Sjfv
5602279858Sjfv	reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5603279858Sjfv
5604279858Sjfv	reply.num_vsis = 1;
5605279858Sjfv	reply.num_queue_pairs = vf->vsi.num_queues;
5606279858Sjfv	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5607279858Sjfv	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5608279858Sjfv	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5609279858Sjfv	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5610279858Sjfv	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5611279858Sjfv
5612279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5613279858Sjfv	    I40E_SUCCESS, &reply, sizeof(reply));
5614279858Sjfv}
5615279858Sjfv
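/*
 * Program the HMC TX queue context for one VF queue and associate the queue
 * with the VF by writing its QTX_CTL register.
 */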
5616279858Sjfvstatic int
5617279858Sjfvixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5618279858Sjfv    struct i40e_virtchnl_txq_info *info)
5619279858Sjfv{
5620279858Sjfv	struct i40e_hw *hw;
5621279858Sjfv	struct i40e_hmc_obj_txq txq;
5622279858Sjfv	uint16_t global_queue_num, global_vf_num;
5623279858Sjfv	enum i40e_status_code status;
5624279858Sjfv	uint32_t qtx_ctl;
5625279858Sjfv
5626279858Sjfv	hw = &pf->hw;
5627279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
5628279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5629279858Sjfv	bzero(&txq, sizeof(txq));
5630279858Sjfv
5631279858Sjfv	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5632279858Sjfv	if (status != I40E_SUCCESS)
5633269198Sjfv		return (EINVAL);
5634279858Sjfv
5635279858Sjfv	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5636279858Sjfv
5637279858Sjfv	txq.head_wb_ena = info->headwb_enabled;
5638279858Sjfv	txq.head_wb_addr = info->dma_headwb_addr;
5639279858Sjfv	txq.qlen = info->ring_len;
5640279858Sjfv	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5641279858Sjfv	txq.rdylist_act = 0;
5642279858Sjfv
5643279858Sjfv	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5644279858Sjfv	if (status != I40E_SUCCESS)
5645279858Sjfv		return (EINVAL);
5646279858Sjfv
5647279858Sjfv	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5648279858Sjfv	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5649279858Sjfv	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5650279858Sjfv	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
5651279858Sjfv	ixl_flush(hw);
5652279858Sjfv
5653279858Sjfv	return (0);
5654279858Sjfv}
5655279858Sjfv
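/*
 * Validate the VF-supplied RX queue parameters and program the HMC RX queue
 * context for the corresponding global queue.
 */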
5656279858Sjfvstatic int
5657279858Sjfvixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5658279858Sjfv    struct i40e_virtchnl_rxq_info *info)
5659279858Sjfv{
5660279858Sjfv	struct i40e_hw *hw;
5661279858Sjfv	struct i40e_hmc_obj_rxq rxq;
5662279858Sjfv	uint16_t global_queue_num;
5663279858Sjfv	enum i40e_status_code status;
5664279858Sjfv
5665279858Sjfv	hw = &pf->hw;
5666279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
5667279858Sjfv	bzero(&rxq, sizeof(rxq));
5668279858Sjfv
5669279858Sjfv	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5670279858Sjfv		return (EINVAL);
5671279858Sjfv
5672279858Sjfv	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5673279858Sjfv	    info->max_pkt_size < ETHER_MIN_LEN)
5674279858Sjfv		return (EINVAL);
5675279858Sjfv
5676279858Sjfv	if (info->splithdr_enabled) {
5677279858Sjfv		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
5678279858Sjfv			return (EINVAL);
5679279858Sjfv
5680279858Sjfv		rxq.hsplit_0 = info->rx_split_pos &
5681279858Sjfv		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5682279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5683279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5684279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5685279858Sjfv		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5686279858Sjfv
5687279858Sjfv		rxq.dtype = 2;
5688269198Sjfv	}
5689269198Sjfv
5690279858Sjfv	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5691279858Sjfv	if (status != I40E_SUCCESS)
5692279858Sjfv		return (EINVAL);
5693269198Sjfv
5694279858Sjfv	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5695279858Sjfv	rxq.qlen = info->ring_len;
5696269198Sjfv
5697279858Sjfv	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5698269198Sjfv
5699279858Sjfv	rxq.dsize = 1;
5700279858Sjfv	rxq.crcstrip = 1;
5701279858Sjfv	rxq.l2tsel = 1;
5702269198Sjfv
5703279858Sjfv	rxq.rxmax = info->max_pkt_size;
5704279858Sjfv	rxq.tphrdesc_ena = 1;
5705279858Sjfv	rxq.tphwdesc_ena = 1;
5706279858Sjfv	rxq.tphdata_ena = 1;
5707279858Sjfv	rxq.tphhead_ena = 1;
5708279858Sjfv	rxq.lrxqthresh = 2;
5709279858Sjfv	rxq.prefena = 1;
5710279858Sjfv
5711279858Sjfv	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5712279858Sjfv	if (status != I40E_SUCCESS)
5713279858Sjfv		return (EINVAL);
5714279858Sjfv
5715279858Sjfv	return (0);
5716279858Sjfv}
5717279858Sjfv
5718279858Sjfvstatic void
5719279858Sjfvixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5720279858Sjfv    uint16_t msg_size)
5721279858Sjfv{
5722279858Sjfv	struct i40e_virtchnl_vsi_queue_config_info *info;
5723279858Sjfv	struct i40e_virtchnl_queue_pair_info *pair;
5724279858Sjfv	int i;
5725279858Sjfv
5726279858Sjfv	if (msg_size < sizeof(*info)) {
5727279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5728279858Sjfv		    I40E_ERR_PARAM);
5729279858Sjfv		return;
5730279858Sjfv	}
5731279858Sjfv
5732279858Sjfv	info = msg;
5733279858Sjfv	if (info->num_queue_pairs == 0) {
5734279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5735279858Sjfv		    I40E_ERR_PARAM);
5736279858Sjfv		return;
5737279858Sjfv	}
5738279858Sjfv
5739279858Sjfv	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5740279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5741279858Sjfv		    I40E_ERR_PARAM);
5742279858Sjfv		return;
5743279858Sjfv	}
5744279858Sjfv
5745279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
5746279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5747279858Sjfv		    I40E_ERR_PARAM);
5748279858Sjfv		return;
5749279858Sjfv	}
5750279858Sjfv
5751279858Sjfv	for (i = 0; i < info->num_queue_pairs; i++) {
5752279858Sjfv		pair = &info->qpair[i];
5753279858Sjfv
5754279858Sjfv		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5755279858Sjfv		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
5756279858Sjfv		    pair->txq.queue_id != pair->rxq.queue_id ||
5757279858Sjfv		    pair->txq.queue_id >= vf->vsi.num_queues) {
5758279858Sjfv
5759279858Sjfv			i40e_send_vf_nack(pf, vf,
5760279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5761279858Sjfv			return;
5762279858Sjfv		}
5763279858Sjfv
5764279858Sjfv		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5765279858Sjfv			i40e_send_vf_nack(pf, vf,
5766279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5767279858Sjfv			return;
5768279858Sjfv		}
5769279858Sjfv
5770279858Sjfv		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5771279858Sjfv			i40e_send_vf_nack(pf, vf,
5772279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5773279858Sjfv			return;
5774279858Sjfv		}
5775279858Sjfv	}
5776279858Sjfv
5777279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
5778279858Sjfv}
5779279858Sjfv
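/*
 * Write the QINT_RQCTL/QINT_TQCTL register for one queue, chaining it to the
 * previously programmed queue in the interrupt linked list, and record it as
 * the most recently linked queue for the caller.
 */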
5780279858Sjfvstatic void
5781279858Sjfvixl_vf_set_qctl(struct ixl_pf *pf,
5782279858Sjfv    const struct i40e_virtchnl_vector_map *vector,
5783279858Sjfv    enum i40e_queue_type cur_type, uint16_t cur_queue,
5784279858Sjfv    enum i40e_queue_type *last_type, uint16_t *last_queue)
5785279858Sjfv{
5786279858Sjfv	uint32_t offset, qctl;
5787279858Sjfv	uint16_t itr_indx;
5788279858Sjfv
5789279858Sjfv	if (cur_type == I40E_QUEUE_TYPE_RX) {
5790279858Sjfv		offset = I40E_QINT_RQCTL(cur_queue);
5791279858Sjfv		itr_indx = vector->rxitr_idx;
5792279858Sjfv	} else {
5793279858Sjfv		offset = I40E_QINT_TQCTL(cur_queue);
5794279858Sjfv		itr_indx = vector->txitr_idx;
5795279858Sjfv	}
5796279858Sjfv
5797279858Sjfv	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5798279858Sjfv	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5799279858Sjfv	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5800279858Sjfv	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5801279858Sjfv	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5802279858Sjfv
5803279858Sjfv	wr32(&pf->hw, offset, qctl);
5804279858Sjfv
5805279858Sjfv	*last_type = cur_type;
5806279858Sjfv	*last_queue = cur_queue;
5807279858Sjfv}
5808279858Sjfv
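/*
 * Build the interrupt linked list for one VF MSI-X vector from the RX and TX
 * queue bitmaps supplied by the VF.
 */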
5809279858Sjfvstatic void
5810279858Sjfvixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5811279858Sjfv    const struct i40e_virtchnl_vector_map *vector)
5812279858Sjfv{
5813279858Sjfv	struct i40e_hw *hw;
5814279858Sjfv	u_int qindex;
5815279858Sjfv	enum i40e_queue_type type, last_type;
5816279858Sjfv	uint32_t lnklst_reg;
5817279858Sjfv	uint16_t rxq_map, txq_map, cur_queue, last_queue;
5818279858Sjfv
5819279858Sjfv	hw = &pf->hw;
5820279858Sjfv
5821279858Sjfv	rxq_map = vector->rxq_map;
5822279858Sjfv	txq_map = vector->txq_map;
5823279858Sjfv
5824279858Sjfv	last_queue = IXL_END_OF_INTR_LNKLST;
5825279858Sjfv	last_type = I40E_QUEUE_TYPE_RX;
5826279858Sjfv
5827279858Sjfv	/*
5828279858Sjfv	 * The datasheet says that to optimize performance, RX queues and TX
5829279858Sjfv	 * queues should be interleaved in the interrupt linked list, so we
5830279858Sjfv	 * process one of each per iteration here.
5831279858Sjfv	 */
5832279858Sjfv	while ((rxq_map != 0) || (txq_map != 0)) {
5833279858Sjfv		if (txq_map != 0) {
5834279858Sjfv			qindex = ffs(txq_map) - 1;
5835279858Sjfv			type = I40E_QUEUE_TYPE_TX;
5836279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
5837279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5838279858Sjfv			    &last_type, &last_queue);
5839279858Sjfv			txq_map &= ~(1 << qindex);
5840279858Sjfv		}
5841279858Sjfv
5842279858Sjfv		if (rxq_map != 0) {
5843279858Sjfv			qindex = ffs(rxq_map) - 1;
5844279858Sjfv			type = I40E_QUEUE_TYPE_RX;
5845279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
5846279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5847279858Sjfv			    &last_type, &last_queue);
5848279858Sjfv			rxq_map &= ~(1 << qindex);
5849279858Sjfv		}
5850279858Sjfv	}
5851279858Sjfv
5852279858Sjfv	if (vector->vector_id == 0)
5853279858Sjfv		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5854279858Sjfv	else
5855279858Sjfv		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5856279858Sjfv		    vf->vf_num);
5857279858Sjfv	wr32(hw, lnklst_reg,
5858279858Sjfv	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5859279858Sjfv	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
5860279858Sjfv
5861279858Sjfv	ixl_flush(hw);
5862279858Sjfv}
5863279858Sjfv
5864279858Sjfvstatic void
5865279858Sjfvixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5866279858Sjfv    uint16_t msg_size)
5867279858Sjfv{
5868279858Sjfv	struct i40e_virtchnl_irq_map_info *map;
5869279858Sjfv	struct i40e_virtchnl_vector_map *vector;
5870279858Sjfv	struct i40e_hw *hw;
5871279858Sjfv	int i, largest_txq, largest_rxq;
5872279858Sjfv
5873279858Sjfv	hw = &pf->hw;
5874279858Sjfv
5875279858Sjfv	if (msg_size < sizeof(*map)) {
5876279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5877279858Sjfv		    I40E_ERR_PARAM);
5878279858Sjfv		return;
5879279858Sjfv	}
5880279858Sjfv
5881279858Sjfv	map = msg;
5882279858Sjfv	if (map->num_vectors == 0) {
5883279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5884279858Sjfv		    I40E_ERR_PARAM);
5885279858Sjfv		return;
5886279858Sjfv	}
5887279858Sjfv
5888279858Sjfv	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5889279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5890279858Sjfv		    I40E_ERR_PARAM);
5891279858Sjfv		return;
5892279858Sjfv	}
5893279858Sjfv
5894279858Sjfv	for (i = 0; i < map->num_vectors; i++) {
5895279858Sjfv		vector = &map->vecmap[i];
5896279858Sjfv
5897279858Sjfv		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
5898279858Sjfv		    vector->vsi_id != vf->vsi.vsi_num) {
5899279858Sjfv			i40e_send_vf_nack(pf, vf,
5900279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
5901279858Sjfv			return;
5902279858Sjfv		}
5903279858Sjfv
5904279858Sjfv		if (vector->rxq_map != 0) {
5905279858Sjfv			largest_rxq = fls(vector->rxq_map) - 1;
5906279858Sjfv			if (largest_rxq >= vf->vsi.num_queues) {
5907279858Sjfv				i40e_send_vf_nack(pf, vf,
5908279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5909279858Sjfv				    I40E_ERR_PARAM);
5910279858Sjfv				return;
5911279858Sjfv			}
5912279858Sjfv		}
5913279858Sjfv
5914279858Sjfv		if (vector->txq_map != 0) {
5915279858Sjfv			largest_txq = fls(vector->txq_map) - 1;
5916279858Sjfv			if (largest_txq >= vf->vsi.num_queues) {
5917279858Sjfv				i40e_send_vf_nack(pf, vf,
5918279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5919279858Sjfv				    I40E_ERR_PARAM);
5920279858Sjfv				return;
5921279858Sjfv			}
5922279858Sjfv		}
5923279858Sjfv
5924279858Sjfv		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
5925279858Sjfv		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
5926279858Sjfv			i40e_send_vf_nack(pf, vf,
5927279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5928279858Sjfv			    I40E_ERR_PARAM);
5929279858Sjfv			return;
5930279858Sjfv		}
5931279858Sjfv
5932279858Sjfv		ixl_vf_config_vector(pf, vf, vector);
5933279858Sjfv	}
5934279858Sjfv
5935279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
5936279858Sjfv}
5937279858Sjfv
5938279858Sjfvstatic void
5939279858Sjfvixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5940279858Sjfv    uint16_t msg_size)
5941279858Sjfv{
5942279858Sjfv	struct i40e_virtchnl_queue_select *select;
5943279858Sjfv	int error;
5944279858Sjfv
5945279858Sjfv	if (msg_size != sizeof(*select)) {
5946279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5947279858Sjfv		    I40E_ERR_PARAM);
5948279858Sjfv		return;
5949279858Sjfv	}
5950279858Sjfv
5951279858Sjfv	select = msg;
5952279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
5953279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
5954279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5955279858Sjfv		    I40E_ERR_PARAM);
5956279858Sjfv		return;
5957279858Sjfv	}
5958279858Sjfv
5959279858Sjfv	error = ixl_enable_rings(&vf->vsi);
5960269198Sjfv	if (error) {
5961279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
5962279858Sjfv		    I40E_ERR_TIMEOUT);
5963279858Sjfv		return;
5964269198Sjfv	}
5965269198Sjfv
5966279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
5967269198Sjfv}
5968266423Sjfv
5969279858Sjfvstatic void
5970279858Sjfvixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
5971279858Sjfv    void *msg, uint16_t msg_size)
5972279858Sjfv{
5973279858Sjfv	struct i40e_virtchnl_queue_select *select;
5974279858Sjfv	int error;
5975279858Sjfv
5976279858Sjfv	if (msg_size != sizeof(*select)) {
5977279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5978279858Sjfv		    I40E_ERR_PARAM);
5979279858Sjfv		return;
5980279858Sjfv	}
5981279858Sjfv
5982279858Sjfv	select = msg;
5983279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
5984279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
5985279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5986279858Sjfv		    I40E_ERR_PARAM);
5987279858Sjfv		return;
5988279858Sjfv	}
5989279858Sjfv
5990279858Sjfv	error = ixl_disable_rings(&vf->vsi);
5991279858Sjfv	if (error) {
5992279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
5993279858Sjfv		    I40E_ERR_TIMEOUT);
5994279858Sjfv		return;
5995279858Sjfv	}
5996279858Sjfv
5997279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
5998279858Sjfv}
5999279858Sjfv
6000279858Sjfvstatic boolean_t
6001279858Sjfvixl_zero_mac(const uint8_t *addr)
6002279858Sjfv{
6003279858Sjfv	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6004279858Sjfv
6005279858Sjfv	return (cmp_etheraddr(addr, zero));
6006279858Sjfv}
6007279858Sjfv
6008279858Sjfvstatic boolean_t
6009279858Sjfvixl_bcast_mac(const uint8_t *addr)
6010279858Sjfv{
6011279858Sjfv
6012279858Sjfv	return (cmp_etheraddr(addr, ixl_bcast_addr));
6013279858Sjfv}
6014279858Sjfv
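/* Check whether the VF may add a filter for the given MAC address. */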
6015279858Sjfvstatic int
6016279858Sjfvixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6017279858Sjfv{
6018279858Sjfv
6019279858Sjfv	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6020279858Sjfv		return (EINVAL);
6021279858Sjfv
6022279858Sjfv	/*
6023279858Sjfv	 * If the VF is not allowed to change its MAC address, don't let it
6024279858Sjfv	 * set a MAC filter for an address that is not a multicast address and
6025279858Sjfv	 * is not its assigned MAC.
6026279858Sjfv	 */
6027279858Sjfv	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6028279858Sjfv	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6029279858Sjfv		return (EPERM);
6030279858Sjfv
6031279858Sjfv	return (0);
6032279858Sjfv}
6033279858Sjfv
6034279858Sjfvstatic void
6035279858Sjfvixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6036279858Sjfv    uint16_t msg_size)
6037279858Sjfv{
6038279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6039279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6040279858Sjfv	struct ixl_vsi *vsi;
6041279858Sjfv	int i;
6042279858Sjfv	size_t expected_size;
6043279858Sjfv
6044279858Sjfv	vsi = &vf->vsi;
6045279858Sjfv
6046279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6047279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6048279858Sjfv		    I40E_ERR_PARAM);
6049279858Sjfv		return;
6050279858Sjfv	}
6051279858Sjfv
6052279858Sjfv	addr_list = msg;
6053279858Sjfv	expected_size = sizeof(*addr_list) +
6054279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6055279858Sjfv
6056279858Sjfv	if (addr_list->num_elements == 0 ||
6057279858Sjfv	    addr_list->vsi_id != vsi->vsi_num ||
6058279858Sjfv	    msg_size != expected_size) {
6059279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6060279858Sjfv		    I40E_ERR_PARAM);
6061279858Sjfv		return;
6062279858Sjfv	}
6063279858Sjfv
6064279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6065279858Sjfv		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6066279858Sjfv			i40e_send_vf_nack(pf, vf,
6067279858Sjfv			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6068279858Sjfv			return;
6069279858Sjfv		}
6070279858Sjfv	}
6071279858Sjfv
6072279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6073279858Sjfv		addr = &addr_list->list[i];
6074279858Sjfv		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6075279858Sjfv	}
6076279858Sjfv
6077279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6078279858Sjfv}
6079279858Sjfv
6080279858Sjfvstatic void
6081279858Sjfvixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6082279858Sjfv    uint16_t msg_size)
6083279858Sjfv{
6084279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6085279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6086279858Sjfv	size_t expected_size;
6087279858Sjfv	int i;
6088279858Sjfv
6089279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6090279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6091279858Sjfv		    I40E_ERR_PARAM);
6092279858Sjfv		return;
6093279858Sjfv	}
6094279858Sjfv
6095279858Sjfv	addr_list = msg;
6096279858Sjfv	expected_size = sizeof(*addr_list) +
6097279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6098279858Sjfv
6099279858Sjfv	if (addr_list->num_elements == 0 ||
6100279858Sjfv	    addr_list->vsi_id != vf->vsi.vsi_num ||
6101279858Sjfv	    msg_size != expected_size) {
6102279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6103279858Sjfv		    I40E_ERR_PARAM);
6104279858Sjfv		return;
6105279858Sjfv	}
6106279858Sjfv
6107279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6108279858Sjfv		addr = &addr_list->list[i];
6109279858Sjfv		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6110279858Sjfv			i40e_send_vf_nack(pf, vf,
6111279858Sjfv			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
6112279858Sjfv			return;
6113279858Sjfv		}
6114279858Sjfv	}
6115279858Sjfv
6116279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6117279858Sjfv		addr = &addr_list->list[i];
6118279858Sjfv		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6119279858Sjfv	}
6120279858Sjfv
6121279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6122279858Sjfv}
6123279858Sjfv
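/* Enable VLAN stripping on the VF's VSI by updating its VSI parameters. */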
6124279858Sjfvstatic enum i40e_status_code
6125279858Sjfvixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6126279858Sjfv{
6127279858Sjfv	struct i40e_vsi_context vsi_ctx;
6128279858Sjfv
6129279858Sjfv	vsi_ctx.seid = vf->vsi.seid;
6130279858Sjfv
6131279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6132279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6133279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6134279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6135279858Sjfv	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6136279858Sjfv}
6137279858Sjfv
6138279858Sjfvstatic void
6139279858Sjfvixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6140279858Sjfv    uint16_t msg_size)
6141279858Sjfv{
6142279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6143279858Sjfv	enum i40e_status_code code;
6144279858Sjfv	size_t expected_size;
6145279858Sjfv	int i;
6146279858Sjfv
6147279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6148279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6149279858Sjfv		    I40E_ERR_PARAM);
6150279858Sjfv		return;
6151279858Sjfv	}
6152279858Sjfv
6153279858Sjfv	filter_list = msg;
6154279858Sjfv	expected_size = sizeof(*filter_list) +
6155279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6156279858Sjfv	if (filter_list->num_elements == 0 ||
6157279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6158279858Sjfv	    msg_size != expected_size) {
6159279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6160279858Sjfv		    I40E_ERR_PARAM);
6161279858Sjfv		return;
6162279858Sjfv	}
6163279858Sjfv
6164279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6165279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6166279858Sjfv		    I40E_ERR_PARAM);
6167279858Sjfv		return;
6168279858Sjfv	}
6169279858Sjfv
6170279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6171279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6172279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6173279858Sjfv			    I40E_ERR_PARAM);
6174279858Sjfv			return;
6175279858Sjfv		}
6176279858Sjfv	}
6177279858Sjfv
6178279858Sjfv	code = ixl_vf_enable_vlan_strip(pf, vf);
6179279858Sjfv	if (code != I40E_SUCCESS) {
6180279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6181279858Sjfv		    I40E_ERR_PARAM);
6182279858Sjfv	}
6183279858Sjfv
6184279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6185279858Sjfv		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6186279858Sjfv
6187279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6188279858Sjfv}
6189279858Sjfv
6190279858Sjfvstatic void
6191279858Sjfvixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6192279858Sjfv    uint16_t msg_size)
6193279858Sjfv{
6194279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6195279858Sjfv	int i;
6196279858Sjfv	size_t expected_size;
6197279858Sjfv
6198279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6199279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6200279858Sjfv		    I40E_ERR_PARAM);
6201279858Sjfv		return;
6202279858Sjfv	}
6203279858Sjfv
6204279858Sjfv	filter_list = msg;
6205279858Sjfv	expected_size = sizeof(*filter_list) +
6206279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6207279858Sjfv	if (filter_list->num_elements == 0 ||
6208279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6209279858Sjfv	    msg_size != expected_size) {
6210279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6211279858Sjfv		    I40E_ERR_PARAM);
6212279858Sjfv		return;
6213279858Sjfv	}
6214279858Sjfv
6215279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6216279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6217279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6218279858Sjfv			    I40E_ERR_PARAM);
6219279858Sjfv			return;
6220279858Sjfv		}
6221279858Sjfv	}
6222279858Sjfv
6223279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6224279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6225279858Sjfv		    I40E_ERR_PARAM);
6226279858Sjfv		return;
6227279858Sjfv	}
6228279858Sjfv
6229279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6230279858Sjfv		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6231279858Sjfv
6232279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6233279858Sjfv}
6234279858Sjfv
6235279858Sjfvstatic void
6236279858Sjfvixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6237279858Sjfv    void *msg, uint16_t msg_size)
6238279858Sjfv{
6239279858Sjfv	struct i40e_virtchnl_promisc_info *info;
6240279858Sjfv	enum i40e_status_code code;
6241279858Sjfv
6242279858Sjfv	if (msg_size != sizeof(*info)) {
6243279858Sjfv		i40e_send_vf_nack(pf, vf,
6244279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6245279858Sjfv		return;
6246279858Sjfv	}
6247279858Sjfv
6248279858Sjfv	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
6249279858Sjfv		i40e_send_vf_nack(pf, vf,
6250279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6251279858Sjfv		return;
6252279858Sjfv	}
6253279858Sjfv
6254279858Sjfv	info = msg;
6255279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
6256279858Sjfv		i40e_send_vf_nack(pf, vf,
6257279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6258279858Sjfv		return;
6259279858Sjfv	}
6260279858Sjfv
6261279858Sjfv	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6262279858Sjfv	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6263279858Sjfv	if (code != I40E_SUCCESS) {
6264279858Sjfv		i40e_send_vf_nack(pf, vf,
6265279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6266279858Sjfv		return;
6267279858Sjfv	}
6268279858Sjfv
6269279858Sjfv	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6270279858Sjfv	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6271279858Sjfv	if (code != I40E_SUCCESS) {
6272279858Sjfv		i40e_send_vf_nack(pf, vf,
6273279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6274279858Sjfv		return;
6275279858Sjfv	}
6276279858Sjfv
6277279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6278279858Sjfv}
6279279858Sjfv
6280279858Sjfvstatic void
6281279858Sjfvixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6282279858Sjfv    uint16_t msg_size)
6283279858Sjfv{
6284279858Sjfv	struct i40e_virtchnl_queue_select *queue;
6285279858Sjfv
6286279858Sjfv	if (msg_size != sizeof(*queue)) {
6287279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6288279858Sjfv		    I40E_ERR_PARAM);
6289279858Sjfv		return;
6290279858Sjfv	}
6291279858Sjfv
6292279858Sjfv	queue = msg;
6293279858Sjfv	if (queue->vsi_id != vf->vsi.vsi_num) {
6294279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6295279858Sjfv		    I40E_ERR_PARAM);
6296279858Sjfv		return;
6297279858Sjfv	}
6298279858Sjfv
6299279858Sjfv	ixl_update_eth_stats(&vf->vsi);
6300279858Sjfv
6301279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6302279858Sjfv	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6303279858Sjfv}
6304279858Sjfv
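/* Dispatch a virtchnl message from a VF to the appropriate handler. */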
6305279858Sjfvstatic void
6306279858Sjfvixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6307279858Sjfv{
6308279858Sjfv	struct ixl_vf *vf;
6309279858Sjfv	void *msg;
6310279858Sjfv	uint16_t vf_num, msg_size;
6311279858Sjfv	uint32_t opcode;
6312279858Sjfv
6313279858Sjfv	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6314279858Sjfv	opcode = le32toh(event->desc.cookie_high);
6315279858Sjfv
6316279858Sjfv	if (vf_num >= pf->num_vfs) {
6317279858Sjfv		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6318279858Sjfv		return;
6319279858Sjfv	}
6320279858Sjfv
6321279858Sjfv	vf = &pf->vfs[vf_num];
6322279858Sjfv	msg = event->msg_buf;
6323279858Sjfv	msg_size = event->msg_len;
6324279858Sjfv
6325279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6326279858Sjfv	    "Got msg %s(%d) from VF-%d of size %d\n",
6327279858Sjfv	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6328279858Sjfv
6329279858Sjfv	switch (opcode) {
6330279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
6331279858Sjfv		ixl_vf_version_msg(pf, vf, msg, msg_size);
6332279858Sjfv		break;
6333279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
6334279858Sjfv		ixl_vf_reset_msg(pf, vf, msg, msg_size);
6335279858Sjfv		break;
6336279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6337279858Sjfv		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6338279858Sjfv		break;
6339279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6340279858Sjfv		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6341279858Sjfv		break;
6342279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6343279858Sjfv		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6344279858Sjfv		break;
6345279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6346279858Sjfv		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6347279858Sjfv		break;
6348279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6349279858Sjfv		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6350279858Sjfv		break;
6351279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6352279858Sjfv		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6353279858Sjfv		break;
6354279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6355279858Sjfv		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6356279858Sjfv		break;
6357279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
6358279858Sjfv		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6359279858Sjfv		break;
6360279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
6361279858Sjfv		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6362279858Sjfv		break;
6363279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6364279858Sjfv		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6365279858Sjfv		break;
6366279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
6367279858Sjfv		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6368279858Sjfv		break;
6369279858Sjfv
6370279858Sjfv	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6371279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6372279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6373279858Sjfv	default:
6374279858Sjfv		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6375279858Sjfv		break;
6376279858Sjfv	}
6377279858Sjfv}
6378279858Sjfv
6379279858Sjfv/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
6380279858Sjfvstatic void
6381279858Sjfvixl_handle_vflr(void *arg, int pending)
6382279858Sjfv{
6383279858Sjfv	struct ixl_pf *pf;
6384279858Sjfv	struct i40e_hw *hw;
6385279858Sjfv	uint16_t global_vf_num;
6386279858Sjfv	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6387279858Sjfv	int i;
6388279858Sjfv
6389279858Sjfv	pf = arg;
6390279858Sjfv	hw = &pf->hw;
6391279858Sjfv
6392279858Sjfv	IXL_PF_LOCK(pf);
6393279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
6394279858Sjfv		global_vf_num = hw->func_caps.vf_base_id + i;
6395279858Sjfv
6396279858Sjfv		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6397279858Sjfv		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6398279858Sjfv		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6399279858Sjfv		if (vflrstat & vflrstat_mask) {
6400279858Sjfv			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6401279858Sjfv			    vflrstat_mask);
6402279858Sjfv
6403279858Sjfv			ixl_reinit_vf(pf, &pf->vfs[i]);
6404279858Sjfv		}
6405279858Sjfv	}
6406279858Sjfv
6407279858Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6408279858Sjfv	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6409279858Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6410279858Sjfv	ixl_flush(hw);
6411279858Sjfv
6412279858Sjfv	IXL_PF_UNLOCK(pf);
6413279858Sjfv}
6414279858Sjfv
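/* Translate an admin queue error code into the closest errno value. */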
6415279858Sjfvstatic int
6416279858Sjfvixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6417279858Sjfv{
6418279858Sjfv
6419279858Sjfv	switch (err) {
6420279858Sjfv	case I40E_AQ_RC_EPERM:
6421279858Sjfv		return (EPERM);
6422279858Sjfv	case I40E_AQ_RC_ENOENT:
6423279858Sjfv		return (ENOENT);
6424279858Sjfv	case I40E_AQ_RC_ESRCH:
6425279858Sjfv		return (ESRCH);
6426279858Sjfv	case I40E_AQ_RC_EINTR:
6427279858Sjfv		return (EINTR);
6428279858Sjfv	case I40E_AQ_RC_EIO:
6429279858Sjfv		return (EIO);
6430279858Sjfv	case I40E_AQ_RC_ENXIO:
6431279858Sjfv		return (ENXIO);
6432279858Sjfv	case I40E_AQ_RC_E2BIG:
6433279858Sjfv		return (E2BIG);
6434279858Sjfv	case I40E_AQ_RC_EAGAIN:
6435279858Sjfv		return (EAGAIN);
6436279858Sjfv	case I40E_AQ_RC_ENOMEM:
6437279858Sjfv		return (ENOMEM);
6438279858Sjfv	case I40E_AQ_RC_EACCES:
6439279858Sjfv		return (EACCES);
6440279858Sjfv	case I40E_AQ_RC_EFAULT:
6441279858Sjfv		return (EFAULT);
6442279858Sjfv	case I40E_AQ_RC_EBUSY:
6443279858Sjfv		return (EBUSY);
6444279858Sjfv	case I40E_AQ_RC_EEXIST:
6445279858Sjfv		return (EEXIST);
6446279858Sjfv	case I40E_AQ_RC_EINVAL:
6447279858Sjfv		return (EINVAL);
6448279858Sjfv	case I40E_AQ_RC_ENOTTY:
6449279858Sjfv		return (ENOTTY);
6450279858Sjfv	case I40E_AQ_RC_ENOSPC:
6451279858Sjfv		return (ENOSPC);
6452279858Sjfv	case I40E_AQ_RC_ENOSYS:
6453279858Sjfv		return (ENOSYS);
6454279858Sjfv	case I40E_AQ_RC_ERANGE:
6455279858Sjfv		return (ERANGE);
6456279858Sjfv	case I40E_AQ_RC_EFLUSHED:
6457279858Sjfv		return (EINVAL);	/* No exact equivalent in errno.h */
6458279858Sjfv	case I40E_AQ_RC_BAD_ADDR:
6459279858Sjfv		return (EFAULT);
6460279858Sjfv	case I40E_AQ_RC_EMODE:
6461279858Sjfv		return (EPERM);
6462279858Sjfv	case I40E_AQ_RC_EFBIG:
6463279858Sjfv		return (EFBIG);
6464279858Sjfv	default:
6465279858Sjfv		return (EINVAL);
6466279858Sjfv	}
6467279858Sjfv}
6468279858Sjfv
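/*
 * SR-IOV initialization handler: allocate per-VF state, create the VEB used
 * for VF traffic, and re-enable the admin queue interrupt.
 */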
6469279858Sjfvstatic int
6470279858Sjfvixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6471279858Sjfv{
6472279858Sjfv	struct ixl_pf *pf;
6473279858Sjfv	struct i40e_hw *hw;
6474279858Sjfv	struct ixl_vsi *pf_vsi;
6475279858Sjfv	enum i40e_status_code ret;
6476279858Sjfv	int i, error;
6477279858Sjfv
6478279858Sjfv	pf = device_get_softc(dev);
6479279858Sjfv	hw = &pf->hw;
6480279858Sjfv	pf_vsi = &pf->vsi;
6481279858Sjfv
6482279858Sjfv	IXL_PF_LOCK(pf);
6483279858Sjfv	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6484279858Sjfv	    M_ZERO);
6485279858Sjfv
6486279858Sjfv	if (pf->vfs == NULL) {
6487279858Sjfv		error = ENOMEM;
6488279858Sjfv		goto fail;
6489279858Sjfv	}
6490279858Sjfv
6491279858Sjfv	for (i = 0; i < num_vfs; i++)
6492279858Sjfv		sysctl_ctx_init(&pf->vfs[i].ctx);
6493279858Sjfv
6494279858Sjfv	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6495279858Sjfv	    1, FALSE, FALSE, &pf->veb_seid, NULL);
6496279858Sjfv	if (ret != I40E_SUCCESS) {
6497279858Sjfv		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6498279858Sjfv		device_printf(dev, "add_veb failed; code=%d error=%d\n", ret,
6499279858Sjfv		    error);
6500279858Sjfv		goto fail;
6501279858Sjfv	}
6502279858Sjfv
6503279858Sjfv	ixl_configure_msix(pf);
6504279858Sjfv	ixl_enable_adminq(hw);
6505279858Sjfv
6506279858Sjfv	pf->num_vfs = num_vfs;
6507279858Sjfv	IXL_PF_UNLOCK(pf);
6508279858Sjfv	return (0);
6509279858Sjfv
6510279858Sjfvfail:
6511279858Sjfv	free(pf->vfs, M_IXL);
6512279858Sjfv	pf->vfs = NULL;
6513279858Sjfv	IXL_PF_UNLOCK(pf);
6514279858Sjfv	return (error);
6515279858Sjfv}
6516279858Sjfv
6517279858Sjfvstatic void
6518279858Sjfvixl_uninit_iov(device_t dev)
6519279858Sjfv{
6520279858Sjfv	struct ixl_pf *pf;
6521279858Sjfv	struct i40e_hw *hw;
6522279858Sjfv	struct ixl_vsi *vsi;
6523279858Sjfv	struct ifnet *ifp;
6524279858Sjfv	struct ixl_vf *vfs;
6525279858Sjfv	int i, num_vfs;
6526279858Sjfv
6527279858Sjfv	pf = device_get_softc(dev);
6528279858Sjfv	hw = &pf->hw;
6529279858Sjfv	vsi = &pf->vsi;
6530279858Sjfv	ifp = vsi->ifp;
6531279858Sjfv
6532279858Sjfv	IXL_PF_LOCK(pf);
6533279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
6534279858Sjfv		if (pf->vfs[i].vsi.seid != 0)
6535279858Sjfv			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6536279858Sjfv	}
6537279858Sjfv
6538279858Sjfv	if (pf->veb_seid != 0) {
6539279858Sjfv		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6540279858Sjfv		pf->veb_seid = 0;
6541279858Sjfv	}
6542279858Sjfv
6543279858Sjfv	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6544279858Sjfv		ixl_disable_intr(vsi);
6545279858Sjfv
6546279858Sjfv	vfs = pf->vfs;
6547279858Sjfv	num_vfs = pf->num_vfs;
6548279858Sjfv
6549279858Sjfv	pf->vfs = NULL;
6550279858Sjfv	pf->num_vfs = 0;
6551279858Sjfv	IXL_PF_UNLOCK(pf);
6552279858Sjfv
6553279858Sjfv	/* Do this after the unlock as sysctl_ctx_free might sleep. */
6554279858Sjfv	for (i = 0; i < num_vfs; i++)
6555279858Sjfv		sysctl_ctx_free(&vfs[i].ctx);
6556279858Sjfv	free(vfs, M_IXL);
6557279858Sjfv}
6558279858Sjfv
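/*
 * SR-IOV add-VF handler: set up the VF's VSI, apply the MAC address and
 * capability settings supplied in the nvlist, and reset the VF so that it
 * starts from a clean state.
 */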
6559279858Sjfvstatic int
6560279858Sjfvixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6561279858Sjfv{
6562279858Sjfv	char sysctl_name[QUEUE_NAME_LEN];
6563279858Sjfv	struct ixl_pf *pf;
6564279858Sjfv	struct ixl_vf *vf;
6565279858Sjfv	const void *mac;
6566279858Sjfv	size_t size;
6567279858Sjfv	int error;
6568279858Sjfv
6569279858Sjfv	pf = device_get_softc(dev);
6570279858Sjfv	vf = &pf->vfs[vfnum];
6571279858Sjfv
6572279858Sjfv	IXL_PF_LOCK(pf);
6573279858Sjfv	vf->vf_num = vfnum;
6574279858Sjfv
6575279858Sjfv	vf->vsi.back = pf;
6576279858Sjfv	vf->vf_flags = VF_FLAG_ENABLED;
6577279858Sjfv	SLIST_INIT(&vf->vsi.ftl);
6578279858Sjfv
6579279858Sjfv	error = ixl_vf_setup_vsi(pf, vf);
6580279858Sjfv	if (error != 0)
6581279858Sjfv		goto out;
6582279858Sjfv
6583279858Sjfv	if (nvlist_exists_binary(params, "mac-addr")) {
6584279858Sjfv		mac = nvlist_get_binary(params, "mac-addr", &size);
6585279858Sjfv		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6586279858Sjfv
6587279858Sjfv		if (nvlist_get_bool(params, "allow-set-mac"))
6588279858Sjfv			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6589279858Sjfv	} else
6590279858Sjfv		/*
6591279858Sjfv		 * If the administrator has not specified a MAC address then
6592279858Sjfv		 * we must allow the VF to choose one.
6593279858Sjfv		 */
6594279858Sjfv		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6595279858Sjfv
6596279858Sjfv	if (nvlist_get_bool(params, "mac-anti-spoof"))
6597279858Sjfv		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6598279858Sjfv
6599279858Sjfv	if (nvlist_get_bool(params, "allow-promisc"))
6600279858Sjfv		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
6601279858Sjfv
6602279858Sjfv	vf->vf_flags |= VF_FLAG_VLAN_CAP;
6603279858Sjfv
6604279858Sjfv	ixl_reset_vf(pf, vf);
6605279858Sjfvout:
6606279858Sjfv	IXL_PF_UNLOCK(pf);
6607279858Sjfv	if (error == 0) {
6608279858Sjfv		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6609279858Sjfv		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6610279858Sjfv	}
6611279858Sjfv
6612279858Sjfv	return (error);
6613279858Sjfv}
6614279858Sjfv#endif /* PCI_IOV */
6615