ixl_pf_main.c revision 299545
1266423Sjfv/******************************************************************************
2266423Sjfv
3279033Sjfv  Copyright (c) 2013-2015, Intel Corporation
4266423Sjfv  All rights reserved.
5266423Sjfv
6266423Sjfv  Redistribution and use in source and binary forms, with or without
7266423Sjfv  modification, are permitted provided that the following conditions are met:
8266423Sjfv
9266423Sjfv   1. Redistributions of source code must retain the above copyright notice,
10266423Sjfv      this list of conditions and the following disclaimer.
11266423Sjfv
12266423Sjfv   2. Redistributions in binary form must reproduce the above copyright
13266423Sjfv      notice, this list of conditions and the following disclaimer in the
14266423Sjfv      documentation and/or other materials provided with the distribution.
15266423Sjfv
16266423Sjfv   3. Neither the name of the Intel Corporation nor the names of its
17266423Sjfv      contributors may be used to endorse or promote products derived from
18266423Sjfv      this software without specific prior written permission.
19266423Sjfv
20266423Sjfv  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21266423Sjfv  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22266423Sjfv  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23266423Sjfv  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24266423Sjfv  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25266423Sjfv  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26266423Sjfv  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27266423Sjfv  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28266423Sjfv  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29266423Sjfv  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30266423Sjfv  POSSIBILITY OF SUCH DAMAGE.
31266423Sjfv
32266423Sjfv******************************************************************************/
33266423Sjfv/*$FreeBSD: head/sys/dev/ixl/if_ixl.c 299545 2016-05-12 18:18:55Z erj $*/
34266423Sjfv
35279033Sjfv#ifndef IXL_STANDALONE_BUILD
36266423Sjfv#include "opt_inet.h"
37266423Sjfv#include "opt_inet6.h"
38277084Sjfv#include "opt_rss.h"
39279033Sjfv#endif
40279033Sjfv
41270346Sjfv#include "ixl.h"
42270346Sjfv#include "ixl_pf.h"
43269198Sjfv
44277262Sjfv#ifdef RSS
45277262Sjfv#include <net/rss_config.h>
46277262Sjfv#endif
47277262Sjfv
48266423Sjfv/*********************************************************************
49266423Sjfv *  Driver version
50266423Sjfv *********************************************************************/
51299545Serjchar ixl_driver_version[] = "1.4.5-k";
52266423Sjfv
53266423Sjfv/*********************************************************************
54266423Sjfv *  PCI Device ID Table
55266423Sjfv *
56266423Sjfv *  Used by probe to select the devices to load on.
57270346Sjfv *  The last field stores an index into ixl_strings.
58266423Sjfv *  The last entry must be all 0s.
59266423Sjfv *
60266423Sjfv *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61266423Sjfv *********************************************************************/
62266423Sjfv
63270346Sjfvstatic ixl_vendor_info_t ixl_vendor_info_array[] =
64266423Sjfv{
65266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
66299545Serj	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
67266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
68266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
69266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
70266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
71266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
72270346Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
73284049Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
74299545Serj	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
75299545Serj	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
76266423Sjfv	/* required last entry */
77266423Sjfv	{0, 0, 0, 0, 0}
78266423Sjfv};
79266423Sjfv
80266423Sjfv/*********************************************************************
81266423Sjfv *  Table of branding strings
82266423Sjfv *********************************************************************/
83266423Sjfv
84270346Sjfvstatic char    *ixl_strings[] = {
85266423Sjfv	"Intel(R) Ethernet Connection XL710 Driver"
86266423Sjfv};
87266423Sjfv
88266423Sjfv
89266423Sjfv/*********************************************************************
90266423Sjfv *  Function prototypes
91266423Sjfv *********************************************************************/
92270346Sjfvstatic int      ixl_probe(device_t);
93270346Sjfvstatic int      ixl_attach(device_t);
94270346Sjfvstatic int      ixl_detach(device_t);
95270346Sjfvstatic int      ixl_shutdown(device_t);
96270346Sjfvstatic int	ixl_get_hw_capabilities(struct ixl_pf *);
97270346Sjfvstatic void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
98270346Sjfvstatic int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
99270346Sjfvstatic void	ixl_init(void *);
100270346Sjfvstatic void	ixl_init_locked(struct ixl_pf *);
101270346Sjfvstatic void     ixl_stop(struct ixl_pf *);
102270346Sjfvstatic void     ixl_media_status(struct ifnet *, struct ifmediareq *);
103270346Sjfvstatic int      ixl_media_change(struct ifnet *);
104270346Sjfvstatic void     ixl_update_link_status(struct ixl_pf *);
105270346Sjfvstatic int      ixl_allocate_pci_resources(struct ixl_pf *);
106270346Sjfvstatic u16	ixl_get_bus_info(struct i40e_hw *, device_t);
107270346Sjfvstatic int	ixl_setup_stations(struct ixl_pf *);
108279033Sjfvstatic int	ixl_switch_config(struct ixl_pf *);
109270346Sjfvstatic int	ixl_initialize_vsi(struct ixl_vsi *);
110270346Sjfvstatic int	ixl_assign_vsi_msix(struct ixl_pf *);
111270346Sjfvstatic int	ixl_assign_vsi_legacy(struct ixl_pf *);
112270346Sjfvstatic int	ixl_init_msix(struct ixl_pf *);
113270346Sjfvstatic void	ixl_configure_msix(struct ixl_pf *);
114270346Sjfvstatic void	ixl_configure_itr(struct ixl_pf *);
115270346Sjfvstatic void	ixl_configure_legacy(struct ixl_pf *);
116270346Sjfvstatic void	ixl_free_pci_resources(struct ixl_pf *);
117270346Sjfvstatic void	ixl_local_timer(void *);
118270346Sjfvstatic int	ixl_setup_interface(device_t, struct ixl_vsi *);
119279858Sjfvstatic void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
120270346Sjfvstatic void	ixl_config_rss(struct ixl_vsi *);
121270346Sjfvstatic void	ixl_set_queue_rx_itr(struct ixl_queue *);
122270346Sjfvstatic void	ixl_set_queue_tx_itr(struct ixl_queue *);
123274205Sjfvstatic int	ixl_set_advertised_speeds(struct ixl_pf *, int);
124266423Sjfv
125279858Sjfvstatic int	ixl_enable_rings(struct ixl_vsi *);
126279858Sjfvstatic int	ixl_disable_rings(struct ixl_vsi *);
127279858Sjfvstatic void	ixl_enable_intr(struct ixl_vsi *);
128279858Sjfvstatic void	ixl_disable_intr(struct ixl_vsi *);
129279858Sjfvstatic void	ixl_disable_rings_intr(struct ixl_vsi *);
130266423Sjfv
131270346Sjfvstatic void     ixl_enable_adminq(struct i40e_hw *);
132270346Sjfvstatic void     ixl_disable_adminq(struct i40e_hw *);
133270346Sjfvstatic void     ixl_enable_queue(struct i40e_hw *, int);
134270346Sjfvstatic void     ixl_disable_queue(struct i40e_hw *, int);
135270346Sjfvstatic void     ixl_enable_legacy(struct i40e_hw *);
136270346Sjfvstatic void     ixl_disable_legacy(struct i40e_hw *);
137266423Sjfv
138270346Sjfvstatic void     ixl_set_promisc(struct ixl_vsi *);
139270346Sjfvstatic void     ixl_add_multi(struct ixl_vsi *);
140270346Sjfvstatic void     ixl_del_multi(struct ixl_vsi *);
141270346Sjfvstatic void	ixl_register_vlan(void *, struct ifnet *, u16);
142270346Sjfvstatic void	ixl_unregister_vlan(void *, struct ifnet *, u16);
143270346Sjfvstatic void	ixl_setup_vlan_filters(struct ixl_vsi *);
144266423Sjfv
145270346Sjfvstatic void	ixl_init_filters(struct ixl_vsi *);
146279858Sjfvstatic void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
147270346Sjfvstatic void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
148270346Sjfvstatic void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
149270346Sjfvstatic void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
150270346Sjfvstatic void	ixl_del_hw_filters(struct ixl_vsi *, int);
151270346Sjfvstatic struct ixl_mac_filter *
152270346Sjfv		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
153270346Sjfvstatic void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
154279858Sjfvstatic void	ixl_free_mac_filters(struct ixl_vsi *vsi);
155266423Sjfv
156279858Sjfv
157266423Sjfv/* Sysctl debug interface */
158270346Sjfvstatic int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
159270346Sjfvstatic void	ixl_print_debug_info(struct ixl_pf *);
160266423Sjfv
161266423Sjfv/* The MSI/X Interrupt handlers */
162270346Sjfvstatic void	ixl_intr(void *);
163270346Sjfvstatic void	ixl_msix_que(void *);
164270346Sjfvstatic void	ixl_msix_adminq(void *);
165270346Sjfvstatic void	ixl_handle_mdd_event(struct ixl_pf *);
166266423Sjfv
167266423Sjfv/* Deferred interrupt tasklets */
168270346Sjfvstatic void	ixl_do_adminq(void *, int);
169266423Sjfv
170266423Sjfv/* Sysctl handlers */
171270346Sjfvstatic int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
172270346Sjfvstatic int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
173270346Sjfvstatic int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
174274205Sjfvstatic int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
175266423Sjfv
176266423Sjfv/* Statistics */
177270346Sjfvstatic void     ixl_add_hw_stats(struct ixl_pf *);
178270346Sjfvstatic void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
179266423Sjfv		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
180270346Sjfvstatic void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
181266423Sjfv		    struct sysctl_oid_list *,
182266423Sjfv		    struct i40e_eth_stats *);
183270346Sjfvstatic void	ixl_update_stats_counters(struct ixl_pf *);
184270346Sjfvstatic void	ixl_update_eth_stats(struct ixl_vsi *);
185279858Sjfvstatic void	ixl_update_vsi_stats(struct ixl_vsi *);
186270346Sjfvstatic void	ixl_pf_reset_stats(struct ixl_pf *);
187270346Sjfvstatic void	ixl_vsi_reset_stats(struct ixl_vsi *);
188270346Sjfvstatic void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
189266423Sjfv		    u64 *, u64 *);
190270346Sjfvstatic void	ixl_stat_update32(struct i40e_hw *, u32, bool,
191266423Sjfv		    u64 *, u64 *);
192266423Sjfv
193277084Sjfv#ifdef IXL_DEBUG_SYSCTL
194270346Sjfvstatic int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
195270346Sjfvstatic int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
196270346Sjfvstatic int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
197274205Sjfvstatic int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
198274205Sjfvstatic int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
199266423Sjfv#endif
200266423Sjfv
201279858Sjfv#ifdef PCI_IOV
202279858Sjfvstatic int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
203279858Sjfv
204299545Serjstatic int	ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
205299545Serjstatic void	ixl_uninit_iov(device_t dev);
206279858Sjfvstatic int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
207279858Sjfv
208279858Sjfvstatic void	ixl_handle_vf_msg(struct ixl_pf *,
209279858Sjfv		    struct i40e_arq_event_info *);
210279858Sjfvstatic void	ixl_handle_vflr(void *arg, int pending);
211279858Sjfv
212279858Sjfvstatic void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
213279858Sjfvstatic void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
214279858Sjfv#endif
215279858Sjfv
216266423Sjfv/*********************************************************************
217266423Sjfv *  FreeBSD Device Interface Entry Points
218266423Sjfv *********************************************************************/
219266423Sjfv
220270346Sjfvstatic device_method_t ixl_methods[] = {
221266423Sjfv	/* Device interface */
222270346Sjfv	DEVMETHOD(device_probe, ixl_probe),
223270346Sjfv	DEVMETHOD(device_attach, ixl_attach),
224270346Sjfv	DEVMETHOD(device_detach, ixl_detach),
225270346Sjfv	DEVMETHOD(device_shutdown, ixl_shutdown),
226279858Sjfv#ifdef PCI_IOV
227299545Serj	DEVMETHOD(pci_init_iov, ixl_init_iov),
228299545Serj	DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
229299545Serj	DEVMETHOD(pci_add_vf, ixl_add_vf),
230279858Sjfv#endif
231266423Sjfv	{0, 0}
232266423Sjfv};
233266423Sjfv
234270346Sjfvstatic driver_t ixl_driver = {
235270346Sjfv	"ixl", ixl_methods, sizeof(struct ixl_pf),
236266423Sjfv};
237266423Sjfv
238270346Sjfvdevclass_t ixl_devclass;
239270346SjfvDRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
240266423Sjfv
241270346SjfvMODULE_DEPEND(ixl, pci, 1, 1, 1);
242270346SjfvMODULE_DEPEND(ixl, ether, 1, 1, 1);
243279860Sjfv#ifdef DEV_NETMAP
244279860SjfvMODULE_DEPEND(ixl, netmap, 1, 1, 1);
245279860Sjfv#endif /* DEV_NETMAP */
246279860Sjfv
247266423Sjfv/*
248269198Sjfv** Global reset mutex
249269198Sjfv*/
250270346Sjfvstatic struct mtx ixl_reset_mtx;
251269198Sjfv
252269198Sjfv/*
253270346Sjfv** TUNEABLE PARAMETERS:
254270346Sjfv*/
255270346Sjfv
256270346Sjfvstatic SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
257270346Sjfv                   "IXL driver parameters");
258270346Sjfv
259270346Sjfv/*
260266423Sjfv * MSIX should be the default for best performance,
261266423Sjfv * but this allows it to be forced off for testing.
262266423Sjfv */
263270346Sjfvstatic int ixl_enable_msix = 1;
264270346SjfvTUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
265270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
266270346Sjfv    "Enable MSI-X interrupts");
267266423Sjfv
268266423Sjfv/*
269266423Sjfv** Number of descriptors per ring:
270266423Sjfv**   - TX and RX are the same size
271266423Sjfv*/
272270346Sjfvstatic int ixl_ringsz = DEFAULT_RING;
273270346SjfvTUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
274270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
275270346Sjfv    &ixl_ringsz, 0, "Descriptor Ring Size");
276266423Sjfv
277266423Sjfv/*
278266423Sjfv** This can be set manually; if left as 0, the
279266423Sjfv** number of queues will be calculated based on
280266423Sjfv** the CPUs and MSI-X vectors available.
281266423Sjfv*/
282270346Sjfvint ixl_max_queues = 0;
283270346SjfvTUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
284270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
285270346Sjfv    &ixl_max_queues, 0, "Number of Queues");
286266423Sjfv
287266423Sjfv/*
288266423Sjfv** Controls for Interrupt Throttling
289266423Sjfv**	- true/false for dynamic adjustment
290266423Sjfv** 	- default values for static ITR
291266423Sjfv*/
292270346Sjfvint ixl_dynamic_rx_itr = 0;
293270346SjfvTUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
294270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
295270346Sjfv    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
296266423Sjfv
297270346Sjfvint ixl_dynamic_tx_itr = 0;
298270346SjfvTUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
299270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
300270346Sjfv    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
301266423Sjfv
302270346Sjfvint ixl_rx_itr = IXL_ITR_8K;
303270346SjfvTUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
304270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
305270346Sjfv    &ixl_rx_itr, 0, "RX Interrupt Rate");
306270346Sjfv
307270346Sjfvint ixl_tx_itr = IXL_ITR_4K;
308270346SjfvTUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
309270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
310270346Sjfv    &ixl_tx_itr, 0, "TX Interrupt Rate");
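
/*
** For example (illustrative values only, not recommendations), the
** loader tunables above can be set at boot time from /boot/loader.conf:
**
**   hw.ixl.enable_msix=1
**   hw.ixl.ringsz=1024
**   hw.ixl.max_queues=4
**
** and inspected at runtime with sysctl(8) under the hw.ixl tree.
*/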
311270346Sjfv
312270346Sjfv#ifdef IXL_FDIR
313270346Sjfvstatic int ixl_enable_fdir = 1;
314270346SjfvTUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
315266423Sjfv/* Rate at which we sample */
316270346Sjfvint ixl_atr_rate = 20;
317270346SjfvTUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
318266423Sjfv#endif
319266423Sjfv
320279860Sjfv#ifdef DEV_NETMAP
321279860Sjfv#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
322279860Sjfv#include <dev/netmap/if_ixl_netmap.h>
323279860Sjfv#endif /* DEV_NETMAP */
324274205Sjfv
325270346Sjfvstatic char *ixl_fc_string[6] = {
326266423Sjfv	"None",
327266423Sjfv	"Rx",
328266423Sjfv	"Tx",
329266423Sjfv	"Full",
330266423Sjfv	"Priority",
331266423Sjfv	"Default"
332266423Sjfv};
333266423Sjfv
334279858Sjfvstatic MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
335269198Sjfv
336279858Sjfvstatic uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
337279858Sjfv    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
338279858Sjfv
339266423Sjfv/*********************************************************************
340266423Sjfv *  Device identification routine
341266423Sjfv *
342270346Sjfv *  ixl_probe determines if the driver should be loaded on
343266423Sjfv *  the hardware based on PCI vendor/device id of the device.
344266423Sjfv *
345266423Sjfv *  return BUS_PROBE_DEFAULT on success, positive on failure
346266423Sjfv *********************************************************************/
347266423Sjfv
348266423Sjfvstatic int
349270346Sjfvixl_probe(device_t dev)
350266423Sjfv{
351270346Sjfv	ixl_vendor_info_t *ent;
352266423Sjfv
353266423Sjfv	u16	pci_vendor_id, pci_device_id;
354266423Sjfv	u16	pci_subvendor_id, pci_subdevice_id;
355266423Sjfv	char	device_name[256];
356269198Sjfv	static bool lock_init = FALSE;
357266423Sjfv
358270346Sjfv	INIT_DEBUGOUT("ixl_probe: begin");
359266423Sjfv
360266423Sjfv	pci_vendor_id = pci_get_vendor(dev);
361266423Sjfv	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
362266423Sjfv		return (ENXIO);
363266423Sjfv
364266423Sjfv	pci_device_id = pci_get_device(dev);
365266423Sjfv	pci_subvendor_id = pci_get_subvendor(dev);
366266423Sjfv	pci_subdevice_id = pci_get_subdevice(dev);
367266423Sjfv
368270346Sjfv	ent = ixl_vendor_info_array;
369266423Sjfv	while (ent->vendor_id != 0) {
370266423Sjfv		if ((pci_vendor_id == ent->vendor_id) &&
371266423Sjfv		    (pci_device_id == ent->device_id) &&
372266423Sjfv
373266423Sjfv		    ((pci_subvendor_id == ent->subvendor_id) ||
374266423Sjfv		     (ent->subvendor_id == 0)) &&
375266423Sjfv
376266423Sjfv		    ((pci_subdevice_id == ent->subdevice_id) ||
377266423Sjfv		     (ent->subdevice_id == 0))) {
378266423Sjfv			sprintf(device_name, "%s, Version - %s",
379270346Sjfv				ixl_strings[ent->index],
380270346Sjfv				ixl_driver_version);
381266423Sjfv			device_set_desc_copy(dev, device_name);
382269198Sjfv			/* One shot mutex init */
383269198Sjfv			if (lock_init == FALSE) {
384269198Sjfv				lock_init = TRUE;
385270346Sjfv				mtx_init(&ixl_reset_mtx,
386270346Sjfv				    "ixl_reset",
387270346Sjfv				    "IXL RESET Lock", MTX_DEF);
388269198Sjfv			}
389266423Sjfv			return (BUS_PROBE_DEFAULT);
390266423Sjfv		}
391266423Sjfv		ent++;
392266423Sjfv	}
393266423Sjfv	return (ENXIO);
394266423Sjfv}
395266423Sjfv
396266423Sjfv/*********************************************************************
397266423Sjfv *  Device initialization routine
398266423Sjfv *
399266423Sjfv *  The attach entry point is called when the driver is being loaded.
400266423Sjfv *  This routine identifies the type of hardware, allocates all resources
401266423Sjfv *  and initializes the hardware.
402266423Sjfv *
403266423Sjfv *  return 0 on success, positive on failure
404266423Sjfv *********************************************************************/
405266423Sjfv
406266423Sjfvstatic int
407270346Sjfvixl_attach(device_t dev)
408266423Sjfv{
409270346Sjfv	struct ixl_pf	*pf;
410266423Sjfv	struct i40e_hw	*hw;
411270346Sjfv	struct ixl_vsi *vsi;
412266423Sjfv	u16		bus;
413266423Sjfv	int             error = 0;
414279858Sjfv#ifdef PCI_IOV
415279858Sjfv	nvlist_t	*pf_schema, *vf_schema;
416279858Sjfv	int		iov_error;
417279858Sjfv#endif
418266423Sjfv
419270346Sjfv	INIT_DEBUGOUT("ixl_attach: begin");
420266423Sjfv
421266423Sjfv	/* Allocate, clear, and link in our primary soft structure */
422266423Sjfv	pf = device_get_softc(dev);
423266423Sjfv	pf->dev = pf->osdep.dev = dev;
424266423Sjfv	hw = &pf->hw;
425266423Sjfv
426266423Sjfv	/*
427266423Sjfv	** Note this assumes we have a single embedded VSI;
428266423Sjfv	** this could be enhanced later to allocate multiple.
429266423Sjfv	*/
430266423Sjfv	vsi = &pf->vsi;
431266423Sjfv	vsi->dev = pf->dev;
432266423Sjfv
433266423Sjfv	/* Core Lock Init*/
434270346Sjfv	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
435266423Sjfv
436266423Sjfv	/* Set up the timer callout */
437266423Sjfv	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
438266423Sjfv
439266423Sjfv	/* Set up sysctls */
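	/* (These attach under the per-device tree, dev.ixl.<unit>, as
	    opposed to the global hw.ixl tunables defined above.) */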
440266423Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
441266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
442266423Sjfv	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
443270346Sjfv	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");
444266423Sjfv
445269198Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
446269198Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
447269198Sjfv	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
448270346Sjfv	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");
449269198Sjfv
450270346Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
451270346Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
452270346Sjfv	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
453270346Sjfv	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
454270346Sjfv
455274205Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
456274205Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
457274205Sjfv	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
458274205Sjfv	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
459274205Sjfv
460266423Sjfv	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
461266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
462273377Shselasky	    OID_AUTO, "rx_itr", CTLFLAG_RW,
463270346Sjfv	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
464266423Sjfv
465266423Sjfv	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
466266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
467273377Shselasky	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
468270346Sjfv	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
469266423Sjfv
470266423Sjfv	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
471266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
472273377Shselasky	    OID_AUTO, "tx_itr", CTLFLAG_RW,
473270346Sjfv	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
474266423Sjfv
475266423Sjfv	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
476266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
477273377Shselasky	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
478270346Sjfv	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
479266423Sjfv
480277084Sjfv#ifdef IXL_DEBUG_SYSCTL
481266423Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
482266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
483284049Sjfv	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
484284049Sjfv	    ixl_debug_info, "I", "Debug Information");
485284049Sjfv
486284049Sjfv	/* Debug shared-code message level */
487284049Sjfv	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
488284049Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
489284049Sjfv	    OID_AUTO, "debug_mask", CTLFLAG_RW,
490284049Sjfv	    &pf->hw.debug_mask, 0, "Debug Message Level");
491284049Sjfv
492284049Sjfv	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
493284049Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
494284049Sjfv	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
495284049Sjfv	    0, "PF/VF Virtual Channel debug level");
496284049Sjfv
497284049Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
498284049Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
499266423Sjfv	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
500270346Sjfv	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
501266423Sjfv
502266423Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
503266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
504266423Sjfv	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
505270346Sjfv	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
506266423Sjfv
507266423Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
508266423Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
509266423Sjfv	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
510270346Sjfv	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
511269198Sjfv
512269198Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
513269198Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
514274205Sjfv	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
515274205Sjfv	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
516269198Sjfv
517269198Sjfv	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
518269198Sjfv	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
519274205Sjfv	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
520274205Sjfv	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
521266423Sjfv#endif
522266423Sjfv
523274205Sjfv	/* Save off the PCI information */
524266423Sjfv	hw->vendor_id = pci_get_vendor(dev);
525266423Sjfv	hw->device_id = pci_get_device(dev);
526266423Sjfv	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
527266423Sjfv	hw->subsystem_vendor_id =
528266423Sjfv	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
529266423Sjfv	hw->subsystem_device_id =
530266423Sjfv	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
531266423Sjfv
532269198Sjfv	hw->bus.device = pci_get_slot(dev);
533266423Sjfv	hw->bus.func = pci_get_function(dev);
534266423Sjfv
535279858Sjfv	pf->vc_debug_lvl = 1;
536279858Sjfv
537266423Sjfv	/* Do PCI setup - map BAR0, etc */
538270346Sjfv	if (ixl_allocate_pci_resources(pf)) {
539266423Sjfv		device_printf(dev, "Allocation of PCI resources failed\n");
540266423Sjfv		error = ENXIO;
541266423Sjfv		goto err_out;
542266423Sjfv	}
543266423Sjfv
544266423Sjfv	/* Establish a clean starting point */
545269198Sjfv	i40e_clear_hw(hw);
546266423Sjfv	error = i40e_pf_reset(hw);
547266423Sjfv	if (error) {
548269198Sjfv		device_printf(dev,"PF reset failure %x\n", error);
549269198Sjfv		error = EIO;
550269198Sjfv		goto err_out;
551269198Sjfv	}
552266423Sjfv
553266423Sjfv	/* Set admin queue parameters */
554270346Sjfv	hw->aq.num_arq_entries = IXL_AQ_LEN;
555270346Sjfv	hw->aq.num_asq_entries = IXL_AQ_LEN;
556270346Sjfv	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
557270346Sjfv	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
558266423Sjfv
559266423Sjfv	/* Initialize the shared code */
560266423Sjfv	error = i40e_init_shared_code(hw);
561266423Sjfv	if (error) {
562266423Sjfv		device_printf(dev,"Unable to initialize the shared code\n");
563266423Sjfv		error = EIO;
564266423Sjfv		goto err_out;
565266423Sjfv	}
566266423Sjfv
567266423Sjfv	/* Set up the admin queue */
568266423Sjfv	error = i40e_init_adminq(hw);
569266423Sjfv	if (error) {
570269198Sjfv		device_printf(dev, "The driver for the device stopped "
571269198Sjfv		    "because the NVM image is newer than expected.\n"
572269198Sjfv		    "You must install the most recent version of "
573269198Sjfv		    "the network driver.\n");
574266423Sjfv		goto err_out;
575266423Sjfv	}
576270346Sjfv	device_printf(dev, "%s\n", ixl_fw_version_str(hw));
577266423Sjfv
578269198Sjfv        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
579269198Sjfv	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
580269198Sjfv		device_printf(dev, "The driver for the device detected "
581269198Sjfv		    "a newer version of the NVM image than expected.\n"
582269198Sjfv		    "Please install the most recent version of the network driver.\n");
583269198Sjfv	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
584269198Sjfv	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
585269198Sjfv		device_printf(dev, "The driver for the device detected "
586269198Sjfv		    "an older version of the NVM image than expected.\n"
587269198Sjfv		    "Please update the NVM image.\n");
588266423Sjfv
589266423Sjfv	/* Clear PXE mode */
590266423Sjfv	i40e_clear_pxe_mode(hw);
591266423Sjfv
592266423Sjfv	/* Get capabilities from the device */
593270346Sjfv	error = ixl_get_hw_capabilities(pf);
594266423Sjfv	if (error) {
595266423Sjfv		device_printf(dev, "HW capabilities failure!\n");
596266423Sjfv		goto err_get_cap;
597266423Sjfv	}
598266423Sjfv
599266423Sjfv	/* Set up host memory cache */
600279858Sjfv	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
601279858Sjfv	    hw->func_caps.num_rx_qp, 0, 0);
602266423Sjfv	if (error) {
603266423Sjfv		device_printf(dev, "init_lan_hmc failed: %d\n", error);
604266423Sjfv		goto err_get_cap;
605266423Sjfv	}
606266423Sjfv
607266423Sjfv	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
608266423Sjfv	if (error) {
609266423Sjfv		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
610266423Sjfv		goto err_mac_hmc;
611266423Sjfv	}
612266423Sjfv
613269198Sjfv	/* Disable LLDP from the firmware */
614269198Sjfv	i40e_aq_stop_lldp(hw, TRUE, NULL);
615269198Sjfv
616266423Sjfv	i40e_get_mac_addr(hw, hw->mac.addr);
617266423Sjfv	error = i40e_validate_mac_addr(hw->mac.addr);
618266423Sjfv	if (error) {
619266423Sjfv		device_printf(dev, "validate_mac_addr failed: %d\n", error);
620266423Sjfv		goto err_mac_hmc;
621266423Sjfv	}
622266423Sjfv	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
623266423Sjfv	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
624266423Sjfv
625274205Sjfv	/* Set up VSI and queues */
626270346Sjfv	if (ixl_setup_stations(pf) != 0) {
627266423Sjfv		device_printf(dev, "setup stations failed!\n");
628266423Sjfv		error = ENOMEM;
629266423Sjfv		goto err_mac_hmc;
630266423Sjfv	}
631266423Sjfv
632266423Sjfv	/* Initialize mac filter list for VSI */
633266423Sjfv	SLIST_INIT(&vsi->ftl);
634266423Sjfv
635266423Sjfv	/* Set up interrupt routing here */
636266423Sjfv	if (pf->msix > 1)
637270346Sjfv		error = ixl_assign_vsi_msix(pf);
638266423Sjfv	else
639270346Sjfv		error = ixl_assign_vsi_legacy(pf);
640266423Sjfv	if (error)
641299545Serj		goto err_late;
642266423Sjfv
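	/*
	** Workaround for firmware older than 4.33: pause briefly and
	** ask the firmware to restart link auto-negotiation.
	*/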
643279033Sjfv	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
644279033Sjfv	    (hw->aq.fw_maj_ver < 4)) {
645279033Sjfv		i40e_msec_delay(75);
646279033Sjfv		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
647279033Sjfv		if (error)
648279033Sjfv			device_printf(dev, "link restart failed, aq_err=%d\n",
649279033Sjfv			    pf->hw.aq.asq_last_status);
650270346Sjfv	}
651279033Sjfv
652266423Sjfv	/* Determine link state */
653279858Sjfv	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
654284049Sjfv	i40e_get_link_status(hw, &pf->link_up);
655266423Sjfv
656266423Sjfv	/* Setup OS specific network interface */
657274205Sjfv	if (ixl_setup_interface(dev, vsi) != 0) {
658274205Sjfv		device_printf(dev, "interface setup failed!\n");
659274205Sjfv		error = EIO;
660266423Sjfv		goto err_late;
661274205Sjfv	}
662266423Sjfv
663279033Sjfv	error = ixl_switch_config(pf);
664279033Sjfv	if (error) {
665279033Sjfv		device_printf(dev, "Initial switch config failed: %d\n", error);
666299545Serj		goto err_mac_hmc;
667279033Sjfv	}
668279033Sjfv
669279033Sjfv	/* Limit phy interrupts to link and modules failure */
670299545Serj	error = i40e_aq_set_phy_int_mask(hw,
671299545Serj	    I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
672299545Serj        if (error)
673279033Sjfv		device_printf(dev, "set phy mask failed: %d\n", error);
674279033Sjfv
675266423Sjfv	/* Get the bus configuration and set the shared code */
676270346Sjfv	bus = ixl_get_bus_info(hw, dev);
677266423Sjfv	i40e_set_pci_config_data(hw, bus);
678266423Sjfv
679266423Sjfv	/* Initialize statistics */
680270346Sjfv	ixl_pf_reset_stats(pf);
681270346Sjfv	ixl_update_stats_counters(pf);
682270346Sjfv	ixl_add_hw_stats(pf);
683266423Sjfv
684266423Sjfv	/* Register for VLAN events */
685266423Sjfv	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
686270346Sjfv	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
687266423Sjfv	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
688270346Sjfv	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
689266423Sjfv
690279858Sjfv#ifdef PCI_IOV
691279858Sjfv	/* SR-IOV is only supported when MSI-X is in use. */
692279858Sjfv	if (pf->msix > 1) {
693279858Sjfv		pf_schema = pci_iov_schema_alloc_node();
694279858Sjfv		vf_schema = pci_iov_schema_alloc_node();
695279858Sjfv		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
696279858Sjfv		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
697279858Sjfv		    IOV_SCHEMA_HASDEFAULT, TRUE);
698279858Sjfv		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
699279858Sjfv		    IOV_SCHEMA_HASDEFAULT, FALSE);
700279858Sjfv		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
701279858Sjfv		    IOV_SCHEMA_HASDEFAULT, FALSE);
702274205Sjfv
703279858Sjfv		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
704279858Sjfv		if (iov_error != 0)
705279858Sjfv			device_printf(dev,
706279858Sjfv			    "Failed to initialize SR-IOV (error=%d)\n",
707279858Sjfv			    iov_error);
708279858Sjfv	}
709279858Sjfv#endif
710279858Sjfv
711279860Sjfv#ifdef DEV_NETMAP
712279860Sjfv	ixl_netmap_attach(vsi);
713279860Sjfv#endif /* DEV_NETMAP */
714270346Sjfv	INIT_DEBUGOUT("ixl_attach: end");
715266423Sjfv	return (0);
716266423Sjfv
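/*
** Attach failure unwind: each label below releases what was set up
** before the failure (ifnet, LAN HMC, admin queue, then PCI resources,
** the VSI and the PF lock).
*/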
717266423Sjfverr_late:
718274205Sjfv	if (vsi->ifp != NULL)
719274205Sjfv		if_free(vsi->ifp);
720266423Sjfverr_mac_hmc:
721266423Sjfv	i40e_shutdown_lan_hmc(hw);
722266423Sjfverr_get_cap:
723266423Sjfv	i40e_shutdown_adminq(hw);
724266423Sjfverr_out:
725270346Sjfv	ixl_free_pci_resources(pf);
726274205Sjfv	ixl_free_vsi(vsi);
727270346Sjfv	IXL_PF_LOCK_DESTROY(pf);
728266423Sjfv	return (error);
729266423Sjfv}
730266423Sjfv
731266423Sjfv/*********************************************************************
732266423Sjfv *  Device removal routine
733266423Sjfv *
734266423Sjfv *  The detach entry point is called when the driver is being removed.
735266423Sjfv *  This routine stops the adapter and deallocates all the resources
736266423Sjfv *  that were allocated for driver operation.
737266423Sjfv *
738266423Sjfv *  return 0 on success, positive on failure
739266423Sjfv *********************************************************************/
740266423Sjfv
741266423Sjfvstatic int
742270346Sjfvixl_detach(device_t dev)
743266423Sjfv{
744270346Sjfv	struct ixl_pf		*pf = device_get_softc(dev);
745266423Sjfv	struct i40e_hw		*hw = &pf->hw;
746270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
747299545Serj	struct ixl_queue	*que = vsi->queues;
748266423Sjfv	i40e_status		status;
749279858Sjfv#ifdef PCI_IOV
750279858Sjfv	int			error;
751279858Sjfv#endif
752266423Sjfv
753270346Sjfv	INIT_DEBUGOUT("ixl_detach: begin");
754266423Sjfv
755266423Sjfv	/* Make sure VLANS are not using driver */
756266423Sjfv	if (vsi->ifp->if_vlantrunk != NULL) {
757266423Sjfv		device_printf(dev,"Vlan in use, detach first\n");
758266423Sjfv		return (EBUSY);
759266423Sjfv	}
760266423Sjfv
761279858Sjfv#ifdef PCI_IOV
762279858Sjfv	error = pci_iov_detach(dev);
763279858Sjfv	if (error != 0) {
764279858Sjfv		device_printf(dev, "SR-IOV in use; detach first.\n");
765279858Sjfv		return (error);
766279858Sjfv	}
767279858Sjfv#endif
768279858Sjfv
769279033Sjfv	ether_ifdetach(vsi->ifp);
770279033Sjfv	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
771279033Sjfv		IXL_PF_LOCK(pf);
772279033Sjfv		ixl_stop(pf);
773279033Sjfv		IXL_PF_UNLOCK(pf);
774279033Sjfv	}
775266423Sjfv
776299545Serj	for (int i = 0; i < vsi->num_queues; i++, que++) {
777299545Serj		if (que->tq) {
778299545Serj			taskqueue_drain(que->tq, &que->task);
779299545Serj			taskqueue_drain(que->tq, &que->tx_task);
780299545Serj			taskqueue_free(que->tq);
781299545Serj		}
782299545Serj	}
783266423Sjfv
784266423Sjfv	/* Shutdown LAN HMC */
785266423Sjfv	status = i40e_shutdown_lan_hmc(hw);
786266423Sjfv	if (status)
787266423Sjfv		device_printf(dev,
788266423Sjfv		    "Shutdown LAN HMC failed with code %d\n", status);
789266423Sjfv
790266423Sjfv	/* Shutdown admin queue */
791266423Sjfv	status = i40e_shutdown_adminq(hw);
792266423Sjfv	if (status)
793266423Sjfv		device_printf(dev,
794266423Sjfv		    "Shutdown Admin queue failed with code %d\n", status);
795266423Sjfv
796266423Sjfv	/* Unregister VLAN events */
797266423Sjfv	if (vsi->vlan_attach != NULL)
798266423Sjfv		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
799266423Sjfv	if (vsi->vlan_detach != NULL)
800266423Sjfv		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
801266423Sjfv
802266423Sjfv	callout_drain(&pf->timer);
803279860Sjfv#ifdef DEV_NETMAP
804279860Sjfv	netmap_detach(vsi->ifp);
805279860Sjfv#endif /* DEV_NETMAP */
806270346Sjfv	ixl_free_pci_resources(pf);
807266423Sjfv	bus_generic_detach(dev);
808266423Sjfv	if_free(vsi->ifp);
809270346Sjfv	ixl_free_vsi(vsi);
810270346Sjfv	IXL_PF_LOCK_DESTROY(pf);
811266423Sjfv	return (0);
812266423Sjfv}
813266423Sjfv
814266423Sjfv/*********************************************************************
815266423Sjfv *
816266423Sjfv *  Shutdown entry point
817266423Sjfv *
818266423Sjfv **********************************************************************/
819266423Sjfv
820266423Sjfvstatic int
821270346Sjfvixl_shutdown(device_t dev)
822266423Sjfv{
823270346Sjfv	struct ixl_pf *pf = device_get_softc(dev);
824270346Sjfv	IXL_PF_LOCK(pf);
825270346Sjfv	ixl_stop(pf);
826270346Sjfv	IXL_PF_UNLOCK(pf);
827266423Sjfv	return (0);
828266423Sjfv}
829266423Sjfv
830266423Sjfv
831266423Sjfv/*********************************************************************
832266423Sjfv *
833266423Sjfv *  Get the hardware capabilities
834266423Sjfv *
835266423Sjfv **********************************************************************/
836266423Sjfv
837266423Sjfvstatic int
838270346Sjfvixl_get_hw_capabilities(struct ixl_pf *pf)
839266423Sjfv{
840266423Sjfv	struct i40e_aqc_list_capabilities_element_resp *buf;
841266423Sjfv	struct i40e_hw	*hw = &pf->hw;
842266423Sjfv	device_t 	dev = pf->dev;
843266423Sjfv	int             error, len;
844266423Sjfv	u16		needed;
845266423Sjfv	bool		again = TRUE;
846266423Sjfv
847266423Sjfv	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
848266423Sjfvretry:
849266423Sjfv	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
850266423Sjfv	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
851266423Sjfv		device_printf(dev, "Unable to allocate cap memory\n");
852266423Sjfv                return (ENOMEM);
853266423Sjfv	}
854266423Sjfv
855266423Sjfv	/* This populates the hw struct */
856266423Sjfv        error = i40e_aq_discover_capabilities(hw, buf, len,
857266423Sjfv	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
858266423Sjfv	free(buf, M_DEVBUF);
859266423Sjfv	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
860266423Sjfv	    (again == TRUE)) {
861266423Sjfv		/* retry once with a larger buffer */
862266423Sjfv		again = FALSE;
863266423Sjfv		len = needed;
864266423Sjfv		goto retry;
865266423Sjfv	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
866266423Sjfv		device_printf(dev, "capability discovery failed: %d\n",
867266423Sjfv		    pf->hw.aq.asq_last_status);
868266423Sjfv		return (ENODEV);
869266423Sjfv	}
870266423Sjfv
871266423Sjfv	/* Capture this PF's starting queue pair */
872266423Sjfv	pf->qbase = hw->func_caps.base_queue;
873266423Sjfv
874270346Sjfv#ifdef IXL_DEBUG
875266423Sjfv	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
876266423Sjfv	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
877266423Sjfv	    hw->pf_id, hw->func_caps.num_vfs,
878266423Sjfv	    hw->func_caps.num_msix_vectors,
879266423Sjfv	    hw->func_caps.num_msix_vectors_vf,
880266423Sjfv	    hw->func_caps.fd_filters_guaranteed,
881266423Sjfv	    hw->func_caps.fd_filters_best_effort,
882266423Sjfv	    hw->func_caps.num_tx_qp,
883266423Sjfv	    hw->func_caps.num_rx_qp,
884266423Sjfv	    hw->func_caps.base_queue);
885266423Sjfv#endif
886266423Sjfv	return (error);
887266423Sjfv}
888266423Sjfv
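/*
** ixl_cap_txcsum_tso: enforce the dependency between checksum offload
** and TSO.  TSO4 requires TXCSUM and TSO6 requires TXCSUM_IPV6, so
** clearing a checksum capability also clears the matching TSO bit
** (remembered via the IXL_FLAGS_KEEP_TSO* flags so it can be restored
** when checksum offload is re-enabled).
*/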
889266423Sjfvstatic void
890270346Sjfvixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
891266423Sjfv{
892266423Sjfv	device_t 	dev = vsi->dev;
893266423Sjfv
894266423Sjfv	/* Enable/disable TXCSUM/TSO4 */
895266423Sjfv	if (!(ifp->if_capenable & IFCAP_TXCSUM)
896266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO4)) {
897266423Sjfv		if (mask & IFCAP_TXCSUM) {
898266423Sjfv			ifp->if_capenable |= IFCAP_TXCSUM;
899266423Sjfv			/* enable TXCSUM, restore TSO if previously enabled */
900270346Sjfv			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
901270346Sjfv				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
902266423Sjfv				ifp->if_capenable |= IFCAP_TSO4;
903266423Sjfv			}
904266423Sjfv		}
905266423Sjfv		else if (mask & IFCAP_TSO4) {
906266423Sjfv			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
907270346Sjfv			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
908266423Sjfv			device_printf(dev,
909266423Sjfv			    "TSO4 requires txcsum, enabling both...\n");
910266423Sjfv		}
911266423Sjfv	} else if((ifp->if_capenable & IFCAP_TXCSUM)
912266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO4)) {
913266423Sjfv		if (mask & IFCAP_TXCSUM)
914266423Sjfv			ifp->if_capenable &= ~IFCAP_TXCSUM;
915266423Sjfv		else if (mask & IFCAP_TSO4)
916266423Sjfv			ifp->if_capenable |= IFCAP_TSO4;
917266423Sjfv	} else if((ifp->if_capenable & IFCAP_TXCSUM)
918266423Sjfv	    && (ifp->if_capenable & IFCAP_TSO4)) {
919266423Sjfv		if (mask & IFCAP_TXCSUM) {
920270346Sjfv			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
921266423Sjfv			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
922266423Sjfv			device_printf(dev,
923266423Sjfv			    "TSO4 requires txcsum, disabling both...\n");
924266423Sjfv		} else if (mask & IFCAP_TSO4)
925266423Sjfv			ifp->if_capenable &= ~IFCAP_TSO4;
926266423Sjfv	}
927266423Sjfv
928266423Sjfv	/* Enable/disable TXCSUM_IPV6/TSO6 */
929266423Sjfv	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
930266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO6)) {
931266423Sjfv		if (mask & IFCAP_TXCSUM_IPV6) {
932266423Sjfv			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
933270346Sjfv			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
934270346Sjfv				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
935266423Sjfv				ifp->if_capenable |= IFCAP_TSO6;
936266423Sjfv			}
937266423Sjfv		} else if (mask & IFCAP_TSO6) {
938266423Sjfv			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
939270346Sjfv			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
940266423Sjfv			device_printf(dev,
941266423Sjfv			    "TSO6 requires txcsum6, enabling both...\n");
942266423Sjfv		}
943266423Sjfv	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
944266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO6)) {
945266423Sjfv		if (mask & IFCAP_TXCSUM_IPV6)
946266423Sjfv			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
947266423Sjfv		else if (mask & IFCAP_TSO6)
948266423Sjfv			ifp->if_capenable |= IFCAP_TSO6;
949266423Sjfv	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
950266423Sjfv	    && (ifp->if_capenable & IFCAP_TSO6)) {
951266423Sjfv		if (mask & IFCAP_TXCSUM_IPV6) {
952270346Sjfv			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
953266423Sjfv			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
954266423Sjfv			device_printf(dev,
955266423Sjfv			    "TSO6 requires txcsum6, disabling both...\n");
956266423Sjfv		} else if (mask & IFCAP_TSO6)
957266423Sjfv			ifp->if_capenable &= ~IFCAP_TSO6;
958266423Sjfv	}
959266423Sjfv}
960266423Sjfv
961266423Sjfv/*********************************************************************
962266423Sjfv *  Ioctl entry point
963266423Sjfv *
964270346Sjfv *  ixl_ioctl is called when the user wants to configure the
965266423Sjfv *  interface.
966266423Sjfv *
967266423Sjfv *  return 0 on success, positive on failure
968266423Sjfv **********************************************************************/
969266423Sjfv
970266423Sjfvstatic int
971270346Sjfvixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
972266423Sjfv{
973270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
974279858Sjfv	struct ixl_pf	*pf = vsi->back;
975266423Sjfv	struct ifreq	*ifr = (struct ifreq *) data;
976266423Sjfv#if defined(INET) || defined(INET6)
977266423Sjfv	struct ifaddr *ifa = (struct ifaddr *)data;
978266423Sjfv	bool		avoid_reset = FALSE;
979266423Sjfv#endif
980266423Sjfv	int             error = 0;
981266423Sjfv
982266423Sjfv	switch (command) {
983266423Sjfv
984266423Sjfv        case SIOCSIFADDR:
985266423Sjfv#ifdef INET
986266423Sjfv		if (ifa->ifa_addr->sa_family == AF_INET)
987266423Sjfv			avoid_reset = TRUE;
988266423Sjfv#endif
989266423Sjfv#ifdef INET6
990266423Sjfv		if (ifa->ifa_addr->sa_family == AF_INET6)
991266423Sjfv			avoid_reset = TRUE;
992266423Sjfv#endif
993266423Sjfv#if defined(INET) || defined(INET6)
994266423Sjfv		/*
995266423Sjfv		** Calling init results in link renegotiation,
996266423Sjfv		** so we avoid doing it when possible.
997266423Sjfv		*/
998266423Sjfv		if (avoid_reset) {
999266423Sjfv			ifp->if_flags |= IFF_UP;
1000266423Sjfv			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1001270346Sjfv				ixl_init(pf);
1002271900Sbz#ifdef INET
1003266423Sjfv			if (!(ifp->if_flags & IFF_NOARP))
1004266423Sjfv				arp_ifinit(ifp, ifa);
1005271900Sbz#endif
1006266423Sjfv		} else
1007266423Sjfv			error = ether_ioctl(ifp, command, data);
1008266423Sjfv		break;
1009266423Sjfv#endif
1010266423Sjfv	case SIOCSIFMTU:
1011266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1012270346Sjfv		if (ifr->ifr_mtu > IXL_MAX_FRAME -
1013266423Sjfv		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
1014266423Sjfv			error = EINVAL;
1015266423Sjfv		} else {
1016270346Sjfv			IXL_PF_LOCK(pf);
1017266423Sjfv			ifp->if_mtu = ifr->ifr_mtu;
1018266423Sjfv			vsi->max_frame_size =
1019266423Sjfv				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1020266423Sjfv			    + ETHER_VLAN_ENCAP_LEN;
1021270346Sjfv			ixl_init_locked(pf);
1022270346Sjfv			IXL_PF_UNLOCK(pf);
1023266423Sjfv		}
1024266423Sjfv		break;
1025266423Sjfv	case SIOCSIFFLAGS:
1026266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
1027270346Sjfv		IXL_PF_LOCK(pf);
1028266423Sjfv		if (ifp->if_flags & IFF_UP) {
1029266423Sjfv			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1030266423Sjfv				if ((ifp->if_flags ^ pf->if_flags) &
1031266423Sjfv				    (IFF_PROMISC | IFF_ALLMULTI)) {
1032270346Sjfv					ixl_set_promisc(vsi);
1033266423Sjfv				}
1034266423Sjfv			} else
1035270346Sjfv				ixl_init_locked(pf);
1036266423Sjfv		} else
1037266423Sjfv			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1038270346Sjfv				ixl_stop(pf);
1039266423Sjfv		pf->if_flags = ifp->if_flags;
1040270346Sjfv		IXL_PF_UNLOCK(pf);
1041266423Sjfv		break;
1042266423Sjfv	case SIOCADDMULTI:
1043266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
1044266423Sjfv		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1045270346Sjfv			IXL_PF_LOCK(pf);
1046270346Sjfv			ixl_disable_intr(vsi);
1047270346Sjfv			ixl_add_multi(vsi);
1048270346Sjfv			ixl_enable_intr(vsi);
1049270346Sjfv			IXL_PF_UNLOCK(pf);
1050266423Sjfv		}
1051266423Sjfv		break;
1052266423Sjfv	case SIOCDELMULTI:
1053266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
1054266423Sjfv		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1055270346Sjfv			IXL_PF_LOCK(pf);
1056270346Sjfv			ixl_disable_intr(vsi);
1057270346Sjfv			ixl_del_multi(vsi);
1058270346Sjfv			ixl_enable_intr(vsi);
1059270346Sjfv			IXL_PF_UNLOCK(pf);
1060266423Sjfv		}
1061266423Sjfv		break;
1062266423Sjfv	case SIOCSIFMEDIA:
1063266423Sjfv	case SIOCGIFMEDIA:
1064284049Sjfv#ifdef IFM_ETH_XTYPE
1065284049Sjfv	case SIOCGIFXMEDIA:
1066284049Sjfv#endif
1067266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1068266423Sjfv		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1069266423Sjfv		break;
1070266423Sjfv	case SIOCSIFCAP:
1071266423Sjfv	{
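		/* Bits set in mask are the capabilities this request toggles */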
1072266423Sjfv		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1073266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1074266423Sjfv
1075270346Sjfv		ixl_cap_txcsum_tso(vsi, ifp, mask);
1076266423Sjfv
1077266423Sjfv		if (mask & IFCAP_RXCSUM)
1078266423Sjfv			ifp->if_capenable ^= IFCAP_RXCSUM;
1079266423Sjfv		if (mask & IFCAP_RXCSUM_IPV6)
1080266423Sjfv			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1081266423Sjfv		if (mask & IFCAP_LRO)
1082266423Sjfv			ifp->if_capenable ^= IFCAP_LRO;
1083266423Sjfv		if (mask & IFCAP_VLAN_HWTAGGING)
1084266423Sjfv			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1085266423Sjfv		if (mask & IFCAP_VLAN_HWFILTER)
1086266423Sjfv			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1087266423Sjfv		if (mask & IFCAP_VLAN_HWTSO)
1088266423Sjfv			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1089266423Sjfv		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1090270346Sjfv			IXL_PF_LOCK(pf);
1091270346Sjfv			ixl_init_locked(pf);
1092270346Sjfv			IXL_PF_UNLOCK(pf);
1093266423Sjfv		}
1094266423Sjfv		VLAN_CAPABILITIES(ifp);
1095266423Sjfv
1096266423Sjfv		break;
1097266423Sjfv	}
1098266423Sjfv
1099266423Sjfv	default:
1100270346Sjfv		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1101266423Sjfv		error = ether_ioctl(ifp, command, data);
1102266423Sjfv		break;
1103266423Sjfv	}
1104266423Sjfv
1105266423Sjfv	return (error);
1106266423Sjfv}
1107266423Sjfv
1108266423Sjfv
1109266423Sjfv/*********************************************************************
1110266423Sjfv *  Init entry point
1111266423Sjfv *
1112266423Sjfv *  This routine is used in two ways. It is used by the stack as
1113266423Sjfv *  the init entry point in the network interface structure. It is
1114266423Sjfv *  also used by the driver as a hw/sw initialization routine to get
1115266423Sjfv *  to a consistent state.
1116266423Sjfv *
1117266423Sjfv *  returns nothing; errors are reported via device_printf
1118266423Sjfv **********************************************************************/
1119266423Sjfv
1120266423Sjfvstatic void
1121270346Sjfvixl_init_locked(struct ixl_pf *pf)
1122266423Sjfv{
1123266423Sjfv	struct i40e_hw	*hw = &pf->hw;
1124270346Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
1125266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1126266423Sjfv	device_t 	dev = pf->dev;
1127266423Sjfv	struct i40e_filter_control_settings	filter;
1128266423Sjfv	u8		tmpaddr[ETHER_ADDR_LEN];
1129266423Sjfv	int		ret;
1130266423Sjfv
1131266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1132270346Sjfv	INIT_DEBUGOUT("ixl_init: begin");
1133270346Sjfv	ixl_stop(pf);
1134266423Sjfv
1135266423Sjfv	/* Get the latest mac address... User might use a LAA */
1136266423Sjfv	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1137266423Sjfv	      I40E_ETH_LENGTH_OF_ADDRESS);
1138266423Sjfv	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1139299545Serj	    i40e_validate_mac_addr(tmpaddr)) {
1140266423Sjfv		bcopy(tmpaddr, hw->mac.addr,
1141266423Sjfv		    I40E_ETH_LENGTH_OF_ADDRESS);
1142266423Sjfv		ret = i40e_aq_mac_address_write(hw,
1143266423Sjfv		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
1144266423Sjfv		    hw->mac.addr, NULL);
1145266423Sjfv		if (ret) {
1146266423Sjfv			device_printf(dev, "LLA address"
1147266423Sjfv			 "change failed!!\n");
1148266423Sjfv			return;
1149266423Sjfv		}
1150266423Sjfv	}
1151266423Sjfv
1152266423Sjfv	/* Set the various hardware offload abilities */
1153266423Sjfv	ifp->if_hwassist = 0;
1154266423Sjfv	if (ifp->if_capenable & IFCAP_TSO)
1155266423Sjfv		ifp->if_hwassist |= CSUM_TSO;
1156266423Sjfv	if (ifp->if_capenable & IFCAP_TXCSUM)
1157266423Sjfv		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1158266423Sjfv	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1159266423Sjfv		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1160266423Sjfv
1161266423Sjfv	/* Set up the device filtering */
1162266423Sjfv	bzero(&filter, sizeof(filter));
1163266423Sjfv	filter.enable_ethtype = TRUE;
1164266423Sjfv	filter.enable_macvlan = TRUE;
1165270346Sjfv#ifdef IXL_FDIR
1166266423Sjfv	filter.enable_fdir = TRUE;
1167266423Sjfv#endif
1168266423Sjfv	if (i40e_set_filter_control(hw, &filter))
1169266423Sjfv		device_printf(dev, "set_filter_control() failed\n");
1170266423Sjfv
1171266423Sjfv	/* Set up RSS */
1172270346Sjfv	ixl_config_rss(vsi);
1173266423Sjfv
1174266423Sjfv	/*
1175279033Sjfv	** Prepare the VSI: rings, hmc contexts, etc...
1176266423Sjfv	*/
1177270346Sjfv	if (ixl_initialize_vsi(vsi)) {
1178270346Sjfv		device_printf(dev, "initialize vsi failed!!\n");
1179266423Sjfv		return;
1180266423Sjfv	}
1181266423Sjfv
1182266423Sjfv	/* Add protocol filters to list */
1183270346Sjfv	ixl_init_filters(vsi);
1184266423Sjfv
1185266423Sjfv	/* Setup vlan's if needed */
1186270346Sjfv	ixl_setup_vlan_filters(vsi);
1187266423Sjfv
1188266423Sjfv	/* Start the local timer */
1189270346Sjfv	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1190266423Sjfv
1191266423Sjfv	/* Set up MSI/X routing and the ITR settings */
1192270346Sjfv	if (ixl_enable_msix) {
1193270346Sjfv		ixl_configure_msix(pf);
1194270346Sjfv		ixl_configure_itr(pf);
1195266423Sjfv	} else
1196270346Sjfv		ixl_configure_legacy(pf);
1197266423Sjfv
1198270346Sjfv	ixl_enable_rings(vsi);
1199266423Sjfv
1200266423Sjfv	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1201266423Sjfv
1202279858Sjfv	ixl_reconfigure_filters(vsi);
1203279858Sjfv
1204266423Sjfv	/* Set MTU in hardware */
1205270346Sjfv	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1206270346Sjfv	    TRUE, 0, NULL);
1207270346Sjfv	if (aq_error)
1208270346Sjfv		device_printf(vsi->dev,
1209270346Sjfv			"aq_set_mac_config in init error, code %d\n",
1210270346Sjfv		    aq_error);
1211266423Sjfv
1212266423Sjfv	/* And now turn on interrupts */
1213270346Sjfv	ixl_enable_intr(vsi);
1214266423Sjfv
1215266423Sjfv	/* Now inform the stack we're ready */
1216266423Sjfv	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1217266423Sjfv	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1218266423Sjfv
1219266423Sjfv	return;
1220266423Sjfv}
1221266423Sjfv
1222266423Sjfvstatic void
1223270346Sjfvixl_init(void *arg)
1224266423Sjfv{
1225270346Sjfv	struct ixl_pf *pf = arg;
1226266423Sjfv
1227270346Sjfv	IXL_PF_LOCK(pf);
1228270346Sjfv	ixl_init_locked(pf);
1229270346Sjfv	IXL_PF_UNLOCK(pf);
1230266423Sjfv	return;
1231266423Sjfv}
1232266423Sjfv
1233266423Sjfv/*
1234266423Sjfv**
1235266423Sjfv** MSIX Interrupt Handlers and Tasklets
1236266423Sjfv**
1237266423Sjfv*/
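/*
** Deferred (taskqueue) handler for a queue: drain completed RX/TX work,
** restart transmission if packets are queued in the buf_ring, re-queue
** the task if more RX work remains, and otherwise re-enable the queue's
** interrupt.
*/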
1238266423Sjfvstatic void
1239270346Sjfvixl_handle_que(void *context, int pending)
1240266423Sjfv{
1241270346Sjfv	struct ixl_queue *que = context;
1242270346Sjfv	struct ixl_vsi *vsi = que->vsi;
1243266423Sjfv	struct i40e_hw  *hw = vsi->hw;
1244266423Sjfv	struct tx_ring  *txr = &que->txr;
1245266423Sjfv	struct ifnet    *ifp = vsi->ifp;
1246266423Sjfv	bool		more;
1247266423Sjfv
1248266423Sjfv	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1249270346Sjfv		more = ixl_rxeof(que, IXL_RX_LIMIT);
1250270346Sjfv		IXL_TX_LOCK(txr);
1251270346Sjfv		ixl_txeof(que);
1252266423Sjfv		if (!drbr_empty(ifp, txr->br))
1253270346Sjfv			ixl_mq_start_locked(ifp, txr);
1254270346Sjfv		IXL_TX_UNLOCK(txr);
1255266423Sjfv		if (more) {
1256266423Sjfv			taskqueue_enqueue(que->tq, &que->task);
1257266423Sjfv			return;
1258266423Sjfv		}
1259266423Sjfv	}
1260266423Sjfv
1261266423Sjfv	/* Re-enable this queue's interrupt */
1262270346Sjfv	ixl_enable_queue(hw, que->me);
1263266423Sjfv	return;
1264266423Sjfv}
1265266423Sjfv
1266266423Sjfv
1267266423Sjfv/*********************************************************************
1268266423Sjfv *
1269266423Sjfv *  Legacy Interrupt Service routine
1270266423Sjfv *
1271266423Sjfv **********************************************************************/
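/*
** In legacy/MSI mode a single vector services both the admin queue and
** queue 0: admin queue events are deferred to the adminq task, queue
** work is handled inline, and the interrupt causes are then re-enabled.
*/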
1272266423Sjfvvoid
1273270346Sjfvixl_intr(void *arg)
1274266423Sjfv{
1275270346Sjfv	struct ixl_pf		*pf = arg;
1276266423Sjfv	struct i40e_hw		*hw =  &pf->hw;
1277270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1278270346Sjfv	struct ixl_queue	*que = vsi->queues;
1279266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1280266423Sjfv	struct tx_ring		*txr = &que->txr;
1281266423Sjfv        u32			reg, icr0, mask;
1282266423Sjfv	bool			more_tx, more_rx;
1283266423Sjfv
1284266423Sjfv	++que->irqs;
1285266423Sjfv
1286266423Sjfv	/* Protect against spurious interrupts */
1287266423Sjfv	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1288266423Sjfv		return;
1289266423Sjfv
1290266423Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0);
1291266423Sjfv
1292266423Sjfv	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1293266423Sjfv	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1294266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1295266423Sjfv
1296266423Sjfv        mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1297266423Sjfv
1298279858Sjfv#ifdef PCI_IOV
1299279858Sjfv	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
1300279858Sjfv		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1301279858Sjfv#endif
1302279858Sjfv
1303266423Sjfv	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1304266423Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
1305266423Sjfv		return;
1306266423Sjfv	}
1307266423Sjfv
1308270346Sjfv	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1309266423Sjfv
1310270346Sjfv	IXL_TX_LOCK(txr);
1311270346Sjfv	more_tx = ixl_txeof(que);
1312266423Sjfv	if (!drbr_empty(vsi->ifp, txr->br))
1313266423Sjfv		more_tx = 1;
1314270346Sjfv	IXL_TX_UNLOCK(txr);
1315266423Sjfv
1316266423Sjfv	/* re-enable other interrupt causes */
1317266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1318266423Sjfv
1319266423Sjfv	/* And now the queues */
1320266423Sjfv	reg = rd32(hw, I40E_QINT_RQCTL(0));
1321266423Sjfv	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1322266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
1323266423Sjfv
1324266423Sjfv	reg = rd32(hw, I40E_QINT_TQCTL(0));
1325266423Sjfv	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1326266423Sjfv	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1327266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
1328266423Sjfv
1329270346Sjfv	ixl_enable_legacy(hw);
1330266423Sjfv
1331266423Sjfv	return;
1332266423Sjfv}
1333266423Sjfv
1334266423Sjfv
1335266423Sjfv/*********************************************************************
1336266423Sjfv *
1337266423Sjfv *  MSIX VSI Interrupt Service routine
1338266423Sjfv *
1339266423Sjfv **********************************************************************/
1340266423Sjfvvoid
1341270346Sjfvixl_msix_que(void *arg)
1342266423Sjfv{
1343270346Sjfv	struct ixl_queue	*que = arg;
1344270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
1345266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1346266423Sjfv	struct tx_ring	*txr = &que->txr;
1347266423Sjfv	bool		more_tx, more_rx;
1348266423Sjfv
1349269198Sjfv	/* Protect against spurious interrupts */
1350269198Sjfv	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1351269198Sjfv		return;
1352269198Sjfv
1353266423Sjfv	++que->irqs;
1354266423Sjfv
1355270346Sjfv	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1356266423Sjfv
1357270346Sjfv	IXL_TX_LOCK(txr);
1358270346Sjfv	more_tx = ixl_txeof(que);
1359266423Sjfv	/*
1360266423Sjfv	** Make certain that if the stack
1361266423Sjfv	** has anything queued the task gets
1362266423Sjfv	** scheduled to handle it.
1363266423Sjfv	*/
1364266423Sjfv	if (!drbr_empty(vsi->ifp, txr->br))
1365266423Sjfv		more_tx = 1;
1366270346Sjfv	IXL_TX_UNLOCK(txr);
1367266423Sjfv
1368270346Sjfv	ixl_set_queue_rx_itr(que);
1369270346Sjfv	ixl_set_queue_tx_itr(que);
1370266423Sjfv
1371266423Sjfv	if (more_tx || more_rx)
1372266423Sjfv		taskqueue_enqueue(que->tq, &que->task);
1373266423Sjfv	else
1374270346Sjfv		ixl_enable_queue(hw, que->me);
1375266423Sjfv
1376266423Sjfv	return;
1377266423Sjfv}
1378266423Sjfv
1379266423Sjfv
1380266423Sjfv/*********************************************************************
1381266423Sjfv *
1382266423Sjfv *  MSIX Admin Queue Interrupt Service routine
1383266423Sjfv *
1384266423Sjfv **********************************************************************/
1385266423Sjfvstatic void
1386270346Sjfvixl_msix_adminq(void *arg)
1387266423Sjfv{
1388270346Sjfv	struct ixl_pf	*pf = arg;
1389266423Sjfv	struct i40e_hw	*hw = &pf->hw;
1390266423Sjfv	u32		reg, mask;
1391266423Sjfv
1392266423Sjfv	++pf->admin_irq;
1393266423Sjfv
1394266423Sjfv	reg = rd32(hw, I40E_PFINT_ICR0);
1395266423Sjfv	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1396266423Sjfv
1397266423Sjfv	/* Check on the cause */
1398266423Sjfv	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
1399266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
1400266423Sjfv
1401269198Sjfv	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1402270346Sjfv		ixl_handle_mdd_event(pf);
1403266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1404269198Sjfv	}
1405266423Sjfv
1406279858Sjfv#ifdef PCI_IOV
1407279858Sjfv	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
1408266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1409279858Sjfv		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1410279858Sjfv	}
1411279858Sjfv#endif
1412266423Sjfv
1413266423Sjfv	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1414266423Sjfv	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1415266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1416266423Sjfv
1417266423Sjfv	taskqueue_enqueue(pf->tq, &pf->adminq);
1418266423Sjfv	return;
1419266423Sjfv}
1420266423Sjfv
1421266423Sjfv/*********************************************************************
1422266423Sjfv *
1423266423Sjfv *  Media Ioctl callback
1424266423Sjfv *
1425266423Sjfv *  This routine is called whenever the user queries the status of
1426266423Sjfv *  the interface using ifconfig.
1427266423Sjfv *
1428266423Sjfv **********************************************************************/
1429266423Sjfvstatic void
1430270346Sjfvixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1431266423Sjfv{
1432270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
1433279858Sjfv	struct ixl_pf	*pf = vsi->back;
1434266423Sjfv	struct i40e_hw  *hw = &pf->hw;
1435266423Sjfv
1436270346Sjfv	INIT_DEBUGOUT("ixl_media_status: begin");
1437270346Sjfv	IXL_PF_LOCK(pf);
1438266423Sjfv
1439279858Sjfv	hw->phy.get_link_info = TRUE;
1440284049Sjfv	i40e_get_link_status(hw, &pf->link_up);
1441270346Sjfv	ixl_update_link_status(pf);
1442266423Sjfv
1443266423Sjfv	ifmr->ifm_status = IFM_AVALID;
1444266423Sjfv	ifmr->ifm_active = IFM_ETHER;
1445266423Sjfv
1446279858Sjfv	if (!pf->link_up) {
1447270346Sjfv		IXL_PF_UNLOCK(pf);
1448266423Sjfv		return;
1449266423Sjfv	}
1450266423Sjfv
1451266423Sjfv	ifmr->ifm_status |= IFM_ACTIVE;
1452299545Serj
1453299545Serj	/* Hardware always does full-duplex */
1454266423Sjfv	ifmr->ifm_active |= IFM_FDX;
1455266423Sjfv
1456266423Sjfv	switch (hw->phy.link_info.phy_type) {
1457266423Sjfv		/* 100 M */
1458266423Sjfv		case I40E_PHY_TYPE_100BASE_TX:
1459266423Sjfv			ifmr->ifm_active |= IFM_100_TX;
1460266423Sjfv			break;
1461266423Sjfv		/* 1 G */
1462266423Sjfv		case I40E_PHY_TYPE_1000BASE_T:
1463266423Sjfv			ifmr->ifm_active |= IFM_1000_T;
1464266423Sjfv			break;
1465269198Sjfv		case I40E_PHY_TYPE_1000BASE_SX:
1466269198Sjfv			ifmr->ifm_active |= IFM_1000_SX;
1467269198Sjfv			break;
1468269198Sjfv		case I40E_PHY_TYPE_1000BASE_LX:
1469269198Sjfv			ifmr->ifm_active |= IFM_1000_LX;
1470269198Sjfv			break;
1471266423Sjfv		/* 10 G */
1472266423Sjfv		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1473266423Sjfv			ifmr->ifm_active |= IFM_10G_TWINAX;
1474266423Sjfv			break;
1475266423Sjfv		case I40E_PHY_TYPE_10GBASE_SR:
1476266423Sjfv			ifmr->ifm_active |= IFM_10G_SR;
1477266423Sjfv			break;
1478266423Sjfv		case I40E_PHY_TYPE_10GBASE_LR:
1479266423Sjfv			ifmr->ifm_active |= IFM_10G_LR;
1480266423Sjfv			break;
1481270346Sjfv		case I40E_PHY_TYPE_10GBASE_T:
1482270346Sjfv			ifmr->ifm_active |= IFM_10G_T;
1483270346Sjfv			break;
1484266423Sjfv		/* 40 G */
1485266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4:
1486266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1487266423Sjfv			ifmr->ifm_active |= IFM_40G_CR4;
1488266423Sjfv			break;
1489266423Sjfv		case I40E_PHY_TYPE_40GBASE_SR4:
1490266423Sjfv			ifmr->ifm_active |= IFM_40G_SR4;
1491266423Sjfv			break;
1492266423Sjfv		case I40E_PHY_TYPE_40GBASE_LR4:
1493266423Sjfv			ifmr->ifm_active |= IFM_40G_LR4;
1494266423Sjfv			break;
1495284049Sjfv#ifndef IFM_ETH_XTYPE
1496284049Sjfv		case I40E_PHY_TYPE_1000BASE_KX:
1497284049Sjfv			ifmr->ifm_active |= IFM_1000_CX;
1498284049Sjfv			break;
1499284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1500284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1:
1501284049Sjfv			ifmr->ifm_active |= IFM_10G_TWINAX;
1502284049Sjfv			break;
1503284049Sjfv		case I40E_PHY_TYPE_10GBASE_KX4:
1504284049Sjfv			ifmr->ifm_active |= IFM_10G_CX4;
1505284049Sjfv			break;
1506284049Sjfv		case I40E_PHY_TYPE_10GBASE_KR:
1507284049Sjfv			ifmr->ifm_active |= IFM_10G_SR;
1508284049Sjfv			break;
1509279033Sjfv		case I40E_PHY_TYPE_40GBASE_KR4:
1510279033Sjfv		case I40E_PHY_TYPE_XLPPI:
1511284049Sjfv			ifmr->ifm_active |= IFM_40G_SR4;
1512279033Sjfv			break;
1513284049Sjfv#else
1514284049Sjfv		case I40E_PHY_TYPE_1000BASE_KX:
1515284049Sjfv			ifmr->ifm_active |= IFM_1000_KX;
1516284049Sjfv			break;
1517284049Sjfv		/* ERJ: What's the difference between these? */
1518284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1519284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1:
1520284049Sjfv			ifmr->ifm_active |= IFM_10G_CR1;
1521284049Sjfv			break;
1522284049Sjfv		case I40E_PHY_TYPE_10GBASE_KX4:
1523284049Sjfv			ifmr->ifm_active |= IFM_10G_KX4;
1524284049Sjfv			break;
1525284049Sjfv		case I40E_PHY_TYPE_10GBASE_KR:
1526284049Sjfv			ifmr->ifm_active |= IFM_10G_KR;
1527284049Sjfv			break;
1528299545Serj		/* Our single 20G media type */
1529284049Sjfv		case I40E_PHY_TYPE_20GBASE_KR2:
1530284049Sjfv			ifmr->ifm_active |= IFM_20G_KR2;
1531284049Sjfv			break;
1532284049Sjfv		case I40E_PHY_TYPE_40GBASE_KR4:
1533284049Sjfv			ifmr->ifm_active |= IFM_40G_KR4;
1534284049Sjfv			break;
1535284049Sjfv		case I40E_PHY_TYPE_XLPPI:
1536284049Sjfv			ifmr->ifm_active |= IFM_40G_XLPPI;
1537284049Sjfv			break;
1538284049Sjfv#endif
1539266423Sjfv		default:
1540266423Sjfv			ifmr->ifm_active |= IFM_UNKNOWN;
1541266423Sjfv			break;
1542266423Sjfv	}
1543266423Sjfv	/* Report flow control status as well */
1544266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1545266423Sjfv		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1546266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1547266423Sjfv		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1548266423Sjfv
1549270346Sjfv	IXL_PF_UNLOCK(pf);
1550266423Sjfv
1551266423Sjfv	return;
1552266423Sjfv}
1553266423Sjfv
1554299545Serj/*
1555299545Serj * NOTE: Fortville does not support forcing media speeds. Instead,
1556299545Serj * use the set_advertise sysctl to set the speeds Fortville
1557299545Serj * will advertise or be allowed to operate at.
1558299545Serj */
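/*
 * For example, from the shell (the OID name is assumed to match what the
 * set_advertise handler registers; adjust the unit number and value for
 * the system at hand):
 *
 *	sysctl dev.ixl.0.advertise_speed=<speed flags>
 *
 * is used instead of forcing a fixed speed with ifconfig media options.
 */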
1559266423Sjfvstatic int
1560270346Sjfvixl_media_change(struct ifnet * ifp)
1561266423Sjfv{
1562270346Sjfv	struct ixl_vsi *vsi = ifp->if_softc;
1563266423Sjfv	struct ifmedia *ifm = &vsi->media;
1564266423Sjfv
1565270346Sjfv	INIT_DEBUGOUT("ixl_media_change: begin");
1566266423Sjfv
1567266423Sjfv	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1568266423Sjfv		return (EINVAL);
1569266423Sjfv
1570299545Serj	if_printf(ifp, "Media change is not supported.\n");
1571269198Sjfv
1572269198Sjfv	return (ENODEV);
1573266423Sjfv}
1574266423Sjfv
1575266423Sjfv
1576270346Sjfv#ifdef IXL_FDIR
1577266423Sjfv/*
1578266423Sjfv** ATR: Application Targeted Receive - creates a filter
1579266423Sjfv**	based on TX flow info that will keep the receive
1580266423Sjfv**	portion of the flow on the same queue. Based on the
1581266423Sjfv**	implementation this is only available for TCP connections
1582266423Sjfv*/
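/*
** The filter is programmed inline on the TX ring: one descriptor slot is
** consumed (next_avail/avail are advanced as for a data descriptor) and
** the target queue, packet type, and add/remove command are encoded into
** the qindex and dtype words of a filter-program descriptor.
*/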
1583266423Sjfvvoid
1584270346Sjfvixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1585266423Sjfv{
1586270346Sjfv	struct ixl_vsi			*vsi = que->vsi;
1587266423Sjfv	struct tx_ring			*txr = &que->txr;
1588266423Sjfv	struct i40e_filter_program_desc	*FDIR;
1589266423Sjfv	u32				ptype, dtype;
1590266423Sjfv	int				idx;
1591266423Sjfv
1592266423Sjfv	/* check if ATR is enabled and sample rate */
1593270346Sjfv	if ((!ixl_enable_fdir) || (!txr->atr_rate))
1594266423Sjfv		return;
1595266423Sjfv	/*
1596266423Sjfv	** We sample all TCP SYN/FIN packets,
1597266423Sjfv	** or at the selected sample rate
1598266423Sjfv	*/
1599266423Sjfv	txr->atr_count++;
1600266423Sjfv	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1601266423Sjfv	    (txr->atr_count < txr->atr_rate))
1602266423Sjfv                return;
1603266423Sjfv	txr->atr_count = 0;
1604266423Sjfv
1605266423Sjfv	/* Get a descriptor to use */
1606266423Sjfv	idx = txr->next_avail;
1607266423Sjfv	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1608266423Sjfv	if (++idx == que->num_desc)
1609266423Sjfv		idx = 0;
1610266423Sjfv	txr->avail--;
1611266423Sjfv	txr->next_avail = idx;
1612266423Sjfv
1613266423Sjfv	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1614266423Sjfv	    I40E_TXD_FLTR_QW0_QINDEX_MASK;
1615266423Sjfv
1616266423Sjfv	ptype |= (etype == ETHERTYPE_IP) ?
1617266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1618266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1619266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1620266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1621266423Sjfv
1622266423Sjfv	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1623266423Sjfv
1624266423Sjfv	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1625266423Sjfv
1626266423Sjfv	/*
1627266423Sjfv	** We use the TCP TH_FIN as a trigger to remove
1628266423Sjfv	** the filter, otherwise its an update.
1629266423Sjfv	*/
1630266423Sjfv	dtype |= (th->th_flags & TH_FIN) ?
1631266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1632266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1633266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1634266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1635266423Sjfv
1636266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1637266423Sjfv	    I40E_TXD_FLTR_QW1_DEST_SHIFT;
1638266423Sjfv
1639266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1640266423Sjfv	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1641266423Sjfv
1642266423Sjfv	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1643266423Sjfv	FDIR->dtype_cmd_cntindex = htole32(dtype);
1644266423Sjfv	return;
1645266423Sjfv}
1646266423Sjfv#endif
1647266423Sjfv
1648266423Sjfv
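/*
** Set promiscuous state on the VSI: unicast promiscuous follows
** IFF_PROMISC, while multicast promiscuous is used when IFF_ALLMULTI is
** set or the multicast address count reaches MAX_MULTICAST_ADDR.
*/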
1649266423Sjfvstatic void
1650270346Sjfvixl_set_promisc(struct ixl_vsi *vsi)
1651266423Sjfv{
1652266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1653266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1654266423Sjfv	int		err, mcnt = 0;
1655266423Sjfv	bool		uni = FALSE, multi = FALSE;
1656266423Sjfv
1657266423Sjfv	if (ifp->if_flags & IFF_ALLMULTI)
1658266423Sjfv                multi = TRUE;
1659266423Sjfv	else { /* Need to count the multicast addresses */
1660266423Sjfv		struct  ifmultiaddr *ifma;
1661266423Sjfv		if_maddr_rlock(ifp);
1662266423Sjfv		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1663266423Sjfv                        if (ifma->ifma_addr->sa_family != AF_LINK)
1664266423Sjfv                                continue;
1665266423Sjfv                        if (mcnt == MAX_MULTICAST_ADDR)
1666266423Sjfv                                break;
1667266423Sjfv                        mcnt++;
1668266423Sjfv		}
1669266423Sjfv		if_maddr_runlock(ifp);
1670266423Sjfv	}
1671266423Sjfv
1672266423Sjfv	if (mcnt >= MAX_MULTICAST_ADDR)
1673266423Sjfv                multi = TRUE;
1674266423Sjfv        if (ifp->if_flags & IFF_PROMISC)
1675266423Sjfv		uni = TRUE;
1676266423Sjfv
1677266423Sjfv	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1678266423Sjfv	    vsi->seid, uni, NULL);
1679266423Sjfv	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1680266423Sjfv	    vsi->seid, multi, NULL);
1681266423Sjfv	return;
1682266423Sjfv}
1683266423Sjfv
1684266423Sjfv/*********************************************************************
1685266423Sjfv * 	Filter Routines
1686266423Sjfv *
1687266423Sjfv *	Routines for multicast and vlan filter management.
1688266423Sjfv *
1689266423Sjfv *********************************************************************/
1690266423Sjfvstatic void
1691270346Sjfvixl_add_multi(struct ixl_vsi *vsi)
1692266423Sjfv{
1693266423Sjfv	struct	ifmultiaddr	*ifma;
1694266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1695266423Sjfv	struct i40e_hw		*hw = vsi->hw;
1696266423Sjfv	int			mcnt = 0, flags;
1697266423Sjfv
1698270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: begin");
1699266423Sjfv
1700266423Sjfv	if_maddr_rlock(ifp);
1701266423Sjfv	/*
1702266423Sjfv	** First just get a count, to decide whether
1703266423Sjfv	** we should simply use multicast promiscuous.
1704266423Sjfv	*/
1705266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1706266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1707266423Sjfv			continue;
1708266423Sjfv		mcnt++;
1709266423Sjfv	}
1710266423Sjfv	if_maddr_runlock(ifp);
1711266423Sjfv
1712266423Sjfv	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1713266423Sjfv		/* delete existing MC filters */
1714270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1715266423Sjfv		i40e_aq_set_vsi_multicast_promiscuous(hw,
1716266423Sjfv		    vsi->seid, TRUE, NULL);
1717266423Sjfv		return;
1718266423Sjfv	}
1719266423Sjfv
1720266423Sjfv	mcnt = 0;
1721266423Sjfv	if_maddr_rlock(ifp);
1722266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1723266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1724266423Sjfv			continue;
1725270346Sjfv		ixl_add_mc_filter(vsi,
1726266423Sjfv		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1727266423Sjfv		mcnt++;
1728266423Sjfv	}
1729266423Sjfv	if_maddr_runlock(ifp);
1730266423Sjfv	if (mcnt > 0) {
1731270346Sjfv		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1732270346Sjfv		ixl_add_hw_filters(vsi, flags, mcnt);
1733266423Sjfv	}
1734266423Sjfv
1735270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: end");
1736266423Sjfv	return;
1737266423Sjfv}
1738266423Sjfv
1739266423Sjfvstatic void
1740270346Sjfvixl_del_multi(struct ixl_vsi *vsi)
1741266423Sjfv{
1742266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1743266423Sjfv	struct ifmultiaddr	*ifma;
1744270346Sjfv	struct ixl_mac_filter	*f;
1745266423Sjfv	int			mcnt = 0;
1746266423Sjfv	bool		match = FALSE;
1747266423Sjfv
1748270346Sjfv	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1749266423Sjfv
1750266423Sjfv	/* Search for removed multicast addresses */
1751266423Sjfv	if_maddr_rlock(ifp);
1752266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
1753270346Sjfv		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1754266423Sjfv			match = FALSE;
1755266423Sjfv			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1756266423Sjfv				if (ifma->ifma_addr->sa_family != AF_LINK)
1757266423Sjfv					continue;
1758266423Sjfv				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1759266423Sjfv				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1760266423Sjfv					match = TRUE;
1761266423Sjfv					break;
1762266423Sjfv				}
1763266423Sjfv			}
1764266423Sjfv			if (match == FALSE) {
1765270346Sjfv				f->flags |= IXL_FILTER_DEL;
1766266423Sjfv				mcnt++;
1767266423Sjfv			}
1768266423Sjfv		}
1769266423Sjfv	}
1770266423Sjfv	if_maddr_runlock(ifp);
1771266423Sjfv
1772266423Sjfv	if (mcnt > 0)
1773270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1774266423Sjfv}
1775266423Sjfv
1776266423Sjfv
1777266423Sjfv/*********************************************************************
1778266423Sjfv *  Timer routine
1779266423Sjfv *
1780266423Sjfv *  This routine checks for link status, updates statistics,
1781266423Sjfv *  and runs the watchdog check.
1782266423Sjfv *
1783266423Sjfv **********************************************************************/
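/*
** Summary of each tick (reflecting the code below): the adminq task is
** kicked, statistics are refreshed, a software interrupt is triggered on
** any queue with outstanding work, and per-queue busy counts are tracked;
** only when every queue reports hung is the interface reinitialized,
** otherwise the callout is rescheduled to fire again in one second.
*/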
1784266423Sjfv
1785266423Sjfvstatic void
1786270346Sjfvixl_local_timer(void *arg)
1787266423Sjfv{
1788270346Sjfv	struct ixl_pf		*pf = arg;
1789266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1790270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1791270346Sjfv	struct ixl_queue	*que = vsi->queues;
1792266423Sjfv	device_t		dev = pf->dev;
1793266423Sjfv	int			hung = 0;
1794266423Sjfv	u32			mask;
1795266423Sjfv
1796266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1797266423Sjfv
1798266423Sjfv	/* Fire off the adminq task */
1799266423Sjfv	taskqueue_enqueue(pf->tq, &pf->adminq);
1800266423Sjfv
1801266423Sjfv	/* Update stats */
1802270346Sjfv	ixl_update_stats_counters(pf);
1803266423Sjfv
1804266423Sjfv	/*
1805269198Sjfv	** Check status of the queues
1806266423Sjfv	*/
1807266423Sjfv	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1808266423Sjfv		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1809266423Sjfv
1810266423Sjfv	for (int i = 0; i < vsi->num_queues; i++,que++) {
1811266423Sjfv		/* Any queues with outstanding work get a sw irq */
1812266423Sjfv		if (que->busy)
1813266423Sjfv			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1814266423Sjfv		/*
1815266423Sjfv		** Each time txeof runs without cleaning while there
1816266423Sjfv		** are still uncleaned descriptors, busy is incremented;
1817266423Sjfv		** once it reaches IXL_MAX_TX_BUSY the queue is declared hung.
1818266423Sjfv		*/
1819270346Sjfv		if (que->busy == IXL_QUEUE_HUNG) {
1820269198Sjfv			++hung;
1821269198Sjfv			/* Mark the queue as inactive */
1822269198Sjfv			vsi->active_queues &= ~((u64)1 << que->me);
1823269198Sjfv			continue;
1824269198Sjfv		} else {
1825269198Sjfv			/* Check if we've come back from hung */
1826269198Sjfv			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1827269198Sjfv				vsi->active_queues |= ((u64)1 << que->me);
1828269198Sjfv		}
1829270346Sjfv		if (que->busy >= IXL_MAX_TX_BUSY) {
1830277084Sjfv#ifdef IXL_DEBUG
1831266423Sjfv			device_printf(dev,"Warning queue %d "
1832269198Sjfv			    "appears to be hung!\n", i);
1833277084Sjfv#endif
1834270346Sjfv			que->busy = IXL_QUEUE_HUNG;
1835266423Sjfv			++hung;
1836266423Sjfv		}
1837266423Sjfv	}
1838266423Sjfv	/* Only reinit if all queues show hung */
1839266423Sjfv	if (hung == vsi->num_queues)
1840266423Sjfv		goto hung;
1841266423Sjfv
1842270346Sjfv	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1843266423Sjfv	return;
1844266423Sjfv
1845266423Sjfvhung:
1846266423Sjfv	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1847270346Sjfv	ixl_init_locked(pf);
1848266423Sjfv}
1849266423Sjfv
1850266423Sjfv/*
1851266423Sjfv** Note: this routine updates the OS on the link state;
1852266423Sjfv**	the real check of the hardware only happens with
1853266423Sjfv**	a link interrupt.
1854266423Sjfv*/
1855266423Sjfvstatic void
1856270346Sjfvixl_update_link_status(struct ixl_pf *pf)
1857266423Sjfv{
1858270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1859266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1860266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1861266423Sjfv	device_t		dev = pf->dev;
1862266423Sjfv
1863279858Sjfv	if (pf->link_up){
1864266423Sjfv		if (vsi->link_active == FALSE) {
1865279033Sjfv			pf->fc = hw->fc.current_mode;
1866266423Sjfv			if (bootverbose) {
1867266423Sjfv				device_printf(dev,"Link is up %d Gbps %s,"
1868266423Sjfv				    " Flow Control: %s\n",
1869279858Sjfv				    ((pf->link_speed ==
1870279858Sjfv				    I40E_LINK_SPEED_40GB)? 40:10),
1871279033Sjfv				    "Full Duplex", ixl_fc_string[pf->fc]);
1872266423Sjfv			}
1873266423Sjfv			vsi->link_active = TRUE;
1874277084Sjfv			/*
1875277084Sjfv			** Warn user if the link speed on an NPAR-enabled
1876277084Sjfv			** partition is not at least 10Gbps
1877277084Sjfv			*/
1878277084Sjfv			if (hw->func_caps.npar_enable &&
1879279858Sjfv			   (hw->phy.link_info.link_speed ==
1880279858Sjfv			   I40E_LINK_SPEED_1GB ||
1881279858Sjfv			   hw->phy.link_info.link_speed ==
1882279858Sjfv			   I40E_LINK_SPEED_100MB))
1883279858Sjfv				device_printf(dev, "The partition detected"
1884279858Sjfv				    "link speed that is less than 10Gbps\n");
1885266423Sjfv			if_link_state_change(ifp, LINK_STATE_UP);
1886266423Sjfv		}
1887266423Sjfv	} else { /* Link down */
1888266423Sjfv		if (vsi->link_active == TRUE) {
1889266423Sjfv			if (bootverbose)
1890266423Sjfv				device_printf(dev,"Link is Down\n");
1891266423Sjfv			if_link_state_change(ifp, LINK_STATE_DOWN);
1892266423Sjfv			vsi->link_active = FALSE;
1893266423Sjfv		}
1894266423Sjfv	}
1895266423Sjfv
1896266423Sjfv	return;
1897266423Sjfv}
1898266423Sjfv
1899266423Sjfv/*********************************************************************
1900266423Sjfv *
1901266423Sjfv *  This routine disables all traffic on the adapter by disabling
1902266423Sjfv *  interrupts and the RX/TX rings, then stops the local timer.
1903266423Sjfv *
1904266423Sjfv **********************************************************************/
1905266423Sjfv
1906266423Sjfvstatic void
1907270346Sjfvixl_stop(struct ixl_pf *pf)
1908266423Sjfv{
1909270346Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
1910266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1911266423Sjfv
1912266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1913266423Sjfv
1914270346Sjfv	INIT_DEBUGOUT("ixl_stop: begin\n");
1915279858Sjfv	if (pf->num_vfs == 0)
1916279858Sjfv		ixl_disable_intr(vsi);
1917279858Sjfv	else
1918279858Sjfv		ixl_disable_rings_intr(vsi);
1919270346Sjfv	ixl_disable_rings(vsi);
1920266423Sjfv
1921266423Sjfv	/* Tell the stack that the interface is no longer active */
1922266423Sjfv	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1923266423Sjfv
1924266423Sjfv	/* Stop the local timer */
1925266423Sjfv	callout_stop(&pf->timer);
1926266423Sjfv
1927266423Sjfv	return;
1928266423Sjfv}
1929266423Sjfv
1930266423Sjfv
1931266423Sjfv/*********************************************************************
1932266423Sjfv *
1933266423Sjfv *  Setup the Legacy or MSI Interrupt handler for the VSI
1934266423Sjfv *
1935266423Sjfv **********************************************************************/
1936266423Sjfvstatic int
1937270346Sjfvixl_assign_vsi_legacy(struct ixl_pf *pf)
1938266423Sjfv{
1939266423Sjfv	device_t        dev = pf->dev;
1940270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
1941270346Sjfv	struct		ixl_queue *que = vsi->queues;
1942266423Sjfv	int 		error, rid = 0;
1943266423Sjfv
1944266423Sjfv	if (pf->msix == 1)
1945266423Sjfv		rid = 1;
1946266423Sjfv	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1947266423Sjfv	    &rid, RF_SHAREABLE | RF_ACTIVE);
1948266423Sjfv	if (pf->res == NULL) {
1949266423Sjfv		device_printf(dev,"Unable to allocate"
1950266423Sjfv		    " bus resource: vsi legacy/msi interrupt\n");
1951266423Sjfv		return (ENXIO);
1952266423Sjfv	}
1953266423Sjfv
1954266423Sjfv	/* Set the handler function */
1955266423Sjfv	error = bus_setup_intr(dev, pf->res,
1956266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1957270346Sjfv	    ixl_intr, pf, &pf->tag);
1958266423Sjfv	if (error) {
1959266423Sjfv		pf->res = NULL;
1960266423Sjfv		device_printf(dev, "Failed to register legacy/msi handler");
1961266423Sjfv		return (error);
1962266423Sjfv	}
1963266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1964270346Sjfv	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1965270346Sjfv	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1966270346Sjfv	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1967266423Sjfv	    taskqueue_thread_enqueue, &que->tq);
1968266423Sjfv	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1969266423Sjfv	    device_get_nameunit(dev));
1970270346Sjfv	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1971279858Sjfv
1972279858Sjfv#ifdef PCI_IOV
1973279858Sjfv	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
1974279858Sjfv#endif
1975279858Sjfv
1976270346Sjfv	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1977266423Sjfv	    taskqueue_thread_enqueue, &pf->tq);
1978266423Sjfv	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1979266423Sjfv	    device_get_nameunit(dev));
1980266423Sjfv
1981266423Sjfv	return (0);
1982266423Sjfv}
1983266423Sjfv
1984266423Sjfv
1985266423Sjfv/*********************************************************************
1986266423Sjfv *
1987266423Sjfv *  Setup MSIX Interrupt resources and handlers for the VSI
1988266423Sjfv *
1989266423Sjfv **********************************************************************/
1990266423Sjfvstatic int
1991270346Sjfvixl_assign_vsi_msix(struct ixl_pf *pf)
1992266423Sjfv{
1993266423Sjfv	device_t	dev = pf->dev;
1994270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
1995270346Sjfv	struct 		ixl_queue *que = vsi->queues;
1996266423Sjfv	struct		tx_ring	 *txr;
1997266423Sjfv	int 		error, rid, vector = 0;
1998299545Serj#ifdef	RSS
1999299545Serj	cpuset_t cpu_mask;
2000299545Serj#endif
2001266423Sjfv
2002266423Sjfv	/* Admin Queue is vector 0 */
2003266423Sjfv	rid = vector + 1;
2004266423Sjfv	pf->res = bus_alloc_resource_any(dev,
2005266423Sjfv    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2006266423Sjfv	if (!pf->res) {
2007266423Sjfv		device_printf(dev,"Unable to allocate"
2008266423Sjfv    	    " bus resource: Adminq interrupt [%d]\n", rid);
2009266423Sjfv		return (ENXIO);
2010266423Sjfv	}
2011266423Sjfv	/* Set the adminq vector and handler */
2012266423Sjfv	error = bus_setup_intr(dev, pf->res,
2013266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2014270346Sjfv	    ixl_msix_adminq, pf, &pf->tag);
2015266423Sjfv	if (error) {
2016266423Sjfv		pf->res = NULL;
2017266423Sjfv		device_printf(dev, "Failed to register Admin que handler");
2018266423Sjfv		return (error);
2019266423Sjfv	}
2020266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "aq");
2021266423Sjfv	pf->admvec = vector;
2022299545Serj	/* Tasklet for Admin Queue */
2023299545Serj	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2024299545Serj
2025299545Serj#ifdef PCI_IOV
2026299545Serj	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2027299545Serj#endif
2028299545Serj
2029299545Serj	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2030299545Serj	    taskqueue_thread_enqueue, &pf->tq);
2031299545Serj	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2032299545Serj	    device_get_nameunit(pf->dev));
2033266423Sjfv	++vector;
2034266423Sjfv
2035266423Sjfv	/* Now set up the stations */
2036266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2037277084Sjfv		int cpu_id = i;
2038266423Sjfv		rid = vector + 1;
2039266423Sjfv		txr = &que->txr;
2040266423Sjfv		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2041266423Sjfv		    RF_SHAREABLE | RF_ACTIVE);
2042266423Sjfv		if (que->res == NULL) {
2043266423Sjfv			device_printf(dev,"Unable to allocate"
2044266423Sjfv		    	    " bus resource: que interrupt [%d]\n", vector);
2045266423Sjfv			return (ENXIO);
2046266423Sjfv		}
2047266423Sjfv		/* Set the handler function */
2048266423Sjfv		error = bus_setup_intr(dev, que->res,
2049266423Sjfv		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2050270346Sjfv		    ixl_msix_que, que, &que->tag);
2051266423Sjfv		if (error) {
2052266423Sjfv			que->res = NULL;
2053266423Sjfv			device_printf(dev, "Failed to register que handler");
2054266423Sjfv			return (error);
2055266423Sjfv		}
2056266423Sjfv		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2057266423Sjfv		/* Bind the vector to a CPU */
2058277084Sjfv#ifdef RSS
2059277084Sjfv		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2060277084Sjfv#endif
2061277084Sjfv		bus_bind_intr(dev, que->res, cpu_id);
2062266423Sjfv		que->msix = vector;
2063299545Serj		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2064299545Serj		TASK_INIT(&que->task, 0, ixl_handle_que, que);
2065299545Serj		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2066299545Serj		    taskqueue_thread_enqueue, &que->tq);
2067299545Serj#ifdef RSS
2068299545Serj		CPU_SETOF(cpu_id, &cpu_mask);
2069299545Serj		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2070299545Serj		    &cpu_mask, "%s (bucket %d)",
2071299545Serj		    device_get_nameunit(dev), cpu_id);
2072299545Serj#else
2073299545Serj		taskqueue_start_threads(&que->tq, 1, PI_NET,
2074299545Serj		    "%s que", device_get_nameunit(dev));
2075299545Serj#endif
2076266423Sjfv	}
2077266423Sjfv
2078266423Sjfv	return (0);
2079266423Sjfv}
2080266423Sjfv
2081266423Sjfv
2082266423Sjfv/*
2083266423Sjfv * Allocate MSI/X vectors
2084266423Sjfv */
2085266423Sjfvstatic int
2086270346Sjfvixl_init_msix(struct ixl_pf *pf)
2087266423Sjfv{
2088266423Sjfv	device_t dev = pf->dev;
2089266423Sjfv	int rid, want, vectors, queues, available;
2090266423Sjfv
2091266423Sjfv	/* Override by tunable */
2092270346Sjfv	if (ixl_enable_msix == 0)
2093266423Sjfv		goto msi;
2094266423Sjfv
2095269198Sjfv	/*
2096269198Sjfv	** When used in a virtualized environment
2097269198Sjfv	** the PCI BUSMASTER capability may not be set,
2098269198Sjfv	** so explicitly set it here and rewrite
2099269198Sjfv	** the ENABLE in the MSIX control register
2100269198Sjfv	** at this point to cause the host to
2101269198Sjfv	** successfully initialize us.
2102269198Sjfv	*/
2103269198Sjfv	{
2104269198Sjfv		u16 pci_cmd_word;
2105269198Sjfv		int msix_ctrl;
2106269198Sjfv		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2107269198Sjfv		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2108269198Sjfv		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2109269198Sjfv		pci_find_cap(dev, PCIY_MSIX, &rid);
2110269198Sjfv		rid += PCIR_MSIX_CTRL;
2111269198Sjfv		msix_ctrl = pci_read_config(dev, rid, 2);
2112269198Sjfv		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2113269198Sjfv		pci_write_config(dev, rid, msix_ctrl, 2);
2114269198Sjfv	}
2115269198Sjfv
2116266423Sjfv	/* First try MSI/X */
2117270346Sjfv	rid = PCIR_BAR(IXL_BAR);
2118266423Sjfv	pf->msix_mem = bus_alloc_resource_any(dev,
2119266423Sjfv	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2120266423Sjfv       	if (!pf->msix_mem) {
2121266423Sjfv		/* May not be enabled */
2122266423Sjfv		device_printf(pf->dev,
2123266423Sjfv		    "Unable to map MSIX table \n");
2124266423Sjfv		goto msi;
2125266423Sjfv	}
2126266423Sjfv
2127266423Sjfv	available = pci_msix_count(dev);
2128266423Sjfv	if (available == 0) { /* system has msix disabled */
2129266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2130266423Sjfv		    rid, pf->msix_mem);
2131266423Sjfv		pf->msix_mem = NULL;
2132266423Sjfv		goto msi;
2133266423Sjfv	}
2134266423Sjfv
2135266423Sjfv	/* Figure out a reasonable auto config value */
2136266423Sjfv	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2137266423Sjfv
2138299545Serj	/* Override with hardcoded value if sane */
2139270346Sjfv	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2140270346Sjfv		queues = ixl_max_queues;
2141266423Sjfv
2142277084Sjfv#ifdef  RSS
2143277084Sjfv	/* If we're doing RSS, clamp at the number of RSS buckets */
2144277084Sjfv	if (queues > rss_getnumbuckets())
2145277084Sjfv		queues = rss_getnumbuckets();
2146277084Sjfv#endif
2147277084Sjfv
2148266423Sjfv	/*
2149266423Sjfv	** Want one vector (RX/TX pair) per queue
2150266423Sjfv	** plus an additional for the admin queue.
2151266423Sjfv	*/
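	/*
	** Illustrative example (numbers assumed, not from this system):
	** with 8 CPUs, 16 MSIX vectors available, and no tunable or RSS
	** clamp, queues = 8 and want = 9 (one vector per queue pair plus
	** the admin vector).
	*/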
2152266423Sjfv	want = queues + 1;
2153266423Sjfv	if (want <= available)	/* Have enough */
2154266423Sjfv		vectors = want;
2155266423Sjfv	else {
2156266423Sjfv               	device_printf(pf->dev,
2157266423Sjfv		    "MSIX Configuration Problem, "
2158266423Sjfv		    "%d vectors available but %d wanted!\n",
2159266423Sjfv		    available, want);
2160266423Sjfv		return (0); /* Will go to Legacy setup */
2161266423Sjfv	}
2162266423Sjfv
2163266423Sjfv	if (pci_alloc_msix(dev, &vectors) == 0) {
2164266423Sjfv               	device_printf(pf->dev,
2165266423Sjfv		    "Using MSIX interrupts with %d vectors\n", vectors);
2166266423Sjfv		pf->msix = vectors;
2167266423Sjfv		pf->vsi.num_queues = queues;
2168277084Sjfv#ifdef RSS
2169277084Sjfv		/*
2170277084Sjfv		 * If we're doing RSS, the number of queues needs to
2171277084Sjfv		 * match the number of RSS buckets that are configured.
2172277084Sjfv		 *
2173277084Sjfv		 * + If there's more queues than RSS buckets, we'll end
2174277084Sjfv		 *   up with queues that get no traffic.
2175277084Sjfv		 *
2176277084Sjfv		 * + If there's more RSS buckets than queues, we'll end
2177277084Sjfv		 *   up having multiple RSS buckets map to the same queue,
2178277084Sjfv		 *   so there'll be some contention.
2179277084Sjfv		 */
2180277084Sjfv		if (queues != rss_getnumbuckets()) {
2181277084Sjfv			device_printf(dev,
2182277084Sjfv			    "%s: queues (%d) != RSS buckets (%d)"
2183277084Sjfv			    "; performance will be impacted.\n",
2184277084Sjfv			    __func__, queues, rss_getnumbuckets());
2185277084Sjfv		}
2186277084Sjfv#endif
2187266423Sjfv		return (vectors);
2188266423Sjfv	}
2189266423Sjfvmsi:
2190266423Sjfv       	vectors = pci_msi_count(dev);
2191266423Sjfv	pf->vsi.num_queues = 1;
2192266423Sjfv	pf->msix = 1;
2193270346Sjfv	ixl_max_queues = 1;
2194270346Sjfv	ixl_enable_msix = 0;
2195266423Sjfv       	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2196266423Sjfv               	device_printf(pf->dev,"Using an MSI interrupt\n");
2197266423Sjfv	else {
2198266423Sjfv		pf->msix = 0;
2199266423Sjfv               	device_printf(pf->dev,"Using a Legacy interrupt\n");
2200266423Sjfv	}
2201266423Sjfv	return (vectors);
2202266423Sjfv}
2203266423Sjfv
2204266423Sjfv
2205266423Sjfv/*
2206266423Sjfv * Plumb MSI/X vectors
2207266423Sjfv */
2208266423Sjfvstatic void
2209270346Sjfvixl_configure_msix(struct ixl_pf *pf)
2210266423Sjfv{
2211266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2212270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
2213266423Sjfv	u32		reg;
2214266423Sjfv	u16		vector = 1;
2215266423Sjfv
2216266423Sjfv	/* First set up the adminq - vector 0 */
2217266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2218266423Sjfv	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2219266423Sjfv
2220266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2221266423Sjfv	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2222266423Sjfv	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2223266423Sjfv	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2224266423Sjfv	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2225266423Sjfv	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2226266423Sjfv	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2227266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2228266423Sjfv
2229266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2230270346Sjfv	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2231266423Sjfv
2232266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2233266423Sjfv	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2234266423Sjfv	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2235266423Sjfv
2236266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2237266423Sjfv
2238266423Sjfv	/* Next configure the queues */
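	/*
	** Cause-list layout programmed below: each list starts at RX
	** queue i (LNKLSTN(i)), the RX cause chains to TX queue i, and
	** the TX cause chains on to RX queue i+1; the final TX cause is
	** terminated with IXL_QUEUE_EOL.
	*/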
2239266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2240299545Serj		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2241266423Sjfv		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2242266423Sjfv
2243266423Sjfv		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2244270346Sjfv		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2245266423Sjfv		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2246266423Sjfv		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2247266423Sjfv		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2248266423Sjfv		wr32(hw, I40E_QINT_RQCTL(i), reg);
2249266423Sjfv
2250266423Sjfv		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2251270346Sjfv		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2252266423Sjfv		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2253299545Serj		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2254266423Sjfv		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2255299545Serj		if (i == (vsi->num_queues - 1))
2256299545Serj			reg |= (IXL_QUEUE_EOL
2257299545Serj			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2258266423Sjfv		wr32(hw, I40E_QINT_TQCTL(i), reg);
2259266423Sjfv	}
2260266423Sjfv}
2261266423Sjfv
2262266423Sjfv/*
2263266423Sjfv * Configure for MSI single vector operation
2264266423Sjfv */
2265266423Sjfvstatic void
2266270346Sjfvixl_configure_legacy(struct ixl_pf *pf)
2267266423Sjfv{
2268266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2269266423Sjfv	u32		reg;
2270266423Sjfv
2271266423Sjfv
2272266423Sjfv	wr32(hw, I40E_PFINT_ITR0(0), 0);
2273266423Sjfv	wr32(hw, I40E_PFINT_ITR0(1), 0);
2274266423Sjfv
2275266423Sjfv
2276266423Sjfv	/* Setup "other" causes */
2277266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2278266423Sjfv	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2279266423Sjfv	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2280266423Sjfv	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2281266423Sjfv	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2282266423Sjfv	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2283266423Sjfv	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2284266423Sjfv	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2285266423Sjfv	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2286266423Sjfv	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2287266423Sjfv	    ;
2288266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2289266423Sjfv
2290266423Sjfv	/* SW_ITR_IDX = 0, but don't change INTENA */
2291266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2292266423Sjfv	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2293266423Sjfv	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2294266423Sjfv	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2295266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2296266423Sjfv
2297266423Sjfv	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2298266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0);
2299266423Sjfv
2300266423Sjfv	/* Associate the queue pair to the vector and enable the q int */
2301266423Sjfv	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2302270346Sjfv	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2303266423Sjfv	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2304266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
2305266423Sjfv
2306266423Sjfv	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2307270346Sjfv	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2308270346Sjfv	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2309266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
2310266423Sjfv
2311266423Sjfv	/* Next enable the queue pair */
2312266423Sjfv	reg = rd32(hw, I40E_QTX_ENA(0));
2313266423Sjfv	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2314266423Sjfv	wr32(hw, I40E_QTX_ENA(0), reg);
2315266423Sjfv
2316266423Sjfv	reg = rd32(hw, I40E_QRX_ENA(0));
2317266423Sjfv	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2318266423Sjfv	wr32(hw, I40E_QRX_ENA(0), reg);
2319266423Sjfv}
2320266423Sjfv
2321266423Sjfv
2322266423Sjfv/*
2323266423Sjfv * Set the Initial ITR state
2324266423Sjfv */
2325266423Sjfvstatic void
2326270346Sjfvixl_configure_itr(struct ixl_pf *pf)
2327266423Sjfv{
2328266423Sjfv	struct i40e_hw		*hw = &pf->hw;
2329270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2330270346Sjfv	struct ixl_queue	*que = vsi->queues;
2331266423Sjfv
2332270346Sjfv	vsi->rx_itr_setting = ixl_rx_itr;
2333270346Sjfv	if (ixl_dynamic_rx_itr)
2334270346Sjfv		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2335270346Sjfv	vsi->tx_itr_setting = ixl_tx_itr;
2336270346Sjfv	if (ixl_dynamic_tx_itr)
2337270346Sjfv		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2338266423Sjfv
2339266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2340266423Sjfv		struct tx_ring	*txr = &que->txr;
2341266423Sjfv		struct rx_ring 	*rxr = &que->rxr;
2342266423Sjfv
2343270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2344266423Sjfv		    vsi->rx_itr_setting);
2345266423Sjfv		rxr->itr = vsi->rx_itr_setting;
2346270346Sjfv		rxr->latency = IXL_AVE_LATENCY;
2347270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2348266423Sjfv		    vsi->tx_itr_setting);
2349266423Sjfv		txr->itr = vsi->tx_itr_setting;
2350270346Sjfv		txr->latency = IXL_AVE_LATENCY;
2351266423Sjfv	}
2352266423Sjfv}
2353266423Sjfv
2354266423Sjfv
2355266423Sjfvstatic int
2356270346Sjfvixl_allocate_pci_resources(struct ixl_pf *pf)
2357266423Sjfv{
2358266423Sjfv	int             rid;
2359266423Sjfv	device_t        dev = pf->dev;
2360266423Sjfv
2361266423Sjfv	rid = PCIR_BAR(0);
2362266423Sjfv	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2363266423Sjfv	    &rid, RF_ACTIVE);
2364266423Sjfv
2365266423Sjfv	if (!(pf->pci_mem)) {
2366266423Sjfv		device_printf(dev,"Unable to allocate bus resource: memory\n");
2367266423Sjfv		return (ENXIO);
2368266423Sjfv	}
2369266423Sjfv
2370266423Sjfv	pf->osdep.mem_bus_space_tag =
2371266423Sjfv		rman_get_bustag(pf->pci_mem);
2372266423Sjfv	pf->osdep.mem_bus_space_handle =
2373266423Sjfv		rman_get_bushandle(pf->pci_mem);
2374270346Sjfv	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2375272285Srstone	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2376266423Sjfv	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2377266423Sjfv
2378266423Sjfv	pf->hw.back = &pf->osdep;
2379266423Sjfv
2380266423Sjfv	/*
2381266423Sjfv	** Now set up MSI or MSI/X; this should
2382266423Sjfv	** return the number of supported
2383266423Sjfv	** vectors (will be 1 for MSI).
2384266423Sjfv	*/
2385270346Sjfv	pf->msix = ixl_init_msix(pf);
2386266423Sjfv	return (0);
2387266423Sjfv}
2388266423Sjfv
2389266423Sjfvstatic void
2390270346Sjfvixl_free_pci_resources(struct ixl_pf * pf)
2391266423Sjfv{
2392270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2393270346Sjfv	struct ixl_queue	*que = vsi->queues;
2394266423Sjfv	device_t		dev = pf->dev;
2395266423Sjfv	int			rid, memrid;
2396266423Sjfv
2397270346Sjfv	memrid = PCIR_BAR(IXL_BAR);
2398266423Sjfv
2399266423Sjfv	/* We may get here before stations are setup */
2400270346Sjfv	if ((!ixl_enable_msix) || (que == NULL))
2401266423Sjfv		goto early;
2402266423Sjfv
2403266423Sjfv	/*
2404266423Sjfv	**  Release all msix VSI resources:
2405266423Sjfv	*/
2406266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2407266423Sjfv		rid = que->msix + 1;
2408266423Sjfv		if (que->tag != NULL) {
2409266423Sjfv			bus_teardown_intr(dev, que->res, que->tag);
2410266423Sjfv			que->tag = NULL;
2411266423Sjfv		}
2412266423Sjfv		if (que->res != NULL)
2413266423Sjfv			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2414266423Sjfv	}
2415266423Sjfv
2416266423Sjfvearly:
2417266423Sjfv	/* Clean the AdminQ interrupt last */
2418266423Sjfv	if (pf->admvec) /* we are doing MSIX */
2419266423Sjfv		rid = pf->admvec + 1;
2420266423Sjfv	else
2421266423Sjfv		(pf->msix != 0) ? (rid = 1):(rid = 0);
2422266423Sjfv
2423266423Sjfv	if (pf->tag != NULL) {
2424266423Sjfv		bus_teardown_intr(dev, pf->res, pf->tag);
2425266423Sjfv		pf->tag = NULL;
2426266423Sjfv	}
2427266423Sjfv	if (pf->res != NULL)
2428266423Sjfv		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2429266423Sjfv
2430266423Sjfv	if (pf->msix)
2431266423Sjfv		pci_release_msi(dev);
2432266423Sjfv
2433266423Sjfv	if (pf->msix_mem != NULL)
2434266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2435266423Sjfv		    memrid, pf->msix_mem);
2436266423Sjfv
2437266423Sjfv	if (pf->pci_mem != NULL)
2438266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2439266423Sjfv		    PCIR_BAR(0), pf->pci_mem);
2440266423Sjfv
2441266423Sjfv	return;
2442266423Sjfv}
2443266423Sjfv
2444274205Sjfvstatic void
2445274205Sjfvixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2446274205Sjfv{
2447274205Sjfv	/* Display supported media types */
2448274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2449274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2450266423Sjfv
2451274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2452274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2453279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2454279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2455279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2456279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2457274205Sjfv
2458284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2459279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2460274205Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2461274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2462279033Sjfv
2463274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2464274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2465274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2466274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2467274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2468274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2469274205Sjfv
2470279033Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2471279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2472279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2473279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2474279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2475274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2476274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2477274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2478274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2479274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2480284049Sjfv
2481284049Sjfv#ifndef IFM_ETH_XTYPE
2482284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2483284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2484284049Sjfv
2485284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2486284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
2487284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2488284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_SFI))
2489284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2490284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2491284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2492284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2493284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2494284049Sjfv
2495284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2496284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2497284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2498284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2499284049Sjfv#else
2500284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2501284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2502284049Sjfv
2503284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
2504284049Sjfv	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
2505284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
2506284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
2507284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
2508284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
2509284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
2510284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2511284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2512284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2513284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2514284049Sjfv
2515284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
2516284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
2517284049Sjfv
2518284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2519284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
2520284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2521284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
2522284049Sjfv#endif
2523274205Sjfv}
2524274205Sjfv
2525266423Sjfv/*********************************************************************
2526266423Sjfv *
2527266423Sjfv *  Setup networking device structure and register an interface.
2528266423Sjfv *
2529266423Sjfv **********************************************************************/
2530266423Sjfvstatic int
2531270346Sjfvixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2532266423Sjfv{
2533266423Sjfv	struct ifnet		*ifp;
2534266423Sjfv	struct i40e_hw		*hw = vsi->hw;
2535270346Sjfv	struct ixl_queue	*que = vsi->queues;
2536279033Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
2537266423Sjfv	enum i40e_status_code aq_error = 0;
2538266423Sjfv
2539270346Sjfv	INIT_DEBUGOUT("ixl_setup_interface: begin");
2540266423Sjfv
2541266423Sjfv	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2542266423Sjfv	if (ifp == NULL) {
2543266423Sjfv		device_printf(dev, "can not allocate ifnet structure\n");
2544266423Sjfv		return (-1);
2545266423Sjfv	}
2546266423Sjfv	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2547266423Sjfv	ifp->if_mtu = ETHERMTU;
2548299545Serj	ifp->if_baudrate = 4000000000;  // ??
2549270346Sjfv	ifp->if_init = ixl_init;
2550266423Sjfv	ifp->if_softc = vsi;
2551266423Sjfv	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2552270346Sjfv	ifp->if_ioctl = ixl_ioctl;
2553266423Sjfv
2554274205Sjfv#if __FreeBSD_version >= 1100036
2555272227Sglebius	if_setgetcounterfn(ifp, ixl_get_counter);
2556272227Sglebius#endif
2557272227Sglebius
2558270346Sjfv	ifp->if_transmit = ixl_mq_start;
2559266423Sjfv
2560270346Sjfv	ifp->if_qflush = ixl_qflush;
2561266423Sjfv
2562266423Sjfv	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2563266423Sjfv
2564266423Sjfv	vsi->max_frame_size =
2565266423Sjfv	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2566266423Sjfv	    + ETHER_VLAN_ENCAP_LEN;
2567266423Sjfv
2568266423Sjfv	/*
2569266423Sjfv	 * Tell the upper layer(s) we support long frames.
2570266423Sjfv	 */
2571270856Sglebius	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2572266423Sjfv
2573266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM;
2574266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2575266423Sjfv	ifp->if_capabilities |= IFCAP_TSO;
2576266423Sjfv	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2577266423Sjfv	ifp->if_capabilities |= IFCAP_LRO;
2578266423Sjfv
2579266423Sjfv	/* VLAN capabilties */
2580266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2581266423Sjfv			     |  IFCAP_VLAN_HWTSO
2582266423Sjfv			     |  IFCAP_VLAN_MTU
2583266423Sjfv			     |  IFCAP_VLAN_HWCSUM;
2584266423Sjfv	ifp->if_capenable = ifp->if_capabilities;
2585266423Sjfv
2586266423Sjfv	/*
2587266423Sjfv	** Don't turn this on by default: if vlans are
2588266423Sjfv	** created on another pseudo device (e.g. lagg)
2589266423Sjfv	** then vlan events are not passed through, breaking
2590266423Sjfv	** operation, but with HW FILTER off it works. If
2591270346Sjfv	** using vlans directly on the ixl driver you can
2592266423Sjfv	** enable this and get full hardware tag filtering.
2593266423Sjfv	*/
2594266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2595266423Sjfv
2596266423Sjfv	/*
2597266423Sjfv	 * Specify the media types supported by this adapter and register
2598266423Sjfv	 * callbacks to update media and link information
2599266423Sjfv	 */
2600270346Sjfv	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2601270346Sjfv		     ixl_media_status);
2602266423Sjfv
2603279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
2604279033Sjfv	    FALSE, TRUE, &abilities, NULL);
2605279033Sjfv	/* May need delay to detect fiber correctly */
2606274205Sjfv	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2607274205Sjfv		i40e_msec_delay(200);
2608277084Sjfv		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2609279033Sjfv		    TRUE, &abilities, NULL);
2610279033Sjfv	}
2611279033Sjfv	if (aq_error) {
2612274205Sjfv		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2613274205Sjfv			device_printf(dev, "Unknown PHY type detected!\n");
2614274205Sjfv		else
2615279033Sjfv			device_printf(dev,
2616279033Sjfv			    "Error getting supported media types, err %d,"
2617279033Sjfv			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2618279033Sjfv		return (0);
2619279033Sjfv	}
2620266423Sjfv
2621279033Sjfv	ixl_add_ifmedia(vsi, abilities.phy_type);
2622279033Sjfv
2623266423Sjfv	/* Use autoselect media by default */
2624266423Sjfv	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2625266423Sjfv	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2626266423Sjfv
2627274205Sjfv	ether_ifattach(ifp, hw->mac.addr);
2628274205Sjfv
2629266423Sjfv	return (0);
2630266423Sjfv}
2631266423Sjfv
2632279858Sjfv/*
2633279858Sjfv** Run when the Admin Queue gets a
2634279858Sjfv** link transition interrupt.
2635279858Sjfv*/
2636279858Sjfvstatic void
2637279858Sjfvixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2638266423Sjfv{
2639279858Sjfv	struct i40e_hw	*hw = &pf->hw;
2640279858Sjfv	struct i40e_aqc_get_link_status *status =
2641279858Sjfv	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2642266423Sjfv	bool check;
2643266423Sjfv
2644279858Sjfv	hw->phy.get_link_info = TRUE;
2645284049Sjfv	i40e_get_link_status(hw, &check);
2646279858Sjfv	pf->link_up = check;
2647270346Sjfv#ifdef IXL_DEBUG
2648266423Sjfv	printf("Link is %s\n", check ? "up":"down");
2649266423Sjfv#endif
2650279858Sjfv	/* Report if Unqualified modules are found */
2651279858Sjfv	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2652279858Sjfv	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2653279858Sjfv	    (!(status->link_info & I40E_AQ_LINK_UP)))
2654279858Sjfv		device_printf(pf->dev, "Link failed because "
2655279858Sjfv		    "an unqualified module was detected\n");
2656279858Sjfv
2657279858Sjfv	return;
2658266423Sjfv}
2659266423Sjfv
2660266423Sjfv/*********************************************************************
2661266423Sjfv *
2662279033Sjfv *  Get Firmware Switch configuration
2663279033Sjfv *	- this will need to be more robust when more complex
2664279033Sjfv *	  switch configurations are enabled.
2665266423Sjfv *
2666266423Sjfv **********************************************************************/
2667266423Sjfvstatic int
2668279033Sjfvixl_switch_config(struct ixl_pf *pf)
2669266423Sjfv{
2670279033Sjfv	struct i40e_hw	*hw = &pf->hw;
2671279033Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
2672266423Sjfv	device_t 	dev = vsi->dev;
2673266423Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
2674266423Sjfv	u8	aq_buf[I40E_AQ_LARGE_BUF];
2675279858Sjfv	int	ret;
2676266423Sjfv	u16	next = 0;
2677266423Sjfv
2678279033Sjfv	memset(&aq_buf, 0, sizeof(aq_buf));
2679266423Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2680266423Sjfv	ret = i40e_aq_get_switch_config(hw, sw_config,
2681266423Sjfv	    sizeof(aq_buf), &next, NULL);
2682266423Sjfv	if (ret) {
2683279858Sjfv		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2684279858Sjfv		    ret);
2685266423Sjfv		return (ret);
2686266423Sjfv	}
2687270346Sjfv#ifdef IXL_DEBUG
2688279858Sjfv	device_printf(dev,
2689279858Sjfv	    "Switch config: header reported: %d in structure, %d total\n",
2690266423Sjfv    	    sw_config->header.num_reported, sw_config->header.num_total);
2691279858Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
2692279858Sjfv		device_printf(dev,
2693279858Sjfv		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2694279858Sjfv		    sw_config->element[i].element_type,
2695279858Sjfv		    sw_config->element[i].seid,
2696279858Sjfv		    sw_config->element[i].uplink_seid,
2697279858Sjfv		    sw_config->element[i].downlink_seid);
2698279858Sjfv	}
2699266423Sjfv#endif
2700279033Sjfv	/* Simplified due to a single VSI at the moment */
2701279858Sjfv	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2702279858Sjfv	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2703266423Sjfv	vsi->seid = sw_config->element[0].seid;
2704279033Sjfv	return (ret);
2705279033Sjfv}
2706266423Sjfv
2707279033Sjfv/*********************************************************************
2708279033Sjfv *
2709279033Sjfv *  Initialize the VSI:  this sets up the VSI context (descriptor
2710279033Sjfv *  			 counts, buffer sizes, queue mapping) and also
2711279033Sjfv *			 initializes the TX and RX rings.
2712279033Sjfv *
2713279033Sjfv **********************************************************************/
2714279033Sjfvstatic int
2715279033Sjfvixl_initialize_vsi(struct ixl_vsi *vsi)
2716279033Sjfv{
2717279858Sjfv	struct ixl_pf		*pf = vsi->back;
2718279033Sjfv	struct ixl_queue	*que = vsi->queues;
2719279033Sjfv	device_t		dev = vsi->dev;
2720279033Sjfv	struct i40e_hw		*hw = vsi->hw;
2721279033Sjfv	struct i40e_vsi_context	ctxt;
2722279033Sjfv	int			err = 0;
2723279033Sjfv
2724266423Sjfv	memset(&ctxt, 0, sizeof(ctxt));
2725266423Sjfv	ctxt.seid = vsi->seid;
2726279858Sjfv	if (pf->veb_seid != 0)
2727279858Sjfv		ctxt.uplink_seid = pf->veb_seid;
2728266423Sjfv	ctxt.pf_num = hw->pf_id;
2729279033Sjfv	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2730279033Sjfv	if (err) {
2731279033Sjfv		device_printf(dev,"get vsi params failed %x!!\n", err);
2732279033Sjfv		return (err);
2733266423Sjfv	}
2734270346Sjfv#ifdef IXL_DEBUG
2735266423Sjfv	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2736266423Sjfv	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2737266423Sjfv	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2738266423Sjfv	    ctxt.uplink_seid, ctxt.vsi_number,
2739266423Sjfv	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2740266423Sjfv	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2741266423Sjfv	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2742266423Sjfv#endif
2743266423Sjfv	/*
2744266423Sjfv	** Set the queue and traffic class bits
2745266423Sjfv	**  - when multiple traffic classes are supported
2746266423Sjfv	**    this will need to be more robust.
2747266423Sjfv	*/
2748266423Sjfv	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2749266423Sjfv	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2750299545Serj	ctxt.info.queue_mapping[0] = 0;
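	/*
	** Assumption: per the AQ VSI queue-map encoding, the hardcoded
	** 0x0800 below means TC0 starts at queue offset 0 with the queue
	** count field set to 4 (a power-of-2 exponent, i.e. 16 queues).
	*/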
2751299545Serj	ctxt.info.tc_mapping[0] = 0x0800;
2752266423Sjfv
2753266423Sjfv	/* Set VLAN receive stripping mode */
2754266423Sjfv	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2755266423Sjfv	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2756266423Sjfv	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2757266423Sjfv	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2758266423Sjfv	else
2759266423Sjfv	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2760266423Sjfv
2761266423Sjfv	/* Keep copy of VSI info in VSI for statistic counters */
2762266423Sjfv	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2763266423Sjfv
2764266423Sjfv	/* Reset VSI statistics */
2765270346Sjfv	ixl_vsi_reset_stats(vsi);
2766266423Sjfv	vsi->hw_filters_add = 0;
2767266423Sjfv	vsi->hw_filters_del = 0;
2768266423Sjfv
2769279858Sjfv	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2770279858Sjfv
2771279033Sjfv	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2772279033Sjfv	if (err) {
2773266423Sjfv		device_printf(dev,"update vsi params failed %x!!\n",
2774266423Sjfv		   hw->aq.asq_last_status);
2775279033Sjfv		return (err);
2776279033Sjfv	}
2777266423Sjfv
2778266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2779266423Sjfv		struct tx_ring		*txr = &que->txr;
2780266423Sjfv		struct rx_ring 		*rxr = &que->rxr;
2781266423Sjfv		struct i40e_hmc_obj_txq tctx;
2782266423Sjfv		struct i40e_hmc_obj_rxq rctx;
2783266423Sjfv		u32			txctl;
2784266423Sjfv		u16			size;
2785266423Sjfv
2786266423Sjfv
2787266423Sjfv		/* Setup the HMC TX Context  */
2788266423Sjfv		size = que->num_desc * sizeof(struct i40e_tx_desc);
2789266423Sjfv		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2790266423Sjfv		tctx.new_context = 1;
2791279858Sjfv		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2792266423Sjfv		tctx.qlen = que->num_desc;
2793266423Sjfv		tctx.fc_ena = 0;
2794269198Sjfv		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2795269198Sjfv		/* Enable HEAD writeback */
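		/* The head index is written back to the extra u32 reserved
		 * just past the end of the descriptor ring (see the tsize
		 * calculation in ixl_setup_stations). */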
2796269198Sjfv		tctx.head_wb_ena = 1;
2797269198Sjfv		tctx.head_wb_addr = txr->dma.pa +
2798269198Sjfv		    (que->num_desc * sizeof(struct i40e_tx_desc));
2799266423Sjfv		tctx.rdylist_act = 0;
2800266423Sjfv		err = i40e_clear_lan_tx_queue_context(hw, i);
2801266423Sjfv		if (err) {
2802266423Sjfv			device_printf(dev, "Unable to clear TX context\n");
2803266423Sjfv			break;
2804266423Sjfv		}
2805266423Sjfv		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2806266423Sjfv		if (err) {
2807266423Sjfv			device_printf(dev, "Unable to set TX context\n");
2808266423Sjfv			break;
2809266423Sjfv		}
2810266423Sjfv		/* Associate the ring with this PF */
2811266423Sjfv		txctl = I40E_QTX_CTL_PF_QUEUE;
2812266423Sjfv		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2813266423Sjfv		    I40E_QTX_CTL_PF_INDX_MASK);
2814266423Sjfv		wr32(hw, I40E_QTX_CTL(i), txctl);
2815270346Sjfv		ixl_flush(hw);
2816266423Sjfv
2817266423Sjfv		/* Do ring (re)init */
2818270346Sjfv		ixl_init_tx_ring(que);
2819266423Sjfv
2820266423Sjfv		/* Next setup the HMC RX Context  */
2821279858Sjfv		if (vsi->max_frame_size <= MCLBYTES)
2822266423Sjfv			rxr->mbuf_sz = MCLBYTES;
2823266423Sjfv		else
2824266423Sjfv			rxr->mbuf_sz = MJUMPAGESIZE;
2825266423Sjfv
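		/* Largest frame the HW can accept by chaining RX buffers of this size */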
2826266423Sjfv		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2827266423Sjfv
2828266423Sjfv		/* Set up an RX context for the HMC */
2829266423Sjfv		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2830266423Sjfv		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2831266423Sjfv		/* ignore header split for now */
2832266423Sjfv		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2833266423Sjfv		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2834266423Sjfv		    vsi->max_frame_size : max_rxmax;
2835266423Sjfv		rctx.dtype = 0;
2836266423Sjfv		rctx.dsize = 1;	/* do 32byte descriptors */
2837266423Sjfv		rctx.hsplit_0 = 0;  /* no HDR split initially */
2838279858Sjfv		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
2839266423Sjfv		rctx.qlen = que->num_desc;
2840266423Sjfv		rctx.tphrdesc_ena = 1;
2841266423Sjfv		rctx.tphwdesc_ena = 1;
2842266423Sjfv		rctx.tphdata_ena = 0;
2843266423Sjfv		rctx.tphhead_ena = 0;
2844266423Sjfv		rctx.lrxqthresh = 2;
2845266423Sjfv		rctx.crcstrip = 1;
2846266423Sjfv		rctx.l2tsel = 1;
2847266423Sjfv		rctx.showiv = 1;
2848266423Sjfv		rctx.fc_ena = 0;
2849266423Sjfv		rctx.prefena = 1;
2850266423Sjfv
2851266423Sjfv		err = i40e_clear_lan_rx_queue_context(hw, i);
2852266423Sjfv		if (err) {
2853266423Sjfv			device_printf(dev,
2854266423Sjfv			    "Unable to clear RX context %d\n", i);
2855266423Sjfv			break;
2856266423Sjfv		}
2857266423Sjfv		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2858266423Sjfv		if (err) {
2859266423Sjfv			device_printf(dev, "Unable to set RX context %d\n", i);
2860266423Sjfv			break;
2861266423Sjfv		}
2862270346Sjfv		err = ixl_init_rx_ring(que);
2863266423Sjfv		if (err) {
2864266423Sjfv			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2865266423Sjfv			break;
2866266423Sjfv		}
2867299545Serj		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2868279860Sjfv#ifdef DEV_NETMAP
2869279860Sjfv		/* preserve queue */
2870279860Sjfv		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
2871279860Sjfv			struct netmap_adapter *na = NA(vsi->ifp);
2872279860Sjfv			struct netmap_kring *kring = &na->rx_rings[i];
2873279860Sjfv			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2874279860Sjfv			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2875279860Sjfv		} else
2876279860Sjfv#endif /* DEV_NETMAP */
2877266423Sjfv		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2878266423Sjfv	}
2879266423Sjfv	return (err);
2880266423Sjfv}
2881266423Sjfv
2882266423Sjfv
2883266423Sjfv/*********************************************************************
2884266423Sjfv *
2885266423Sjfv *  Free all VSI structs.
2886266423Sjfv *
2887266423Sjfv **********************************************************************/
2888266423Sjfvvoid
2889270346Sjfvixl_free_vsi(struct ixl_vsi *vsi)
2890266423Sjfv{
2891270346Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2892270346Sjfv	struct ixl_queue	*que = vsi->queues;
2893266423Sjfv
2894266423Sjfv	/* Free station queues */
2895266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2896266423Sjfv		struct tx_ring *txr = &que->txr;
2897266423Sjfv		struct rx_ring *rxr = &que->rxr;
2898266423Sjfv
2899266423Sjfv		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2900266423Sjfv			continue;
2901270346Sjfv		IXL_TX_LOCK(txr);
2902270346Sjfv		ixl_free_que_tx(que);
2903266423Sjfv		if (txr->base)
2904271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
2905270346Sjfv		IXL_TX_UNLOCK(txr);
2906270346Sjfv		IXL_TX_LOCK_DESTROY(txr);
2907266423Sjfv
2908266423Sjfv		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2909266423Sjfv			continue;
2910270346Sjfv		IXL_RX_LOCK(rxr);
2911270346Sjfv		ixl_free_que_rx(que);
2912266423Sjfv		if (rxr->base)
2913271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2914270346Sjfv		IXL_RX_UNLOCK(rxr);
2915270346Sjfv		IXL_RX_LOCK_DESTROY(rxr);
2916266423Sjfv
2917266423Sjfv	}
2918266423Sjfv	free(vsi->queues, M_DEVBUF);
2919266423Sjfv
2920266423Sjfv	/* Free VSI filter list */
2921279858Sjfv	ixl_free_mac_filters(vsi);
2922279858Sjfv}
2923279858Sjfv
2924279858Sjfvstatic void
2925279858Sjfvixl_free_mac_filters(struct ixl_vsi *vsi)
2926279858Sjfv{
2927279858Sjfv	struct ixl_mac_filter *f;
2928279858Sjfv
2929266423Sjfv	while (!SLIST_EMPTY(&vsi->ftl)) {
2930266423Sjfv		f = SLIST_FIRST(&vsi->ftl);
2931266423Sjfv		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2932266423Sjfv		free(f, M_DEVBUF);
2933266423Sjfv	}
2934266423Sjfv}
2935266423Sjfv
2936266423Sjfv
2937266423Sjfv/*********************************************************************
2938266423Sjfv *
2939266423Sjfv *  Allocate memory for the VSI (virtual station interface) and its
2940266423Sjfv *  associated queues, rings and the descriptors for each,
2941266423Sjfv *  called only once at attach.
2942266423Sjfv *
2943266423Sjfv **********************************************************************/
2944266423Sjfvstatic int
2945270346Sjfvixl_setup_stations(struct ixl_pf *pf)
2946266423Sjfv{
2947266423Sjfv	device_t		dev = pf->dev;
2948270346Sjfv	struct ixl_vsi		*vsi;
2949270346Sjfv	struct ixl_queue	*que;
2950266423Sjfv	struct tx_ring		*txr;
2951266423Sjfv	struct rx_ring		*rxr;
2952266423Sjfv	int 			rsize, tsize;
2953266423Sjfv	int			error = I40E_SUCCESS;
2954266423Sjfv
2955266423Sjfv	vsi = &pf->vsi;
2956266423Sjfv	vsi->back = (void *)pf;
2957266423Sjfv	vsi->hw = &pf->hw;
2958266423Sjfv	vsi->id = 0;
2959266423Sjfv	vsi->num_vlans = 0;
2960279858Sjfv	vsi->back = pf;
2961266423Sjfv
2962266423Sjfv	/* Get memory for the station queues */
2963266423Sjfv        if (!(vsi->queues =
2964270346Sjfv            (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2965266423Sjfv            vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2966266423Sjfv                device_printf(dev, "Unable to allocate queue memory\n");
2967266423Sjfv                error = ENOMEM;
2968266423Sjfv                goto early;
2969266423Sjfv        }
2970266423Sjfv
2971266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
2972266423Sjfv		que = &vsi->queues[i];
2973270346Sjfv		que->num_desc = ixl_ringsz;
2974266423Sjfv		que->me = i;
2975266423Sjfv		que->vsi = vsi;
2976269198Sjfv		/* mark the queue as active */
2977269198Sjfv		vsi->active_queues |= (u64)1 << que->me;
2978266423Sjfv		txr = &que->txr;
2979266423Sjfv		txr->que = que;
2980269198Sjfv		txr->tail = I40E_QTX_TAIL(que->me);
2981266423Sjfv
2982266423Sjfv		/* Initialize the TX lock */
2983266423Sjfv		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2984266423Sjfv		    device_get_nameunit(dev), que->me);
2985266423Sjfv		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2986266423Sjfv		/* Create the TX descriptor ring */
2987269198Sjfv		tsize = roundup2((que->num_desc *
2988269198Sjfv		    sizeof(struct i40e_tx_desc)) +
2989269198Sjfv		    sizeof(u32), DBA_ALIGN);
2990271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
2991271834Sbz		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2992266423Sjfv			device_printf(dev,
2993266423Sjfv			    "Unable to allocate TX Descriptor memory\n");
2994266423Sjfv			error = ENOMEM;
2995266423Sjfv			goto fail;
2996266423Sjfv		}
2997266423Sjfv		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2998266423Sjfv		bzero((void *)txr->base, tsize);
2999266423Sjfv		/* Now allocate transmit soft structs for the ring */
3000270346Sjfv       		if (ixl_allocate_tx_data(que)) {
3001266423Sjfv			device_printf(dev,
3002266423Sjfv			    "Critical Failure setting up TX structures\n");
3003266423Sjfv			error = ENOMEM;
3004266423Sjfv			goto fail;
3005266423Sjfv       		}
3006266423Sjfv		/* Allocate a buf ring */
3007266423Sjfv		txr->br = buf_ring_alloc(4096, M_DEVBUF,
3008266423Sjfv		    M_WAITOK, &txr->mtx);
3009266423Sjfv		if (txr->br == NULL) {
3010266423Sjfv			device_printf(dev,
3011266423Sjfv			    "Critical Failure setting up TX buf ring\n");
3012266423Sjfv			error = ENOMEM;
3013266423Sjfv			goto fail;
3014266423Sjfv       		}
3015266423Sjfv
3016266423Sjfv		/*
3017266423Sjfv		 * Next the RX queues...
3018266423Sjfv		 */
3019266423Sjfv		rsize = roundup2(que->num_desc *
3020266423Sjfv		    sizeof(union i40e_rx_desc), DBA_ALIGN);
3021266423Sjfv		rxr = &que->rxr;
3022266423Sjfv		rxr->que = que;
3023269198Sjfv		rxr->tail = I40E_QRX_TAIL(que->me);
3024266423Sjfv
3025266423Sjfv		/* Initialize the RX side lock */
3026266423Sjfv		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3027266423Sjfv		    device_get_nameunit(dev), que->me);
3028266423Sjfv		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
3029266423Sjfv
3030271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
3031271834Sbz		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
3032266423Sjfv			device_printf(dev,
3033266423Sjfv			    "Unable to allocate RX Descriptor memory\n");
3034266423Sjfv			error = ENOMEM;
3035266423Sjfv			goto fail;
3036266423Sjfv		}
3037266423Sjfv		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
3038266423Sjfv		bzero((void *)rxr->base, rsize);
3039266423Sjfv
3040266423Sjfv		/* Allocate receive soft structs for the ring */
3041270346Sjfv		if (ixl_allocate_rx_data(que)) {
3042266423Sjfv			device_printf(dev,
3043266423Sjfv			    "Critical Failure setting up receive structs\n");
3044266423Sjfv			error = ENOMEM;
3045266423Sjfv			goto fail;
3046266423Sjfv		}
3047266423Sjfv	}
3048266423Sjfv
3049266423Sjfv	return (0);
3050266423Sjfv
3051266423Sjfvfail:
3052266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3053266423Sjfv		que = &vsi->queues[i];
3054266423Sjfv		rxr = &que->rxr;
3055266423Sjfv		txr = &que->txr;
3056266423Sjfv		if (rxr->base)
3057271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
3058266423Sjfv		if (txr->base)
3059271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
3060266423Sjfv	}
3061266423Sjfv
3062266423Sjfvearly:
3063266423Sjfv	return (error);
3064266423Sjfv}
3065266423Sjfv
3066266423Sjfv/*
3067266423Sjfv** Provide an update to the queue RX
3068266423Sjfv** interrupt moderation value.
3069266423Sjfv*/
3070266423Sjfvstatic void
3071270346Sjfvixl_set_queue_rx_itr(struct ixl_queue *que)
3072266423Sjfv{
3073270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3074266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3075266423Sjfv	struct rx_ring	*rxr = &que->rxr;
3076266423Sjfv	u16		rx_itr;
3077266423Sjfv	u16		rx_latency = 0;
3078266423Sjfv	int		rx_bytes;
3079266423Sjfv
3080266423Sjfv
3081266423Sjfv	/* Idle, do nothing */
3082266423Sjfv	if (rxr->bytes == 0)
3083266423Sjfv		return;
3084266423Sjfv
3085270346Sjfv	if (ixl_dynamic_rx_itr) {
3086266423Sjfv		rx_bytes = rxr->bytes/rxr->itr;
3087266423Sjfv		rx_itr = rxr->itr;
3088266423Sjfv
3089266423Sjfv		/* Adjust latency range */
3090266423Sjfv		switch (rxr->latency) {
3091270346Sjfv		case IXL_LOW_LATENCY:
3092266423Sjfv			if (rx_bytes > 10) {
3093270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3094270346Sjfv				rx_itr = IXL_ITR_20K;
3095266423Sjfv			}
3096266423Sjfv			break;
3097270346Sjfv		case IXL_AVE_LATENCY:
3098266423Sjfv			if (rx_bytes > 20) {
3099270346Sjfv				rx_latency = IXL_BULK_LATENCY;
3100270346Sjfv				rx_itr = IXL_ITR_8K;
3101266423Sjfv			} else if (rx_bytes <= 10) {
3102270346Sjfv				rx_latency = IXL_LOW_LATENCY;
3103270346Sjfv				rx_itr = IXL_ITR_100K;
3104266423Sjfv			}
3105266423Sjfv			break;
3106270346Sjfv		case IXL_BULK_LATENCY:
3107266423Sjfv			if (rx_bytes <= 20) {
3108270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3109270346Sjfv				rx_itr = IXL_ITR_20K;
3110266423Sjfv			}
3111266423Sjfv			break;
3112266423Sjfv		}
3113266423Sjfv
3114266423Sjfv		rxr->latency = rx_latency;
3115266423Sjfv
3116266423Sjfv		if (rx_itr != rxr->itr) {
3117266423Sjfv			/* do an exponential smoothing */
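			/* (a weighted harmonic mean that keeps ~90% of the
			 *  current ITR and ~10% of the newly computed value,
			 *  so the moderation rate moves gradually) */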
3118266423Sjfv			rx_itr = (10 * rx_itr * rxr->itr) /
3119266423Sjfv			    ((9 * rx_itr) + rxr->itr);
3120270346Sjfv			rxr->itr = rx_itr & IXL_MAX_ITR;
3121270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3122266423Sjfv			    que->me), rxr->itr);
3123266423Sjfv		}
3124266423Sjfv	} else { /* We may have toggled to non-dynamic */
3125270346Sjfv		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3126270346Sjfv			vsi->rx_itr_setting = ixl_rx_itr;
3127266423Sjfv		/* Update the hardware if needed */
3128266423Sjfv		if (rxr->itr != vsi->rx_itr_setting) {
3129266423Sjfv			rxr->itr = vsi->rx_itr_setting;
3130270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3131266423Sjfv			    que->me), rxr->itr);
3132266423Sjfv		}
3133266423Sjfv	}
3134266423Sjfv	rxr->bytes = 0;
3135266423Sjfv	rxr->packets = 0;
3136266423Sjfv	return;
3137266423Sjfv}
3138266423Sjfv
3139266423Sjfv
3140266423Sjfv/*
3141266423Sjfv** Provide an update to the queue TX
3142266423Sjfv** interrupt moderation value.
3143266423Sjfv*/
3144266423Sjfvstatic void
3145270346Sjfvixl_set_queue_tx_itr(struct ixl_queue *que)
3146266423Sjfv{
3147270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3148266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3149266423Sjfv	struct tx_ring	*txr = &que->txr;
3150266423Sjfv	u16		tx_itr;
3151266423Sjfv	u16		tx_latency = 0;
3152266423Sjfv	int		tx_bytes;
3153266423Sjfv
3154266423Sjfv
3155266423Sjfv	/* Idle, do nothing */
3156266423Sjfv	if (txr->bytes == 0)
3157266423Sjfv		return;
3158266423Sjfv
3159270346Sjfv	if (ixl_dynamic_tx_itr) {
3160266423Sjfv		tx_bytes = txr->bytes/txr->itr;
3161266423Sjfv		tx_itr = txr->itr;
3162266423Sjfv
3163266423Sjfv		switch (txr->latency) {
3164270346Sjfv		case IXL_LOW_LATENCY:
3165266423Sjfv			if (tx_bytes > 10) {
3166270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3167270346Sjfv				tx_itr = IXL_ITR_20K;
3168266423Sjfv			}
3169266423Sjfv			break;
3170270346Sjfv		case IXL_AVE_LATENCY:
3171266423Sjfv			if (tx_bytes > 20) {
3172270346Sjfv				tx_latency = IXL_BULK_LATENCY;
3173270346Sjfv				tx_itr = IXL_ITR_8K;
3174266423Sjfv			} else if (tx_bytes <= 10) {
3175270346Sjfv				tx_latency = IXL_LOW_LATENCY;
3176270346Sjfv				tx_itr = IXL_ITR_100K;
3177266423Sjfv			}
3178266423Sjfv			break;
3179270346Sjfv		case IXL_BULK_LATENCY:
3180266423Sjfv			if (tx_bytes <= 20) {
3181270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3182270346Sjfv				tx_itr = IXL_ITR_20K;
3183266423Sjfv			}
3184266423Sjfv			break;
3185266423Sjfv		}
3186266423Sjfv
3187266423Sjfv		txr->latency = tx_latency;
3188266423Sjfv
3189266423Sjfv		if (tx_itr != txr->itr) {
3190266423Sjfv			/* do an exponential smoothing */
3191266423Sjfv			tx_itr = (10 * tx_itr * txr->itr) /
3192266423Sjfv			    ((9 * tx_itr) + txr->itr);
3193270346Sjfv			txr->itr = tx_itr & IXL_MAX_ITR;
3194270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3195266423Sjfv			    que->me), txr->itr);
3196266423Sjfv		}
3197266423Sjfv
3198266423Sjfv	} else { /* We may have toggled to non-dynamic */
3199270346Sjfv		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3200270346Sjfv			vsi->tx_itr_setting = ixl_tx_itr;
3201266423Sjfv		/* Update the hardware if needed */
3202266423Sjfv		if (txr->itr != vsi->tx_itr_setting) {
3203266423Sjfv			txr->itr = vsi->tx_itr_setting;
3204270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3205266423Sjfv			    que->me), txr->itr);
3206266423Sjfv		}
3207266423Sjfv	}
3208266423Sjfv	txr->bytes = 0;
3209266423Sjfv	txr->packets = 0;
3210266423Sjfv	return;
3211266423Sjfv}
3212266423Sjfv
3213279858Sjfv#define QUEUE_NAME_LEN 32
3214266423Sjfv
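/*
** Create a sysctl node for the given VSI under the device's sysctl
** tree and attach the standard ethernet statistics beneath it.
*/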
3215266423Sjfvstatic void
3216279858Sjfvixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3217279858Sjfv    struct sysctl_ctx_list *ctx, const char *sysctl_name)
3218279858Sjfv{
3219279858Sjfv	struct sysctl_oid *tree;
3220279858Sjfv	struct sysctl_oid_list *child;
3221279858Sjfv	struct sysctl_oid_list *vsi_list;
3222279858Sjfv
3223279858Sjfv	tree = device_get_sysctl_tree(pf->dev);
3224279858Sjfv	child = SYSCTL_CHILDREN(tree);
3225279858Sjfv	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3226279858Sjfv				   CTLFLAG_RD, NULL, "VSI Number");
3227279858Sjfv	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3228279858Sjfv
3229279858Sjfv	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3230279858Sjfv}
3231279858Sjfv
3232279858Sjfvstatic void
3233270346Sjfvixl_add_hw_stats(struct ixl_pf *pf)
3234266423Sjfv{
3235266423Sjfv	device_t dev = pf->dev;
3236270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
3237270346Sjfv	struct ixl_queue *queues = vsi->queues;
3238269198Sjfv	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3239266423Sjfv
3240266423Sjfv	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3241266423Sjfv	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3242266423Sjfv	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3243279858Sjfv	struct sysctl_oid_list *vsi_list;
3244266423Sjfv
3245279858Sjfv	struct sysctl_oid *queue_node;
3246279858Sjfv	struct sysctl_oid_list *queue_list;
3247266423Sjfv
3248269198Sjfv	struct tx_ring *txr;
3249269198Sjfv	struct rx_ring *rxr;
3250279858Sjfv	char queue_namebuf[QUEUE_NAME_LEN];
3251266423Sjfv
3252266423Sjfv	/* Driver statistics */
3253266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3254266423Sjfv			CTLFLAG_RD, &pf->watchdog_events,
3255266423Sjfv			"Watchdog timeouts");
3256266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3257266423Sjfv			CTLFLAG_RD, &pf->admin_irq,
3258266423Sjfv			"Admin Queue IRQ Handled");
3259266423Sjfv
3260279858Sjfv	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3261279858Sjfv	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3262266423Sjfv
3263266423Sjfv	/* Queue statistics */
3264266423Sjfv	for (int q = 0; q < vsi->num_queues; q++) {
3265269198Sjfv		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3266279858Sjfv		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3267279858Sjfv		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3268266423Sjfv		queue_list = SYSCTL_CHILDREN(queue_node);
3269266423Sjfv
3270269198Sjfv		txr = &(queues[q].txr);
3271269198Sjfv		rxr = &(queues[q].rxr);
3272269198Sjfv
3273269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3274266423Sjfv				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3275266423Sjfv				"m_defrag() failed");
3276269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3277266423Sjfv				CTLFLAG_RD, &(queues[q].dropped_pkts),
3278266423Sjfv				"Driver dropped packets");
3279266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3280266423Sjfv				CTLFLAG_RD, &(queues[q].irqs),
3281266423Sjfv				"irqs on this queue");
3282269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3283266423Sjfv				CTLFLAG_RD, &(queues[q].tso),
3284266423Sjfv				"TSO");
3285269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3286266423Sjfv				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3287266423Sjfv				"Driver tx dma failure in xmit");
3288266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3289266423Sjfv				CTLFLAG_RD, &(txr->no_desc),
3290266423Sjfv				"Queue No Descriptor Available");
3291266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3292266423Sjfv				CTLFLAG_RD, &(txr->total_packets),
3293266423Sjfv				"Queue Packets Transmitted");
3294266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3295270346Sjfv				CTLFLAG_RD, &(txr->tx_bytes),
3296266423Sjfv				"Queue Bytes Transmitted");
3297266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3298266423Sjfv				CTLFLAG_RD, &(rxr->rx_packets),
3299266423Sjfv				"Queue Packets Received");
3300266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3301266423Sjfv				CTLFLAG_RD, &(rxr->rx_bytes),
3302266423Sjfv				"Queue Bytes Received");
3303266423Sjfv	}
3304266423Sjfv
3305266423Sjfv	/* MAC stats */
3306270346Sjfv	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3307266423Sjfv}
3308266423Sjfv
3309266423Sjfvstatic void
3310270346Sjfvixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3311266423Sjfv	struct sysctl_oid_list *child,
3312266423Sjfv	struct i40e_eth_stats *eth_stats)
3313266423Sjfv{
3314270346Sjfv	struct ixl_sysctl_info ctls[] =
3315266423Sjfv	{
3316266423Sjfv		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3317266423Sjfv		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3318266423Sjfv			"Unicast Packets Received"},
3319266423Sjfv		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3320266423Sjfv			"Multicast Packets Received"},
3321266423Sjfv		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3322266423Sjfv			"Broadcast Packets Received"},
3323269198Sjfv		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3324266423Sjfv		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3325266423Sjfv		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3326266423Sjfv		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3327266423Sjfv			"Multicast Packets Transmitted"},
3328266423Sjfv		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3329266423Sjfv			"Broadcast Packets Transmitted"},
3330266423Sjfv		// end
3331266423Sjfv		{0,0,0}
3332266423Sjfv	};
3333266423Sjfv
3334270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3335297753Spfg	while (entry->stat != NULL)
3336266423Sjfv	{
3337266423Sjfv		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3338266423Sjfv				CTLFLAG_RD, entry->stat,
3339266423Sjfv				entry->description);
3340266423Sjfv		entry++;
3341266423Sjfv	}
3342266423Sjfv}
3343266423Sjfv
3344266423Sjfvstatic void
3345270346Sjfvixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3346266423Sjfv	struct sysctl_oid_list *child,
3347266423Sjfv	struct i40e_hw_port_stats *stats)
3348266423Sjfv{
3349269198Sjfv	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3350266423Sjfv				    CTLFLAG_RD, NULL, "Mac Statistics");
3351266423Sjfv	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3352266423Sjfv
3353266423Sjfv	struct i40e_eth_stats *eth_stats = &stats->eth;
3354270346Sjfv	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3355266423Sjfv
3356270346Sjfv	struct ixl_sysctl_info ctls[] =
3357266423Sjfv	{
3358266423Sjfv		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3359266423Sjfv		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3360266423Sjfv		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3361266423Sjfv		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3362266423Sjfv		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3363266423Sjfv		/* Packet Reception Stats */
3364266423Sjfv		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3365266423Sjfv		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3366266423Sjfv		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3367266423Sjfv		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3368266423Sjfv		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3369266423Sjfv		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3370266423Sjfv		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3371266423Sjfv		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3372266423Sjfv		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3373266423Sjfv		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3374266423Sjfv		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3375266423Sjfv		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3376266423Sjfv		/* Packet Transmission Stats */
3377266423Sjfv		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3378266423Sjfv		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3379266423Sjfv		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3380266423Sjfv		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3381266423Sjfv		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3382266423Sjfv		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3383266423Sjfv		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3384266423Sjfv		/* Flow control */
3385266423Sjfv		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3386266423Sjfv		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3387266423Sjfv		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3388266423Sjfv		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3389266423Sjfv		/* End */
3390266423Sjfv		{0,0,0}
3391266423Sjfv	};
3392266423Sjfv
3393270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3394297753Spfg	while (entry->stat != NULL)
3395266423Sjfv	{
3396266423Sjfv		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3397266423Sjfv				CTLFLAG_RD, entry->stat,
3398266423Sjfv				entry->description);
3399266423Sjfv		entry++;
3400266423Sjfv	}
3401266423Sjfv}
3402266423Sjfv
3403284049Sjfv
3404266423Sjfv/*
3405270346Sjfv** ixl_config_rss - set up RSS
3406266423Sjfv**  - note this is done only for the single PF VSI
3407266423Sjfv*/
3408270346Sjfvstatic void ixl_config_rss(struct ixl_vsi *vsi)
3409266423Sjfv{
3410270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3411266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3412266423Sjfv	u32		lut = 0;
3413277084Sjfv	u64		set_hena = 0, hena;
3414277084Sjfv	int		i, j, que_id;
3415277084Sjfv#ifdef RSS
3416277084Sjfv	u32		rss_hash_config;
3417277084Sjfv	u32		rss_seed[IXL_KEYSZ];
3418277084Sjfv#else
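	/* Static default hash key used when the kernel RSS option is absent */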
3419277084Sjfv	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3420277084Sjfv			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3421277084Sjfv			    0x35897377, 0x328b25e1, 0x4fa98922,
3422277084Sjfv			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3423277084Sjfv#endif
3424266423Sjfv
3425277084Sjfv#ifdef RSS
3426277084Sjfv        /* Fetch the configured RSS key */
3427277084Sjfv        rss_getkey((uint8_t *) &rss_seed);
3428277084Sjfv#endif
3429266423Sjfv
3430266423Sjfv	/* Fill out hash function seed */
3431277084Sjfv	for (i = 0; i < IXL_KEYSZ; i++)
3432277084Sjfv                wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3433266423Sjfv
3434266423Sjfv	/* Enable PCTYPES for RSS: */
3435277084Sjfv#ifdef RSS
3436277084Sjfv	rss_hash_config = rss_gethashconfig();
3437277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3438277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3439277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3440277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3441277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3442277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3443277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3444277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3445279033Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3446277151Sjfv		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3447277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3448277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3449277084Sjfv        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3450277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3451277084Sjfv#else
3452266423Sjfv	set_hena =
3453266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3454266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3455266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3456266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3457266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3458266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3459266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3460266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3461266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3462266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3463266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3464277084Sjfv#endif
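	/* Merge the new PCTYPEs into the 64-bit hash-enable mask, which is
	   split across the two 32-bit HENA registers */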
3465266423Sjfv	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3466266423Sjfv	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3467266423Sjfv	hena |= set_hena;
3468266423Sjfv	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3469266423Sjfv	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3470266423Sjfv
3471266423Sjfv	/* Populate the LUT with max no. of queues in round robin fashion */
3472266423Sjfv	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3473266423Sjfv		if (j == vsi->num_queues)
3474266423Sjfv			j = 0;
3475277084Sjfv#ifdef RSS
3476277084Sjfv		/*
3477277084Sjfv		 * Fetch the RSS bucket id for the given indirection entry.
3478277084Sjfv		 * Cap it at the number of configured buckets (which is
3479277084Sjfv		 * num_queues.)
3480277084Sjfv		 */
3481277084Sjfv		que_id = rss_get_indirection_to_bucket(i);
3482277262Sjfv		que_id = que_id % vsi->num_queues;
3483277084Sjfv#else
3484277084Sjfv		que_id = j;
3485277084Sjfv#endif
3486266423Sjfv		/* lut = 4-byte sliding window of 4 lut entries */
3487277084Sjfv		lut = (lut << 8) | (que_id &
3488266423Sjfv		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3489266423Sjfv		/* Every 4th index ((i & 3) == 3) the lut holds 4 entries; write the register */
3490266423Sjfv		if ((i & 3) == 3)
3491266423Sjfv			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3492266423Sjfv	}
3493270346Sjfv	ixl_flush(hw);
3494266423Sjfv}
3495266423Sjfv
3496266423Sjfv
3497266423Sjfv/*
3498266423Sjfv** This routine is run via a vlan config EVENT;
3499266423Sjfv** it enables us to use the HW Filter table since
3500266423Sjfv** we can get the vlan id. This just creates the
3501266423Sjfv** entry in the soft version of the VFTA; init will
3502266423Sjfv** repopulate the real table.
3503266423Sjfv*/
3504266423Sjfvstatic void
3505270346Sjfvixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3506266423Sjfv{
3507270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3508266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3509270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3510266423Sjfv
3511266423Sjfv	if (ifp->if_softc !=  arg)   /* Not our event */
3512266423Sjfv		return;
3513266423Sjfv
3514266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3515266423Sjfv		return;
3516266423Sjfv
3517270346Sjfv	IXL_PF_LOCK(pf);
3518266423Sjfv	++vsi->num_vlans;
3519270346Sjfv	ixl_add_filter(vsi, hw->mac.addr, vtag);
3520270346Sjfv	IXL_PF_UNLOCK(pf);
3521266423Sjfv}
3522266423Sjfv
3523266423Sjfv/*
3524266423Sjfv** This routine is run via a vlan
3525266423Sjfv** unconfig EVENT; it removes our entry
3526266423Sjfv** from the soft vfta.
3527266423Sjfv*/
3528266423Sjfvstatic void
3529270346Sjfvixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3530266423Sjfv{
3531270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3532266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3533270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3534266423Sjfv
3535266423Sjfv	if (ifp->if_softc !=  arg)
3536266423Sjfv		return;
3537266423Sjfv
3538266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3539266423Sjfv		return;
3540266423Sjfv
3541270346Sjfv	IXL_PF_LOCK(pf);
3542266423Sjfv	--vsi->num_vlans;
3543270346Sjfv	ixl_del_filter(vsi, hw->mac.addr, vtag);
3544270346Sjfv	IXL_PF_UNLOCK(pf);
3545266423Sjfv}
3546266423Sjfv
3547266423Sjfv/*
3548266423Sjfv** This routine updates vlan filters; called by init,
3549266423Sjfv** it scans the filter table and then updates the hw
3550266423Sjfv** after a soft reset.
3551266423Sjfv*/
3552266423Sjfvstatic void
3553270346Sjfvixl_setup_vlan_filters(struct ixl_vsi *vsi)
3554266423Sjfv{
3555270346Sjfv	struct ixl_mac_filter	*f;
3556266423Sjfv	int			cnt = 0, flags;
3557266423Sjfv
3558266423Sjfv	if (vsi->num_vlans == 0)
3559266423Sjfv		return;
3560266423Sjfv	/*
3561266423Sjfv	** Scan the filter list for vlan entries,
3562266423Sjfv	** mark them for addition and then call
3563266423Sjfv	** for the AQ update.
3564266423Sjfv	*/
3565266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3566270346Sjfv		if (f->flags & IXL_FILTER_VLAN) {
3567266423Sjfv			f->flags |=
3568270346Sjfv			    (IXL_FILTER_ADD |
3569270346Sjfv			    IXL_FILTER_USED);
3570266423Sjfv			cnt++;
3571266423Sjfv		}
3572266423Sjfv	}
3573266423Sjfv	if (cnt == 0) {
3574266423Sjfv		printf("setup vlan: no filters found!\n");
3575266423Sjfv		return;
3576266423Sjfv	}
3577270346Sjfv	flags = IXL_FILTER_VLAN;
3578270346Sjfv	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3579270346Sjfv	ixl_add_hw_filters(vsi, flags, cnt);
3580266423Sjfv	return;
3581266423Sjfv}
3582266423Sjfv
3583266423Sjfv/*
3584266423Sjfv** Initialize filter list and add filters that the hardware
3585266423Sjfv** needs to know about.
3586266423Sjfv*/
3587266423Sjfvstatic void
3588270346Sjfvixl_init_filters(struct ixl_vsi *vsi)
3589266423Sjfv{
3590269198Sjfv	/* Add broadcast address */
3591279858Sjfv	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3592266423Sjfv}
3593266423Sjfv
3594266423Sjfv/*
3595266423Sjfv** This routine adds multicast filters
3596266423Sjfv*/
3597266423Sjfvstatic void
3598270346Sjfvixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3599266423Sjfv{
3600270346Sjfv	struct ixl_mac_filter *f;
3601266423Sjfv
3602266423Sjfv	/* Does one already exist */
3603270346Sjfv	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3604266423Sjfv	if (f != NULL)
3605266423Sjfv		return;
3606266423Sjfv
3607270346Sjfv	f = ixl_get_filter(vsi);
3608266423Sjfv	if (f == NULL) {
3609266423Sjfv		printf("WARNING: no filter available!!\n");
3610266423Sjfv		return;
3611266423Sjfv	}
3612266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3613270346Sjfv	f->vlan = IXL_VLAN_ANY;
3614270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3615270346Sjfv	    | IXL_FILTER_MC);
3616266423Sjfv
3617266423Sjfv	return;
3618266423Sjfv}
3619266423Sjfv
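/*
** Push every filter currently marked in-use back down to the hardware.
*/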
3620279858Sjfvstatic void
3621279858Sjfvixl_reconfigure_filters(struct ixl_vsi *vsi)
3622279858Sjfv{
3623279858Sjfv
3624279858Sjfv	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3625279858Sjfv}
3626279858Sjfv
3627266423Sjfv/*
3628266423Sjfv** This routine adds macvlan filters
3629266423Sjfv*/
3630266423Sjfvstatic void
3631270346Sjfvixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3632266423Sjfv{
3633270346Sjfv	struct ixl_mac_filter	*f, *tmp;
3634279858Sjfv	struct ixl_pf		*pf;
3635279858Sjfv	device_t		dev;
3636266423Sjfv
3637270346Sjfv	DEBUGOUT("ixl_add_filter: begin");
3638266423Sjfv
3639279858Sjfv	pf = vsi->back;
3640279858Sjfv	dev = pf->dev;
3641279858Sjfv
3642266423Sjfv	/* Does one already exist */
3643270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3644266423Sjfv	if (f != NULL)
3645266423Sjfv		return;
3646266423Sjfv	/*
3647266423Sjfv	** If this is the first vlan being registered, we
3648266423Sjfv	** need to remove the ANY filter that indicates we are
3649266423Sjfv	** not in a vlan, and replace it with a vlan 0 filter.
3650266423Sjfv	*/
3651270346Sjfv	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3652270346Sjfv		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3653266423Sjfv		if (tmp != NULL) {
3654270346Sjfv			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3655270346Sjfv			ixl_add_filter(vsi, macaddr, 0);
3656266423Sjfv		}
3657266423Sjfv	}
3658266423Sjfv
3659270346Sjfv	f = ixl_get_filter(vsi);
3660266423Sjfv	if (f == NULL) {
3661266423Sjfv		device_printf(dev, "WARNING: no filter available!!\n");
3662266423Sjfv		return;
3663266423Sjfv	}
3664266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3665266423Sjfv	f->vlan = vlan;
3666270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3667270346Sjfv	if (f->vlan != IXL_VLAN_ANY)
3668270346Sjfv		f->flags |= IXL_FILTER_VLAN;
3669279858Sjfv	else
3670279858Sjfv		vsi->num_macs++;
3671266423Sjfv
3672270346Sjfv	ixl_add_hw_filters(vsi, f->flags, 1);
3673266423Sjfv	return;
3674266423Sjfv}
3675266423Sjfv
3676266423Sjfvstatic void
3677270346Sjfvixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3678266423Sjfv{
3679270346Sjfv	struct ixl_mac_filter *f;
3680266423Sjfv
3681270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3682266423Sjfv	if (f == NULL)
3683266423Sjfv		return;
3684266423Sjfv
3685270346Sjfv	f->flags |= IXL_FILTER_DEL;
3686270346Sjfv	ixl_del_hw_filters(vsi, 1);
3687279858Sjfv	vsi->num_macs--;
3688266423Sjfv
3689266423Sjfv	/* Check if this is the last vlan removal */
3690270346Sjfv	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3691266423Sjfv		/* Switch back to a non-vlan filter */
3692270346Sjfv		ixl_del_filter(vsi, macaddr, 0);
3693270346Sjfv		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3694266423Sjfv	}
3695266423Sjfv	return;
3696266423Sjfv}
3697266423Sjfv
3698266423Sjfv/*
3699266423Sjfv** Find the filter with both matching mac addr and vlan id
3700266423Sjfv*/
3701270346Sjfvstatic struct ixl_mac_filter *
3702270346Sjfvixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3703266423Sjfv{
3704270346Sjfv	struct ixl_mac_filter	*f;
3705266423Sjfv	bool			match = FALSE;
3706266423Sjfv
3707266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3708266423Sjfv		if (!cmp_etheraddr(f->macaddr, macaddr))
3709266423Sjfv			continue;
3710266423Sjfv		if (f->vlan == vlan) {
3711266423Sjfv			match = TRUE;
3712266423Sjfv			break;
3713266423Sjfv		}
3714266423Sjfv	}
3715266423Sjfv
3716266423Sjfv	if (!match)
3717266423Sjfv		f = NULL;
3718266423Sjfv	return (f);
3719266423Sjfv}
3720266423Sjfv
3721266423Sjfv/*
3722266423Sjfv** This routine takes additions to the vsi filter
3723266423Sjfv** table and creates an Admin Queue call to create
3724266423Sjfv** the filters in the hardware.
3725266423Sjfv*/
3726266423Sjfvstatic void
3727270346Sjfvixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3728266423Sjfv{
3729266423Sjfv	struct i40e_aqc_add_macvlan_element_data *a, *b;
3730270346Sjfv	struct ixl_mac_filter	*f;
3731279858Sjfv	struct ixl_pf		*pf;
3732279858Sjfv	struct i40e_hw		*hw;
3733279858Sjfv	device_t		dev;
3734279858Sjfv	int			err, j = 0;
3735266423Sjfv
3736279858Sjfv	pf = vsi->back;
3737279858Sjfv	dev = pf->dev;
3738279858Sjfv	hw = &pf->hw;
3739279858Sjfv	IXL_PF_LOCK_ASSERT(pf);
3740279858Sjfv
3741266423Sjfv	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3742266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
3743266423Sjfv	if (a == NULL) {
3744277084Sjfv		device_printf(dev, "add_hw_filters failed to get memory\n");
3745266423Sjfv		return;
3746266423Sjfv	}
3747266423Sjfv
3748266423Sjfv	/*
3749266423Sjfv	** Scan the filter list, each time we find one
3750266423Sjfv	** we add it to the admin queue array and turn off
3751266423Sjfv	** the add bit.
3752266423Sjfv	*/
3753266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3754266423Sjfv		if (f->flags == flags) {
3755266423Sjfv			b = &a[j]; // a pox on fvl long names :)
3756266423Sjfv			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3757279858Sjfv			if (f->vlan == IXL_VLAN_ANY) {
3758279858Sjfv				b->vlan_tag = 0;
3759279858Sjfv				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3760279858Sjfv			} else {
3761279858Sjfv				b->vlan_tag = f->vlan;
3762279858Sjfv				b->flags = 0;
3763279858Sjfv			}
3764279858Sjfv			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3765270346Sjfv			f->flags &= ~IXL_FILTER_ADD;
3766266423Sjfv			j++;
3767266423Sjfv		}
3768266423Sjfv		if (j == cnt)
3769266423Sjfv			break;
3770266423Sjfv	}
3771266423Sjfv	if (j > 0) {
3772266423Sjfv		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3773266423Sjfv		if (err)
3774279033Sjfv			device_printf(dev, "aq_add_macvlan err %d, "
3775279033Sjfv			    "aq_error %d\n", err, hw->aq.asq_last_status);
3776266423Sjfv		else
3777266423Sjfv			vsi->hw_filters_add += j;
3778266423Sjfv	}
3779266423Sjfv	free(a, M_DEVBUF);
3780266423Sjfv	return;
3781266423Sjfv}
3782266423Sjfv
3783266423Sjfv/*
3784266423Sjfv** This routine takes removals in the vsi filter
3785266423Sjfv** table and creates an Admin Queue call to delete
3786266423Sjfv** the filters in the hardware.
3787266423Sjfv*/
3788266423Sjfvstatic void
3789270346Sjfvixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3790266423Sjfv{
3791266423Sjfv	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3792279858Sjfv	struct ixl_pf		*pf;
3793279858Sjfv	struct i40e_hw		*hw;
3794279858Sjfv	device_t		dev;
3795270346Sjfv	struct ixl_mac_filter	*f, *f_temp;
3796266423Sjfv	int			err, j = 0;
3797266423Sjfv
3798270346Sjfv	DEBUGOUT("ixl_del_hw_filters: begin\n");
3799266423Sjfv
3800279858Sjfv	pf = vsi->back;
3801279858Sjfv	hw = &pf->hw;
3802279858Sjfv	dev = pf->dev;
3803279858Sjfv
3804266423Sjfv	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3805266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
3806266423Sjfv	if (d == NULL) {
3807266423Sjfv		printf("del hw filter failed to get memory\n");
3808266423Sjfv		return;
3809266423Sjfv	}
3810266423Sjfv
3811266423Sjfv	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3812270346Sjfv		if (f->flags & IXL_FILTER_DEL) {
3813266423Sjfv			e = &d[j]; // a pox on fvl long names :)
3814266423Sjfv			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3815270346Sjfv			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3816266423Sjfv			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3817266423Sjfv			/* delete entry from vsi list */
3818270346Sjfv			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3819266423Sjfv			free(f, M_DEVBUF);
3820266423Sjfv			j++;
3821266423Sjfv		}
3822266423Sjfv		if (j == cnt)
3823266423Sjfv			break;
3824266423Sjfv	}
3825266423Sjfv	if (j > 0) {
3826266423Sjfv		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3827266423Sjfv		/* NOTE: returns ENOENT every time but seems to work fine,
3828266423Sjfv		   so we'll ignore that specific error. */
3829277084Sjfv		// TODO: Does this still occur on current firmwares?
3830266423Sjfv		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3831266423Sjfv			int sc = 0;
3832266423Sjfv			for (int i = 0; i < j; i++)
3833266423Sjfv				sc += (!d[i].error_code);
3834266423Sjfv			vsi->hw_filters_del += sc;
3835266423Sjfv			device_printf(dev,
3836266423Sjfv			    "Failed to remove %d/%d filters, aq error %d\n",
3837266423Sjfv			    j - sc, j, hw->aq.asq_last_status);
3838266423Sjfv		} else
3839266423Sjfv			vsi->hw_filters_del += j;
3840266423Sjfv	}
3841266423Sjfv	free(d, M_DEVBUF);
3842266423Sjfv
3843270346Sjfv	DEBUGOUT("ixl_del_hw_filters: end\n");
3844266423Sjfv	return;
3845266423Sjfv}
3846266423Sjfv
3847279858Sjfvstatic int
3848270346Sjfvixl_enable_rings(struct ixl_vsi *vsi)
3849266423Sjfv{
3850279858Sjfv	struct ixl_pf	*pf = vsi->back;
3851279858Sjfv	struct i40e_hw	*hw = &pf->hw;
3852279858Sjfv	int		index, error;
3853266423Sjfv	u32		reg;
3854266423Sjfv
3855279858Sjfv	error = 0;
3856266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3857279858Sjfv		index = vsi->first_queue + i;
3858279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, TRUE);
3859266423Sjfv
3860279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
3861266423Sjfv		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3862266423Sjfv		    I40E_QTX_ENA_QENA_STAT_MASK;
3863279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
3864266423Sjfv		/* Verify the enable took */
3865266423Sjfv		for (int j = 0; j < 10; j++) {
3866279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
3867266423Sjfv			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3868266423Sjfv				break;
3869266423Sjfv			i40e_msec_delay(10);
3870266423Sjfv		}
3871279858Sjfv		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3872279858Sjfv			device_printf(pf->dev, "TX queue %d disabled!\n",
3873279858Sjfv			    index);
3874279858Sjfv			error = ETIMEDOUT;
3875279858Sjfv		}
3876266423Sjfv
3877279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
3878266423Sjfv		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3879266423Sjfv		    I40E_QRX_ENA_QENA_STAT_MASK;
3880279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
3881266423Sjfv		/* Verify the enable took */
3882266423Sjfv		for (int j = 0; j < 10; j++) {
3883279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
3884266423Sjfv			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3885266423Sjfv				break;
3886266423Sjfv			i40e_msec_delay(10);
3887266423Sjfv		}
3888279858Sjfv		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
3889279858Sjfv			device_printf(pf->dev, "RX queue %d disabled!\n",
3890279858Sjfv			    index);
3891279858Sjfv			error = ETIMEDOUT;
3892279858Sjfv		}
3893266423Sjfv	}
3894279858Sjfv
3895279858Sjfv	return (error);
3896266423Sjfv}
3897266423Sjfv
3898279858Sjfvstatic int
3899270346Sjfvixl_disable_rings(struct ixl_vsi *vsi)
3900266423Sjfv{
3901279858Sjfv	struct ixl_pf	*pf = vsi->back;
3902279858Sjfv	struct i40e_hw	*hw = &pf->hw;
3903279858Sjfv	int		index, error;
3904266423Sjfv	u32		reg;
3905266423Sjfv
3906279858Sjfv	error = 0;
3907266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3908279858Sjfv		index = vsi->first_queue + i;
3909279858Sjfv
3910279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, FALSE);
3911266423Sjfv		i40e_usec_delay(500);
3912266423Sjfv
3913279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
3914266423Sjfv		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3915279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
3916266423Sjfv		/* Verify the disable took */
3917266423Sjfv		for (int j = 0; j < 10; j++) {
3918279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
3919266423Sjfv			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3920266423Sjfv				break;
3921266423Sjfv			i40e_msec_delay(10);
3922266423Sjfv		}
3923279858Sjfv		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3924279858Sjfv			device_printf(pf->dev, "TX queue %d still enabled!\n",
3925279858Sjfv			    index);
3926279858Sjfv			error = ETIMEDOUT;
3927279858Sjfv		}
3928266423Sjfv
3929279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
3930266423Sjfv		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3931279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
3932266423Sjfv		/* Verify the disable took */
3933266423Sjfv		for (int j = 0; j < 10; j++) {
3934279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
3935266423Sjfv			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3936266423Sjfv				break;
3937266423Sjfv			i40e_msec_delay(10);
3938266423Sjfv		}
3939279858Sjfv		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3940279858Sjfv			device_printf(pf->dev, "RX queue %d still enabled!\n",
3941279858Sjfv			    index);
3942279858Sjfv			error = ETIMEDOUT;
3943279858Sjfv		}
3944266423Sjfv	}
3945279858Sjfv
3946279858Sjfv	return (error);
3947266423Sjfv}
3948266423Sjfv
3949269198Sjfv/**
3950270346Sjfv * ixl_handle_mdd_event
3951269198Sjfv *
3952269198Sjfv * Called from the interrupt handler to identify possibly malicious VFs
3953269198Sjfv * (it also detects events triggered by the PF itself)
3954269198Sjfv **/
3955270346Sjfvstatic void ixl_handle_mdd_event(struct ixl_pf *pf)
3956269198Sjfv{
3957269198Sjfv	struct i40e_hw *hw = &pf->hw;
3958269198Sjfv	device_t dev = pf->dev;
3959269198Sjfv	bool mdd_detected = false;
3960269198Sjfv	bool pf_mdd_detected = false;
3961269198Sjfv	u32 reg;
3962269198Sjfv
3963269198Sjfv	/* find what triggered the MDD event */
3964269198Sjfv	reg = rd32(hw, I40E_GL_MDET_TX);
3965269198Sjfv	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3966269198Sjfv		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3967269198Sjfv				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3968269198Sjfv		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3969269198Sjfv				I40E_GL_MDET_TX_EVENT_SHIFT;
3970269198Sjfv		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3971269198Sjfv				I40E_GL_MDET_TX_QUEUE_SHIFT;
3972269198Sjfv		device_printf(dev,
3973269198Sjfv			 "Malicious Driver Detection event 0x%02x"
3974269198Sjfv			 " on TX queue %d pf number 0x%02x\n",
3975269198Sjfv			 event, queue, pf_num);
3976269198Sjfv		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3977269198Sjfv		mdd_detected = true;
3978269198Sjfv	}
3979269198Sjfv	reg = rd32(hw, I40E_GL_MDET_RX);
3980269198Sjfv	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3981269198Sjfv		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3982269198Sjfv				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3983269198Sjfv		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3984269198Sjfv				I40E_GL_MDET_RX_EVENT_SHIFT;
3985269198Sjfv		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3986269198Sjfv				I40E_GL_MDET_RX_QUEUE_SHIFT;
3987269198Sjfv		device_printf(dev,
3988269198Sjfv			 "Malicious Driver Detection event 0x%02x"
3989269198Sjfv			 " on RX queue %d of function 0x%02x\n",
3990269198Sjfv			 event, queue, func);
3991269198Sjfv		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3992269198Sjfv		mdd_detected = true;
3993269198Sjfv	}
3994269198Sjfv
3995269198Sjfv	if (mdd_detected) {
3996269198Sjfv		reg = rd32(hw, I40E_PF_MDET_TX);
3997269198Sjfv		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3998269198Sjfv			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3999269198Sjfv			device_printf(dev,
4000269198Sjfv				 "MDD TX event is for this function 0x%08x",
4001269198Sjfv				 reg);
4002269198Sjfv			pf_mdd_detected = true;
4003269198Sjfv		}
4004269198Sjfv		reg = rd32(hw, I40E_PF_MDET_RX);
4005269198Sjfv		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4006269198Sjfv			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4007269198Sjfv			device_printf(dev,
4008269198Sjfv				 "MDD RX event is for this function 0x%08x",
4009269198Sjfv				 reg);
4010269198Sjfv			pf_mdd_detected = true;
4011269198Sjfv		}
4012269198Sjfv	}
4013269198Sjfv
4014269198Sjfv	/* re-enable mdd interrupt cause */
4015269198Sjfv	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4016269198Sjfv	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4017269198Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4018270346Sjfv	ixl_flush(hw);
4019269198Sjfv}
4020269198Sjfv
4021266423Sjfvstatic void
4022270346Sjfvixl_enable_intr(struct ixl_vsi *vsi)
4023266423Sjfv{
4024266423Sjfv	struct i40e_hw		*hw = vsi->hw;
4025270346Sjfv	struct ixl_queue	*que = vsi->queues;
4026266423Sjfv
4027270346Sjfv	if (ixl_enable_msix) {
4028270346Sjfv		ixl_enable_adminq(hw);
4029266423Sjfv		for (int i = 0; i < vsi->num_queues; i++, que++)
4030270346Sjfv			ixl_enable_queue(hw, que->me);
4031266423Sjfv	} else
4032270346Sjfv		ixl_enable_legacy(hw);
4033266423Sjfv}
4034266423Sjfv
4035266423Sjfvstatic void
4036279858Sjfvixl_disable_rings_intr(struct ixl_vsi *vsi)
4037266423Sjfv{
4038266423Sjfv	struct i40e_hw		*hw = vsi->hw;
4039270346Sjfv	struct ixl_queue	*que = vsi->queues;
4040266423Sjfv
4041279858Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++)
4042279858Sjfv		ixl_disable_queue(hw, que->me);
4043279858Sjfv}
4044279858Sjfv
4045279858Sjfvstatic void
4046279858Sjfvixl_disable_intr(struct ixl_vsi *vsi)
4047279858Sjfv{
4048279858Sjfv	struct i40e_hw		*hw = vsi->hw;
4049279858Sjfv
4050279858Sjfv	if (ixl_enable_msix)
4051270346Sjfv		ixl_disable_adminq(hw);
4052279858Sjfv	else
4053270346Sjfv		ixl_disable_legacy(hw);
4054266423Sjfv}
4055266423Sjfv
4056266423Sjfvstatic void
4057270346Sjfvixl_enable_adminq(struct i40e_hw *hw)
4058266423Sjfv{
4059266423Sjfv	u32		reg;
4060266423Sjfv
4061266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4062266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4063270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4064266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4065270346Sjfv	ixl_flush(hw);
4066266423Sjfv	return;
4067266423Sjfv}
4068266423Sjfv
4069266423Sjfvstatic void
4070270346Sjfvixl_disable_adminq(struct i40e_hw *hw)
4071266423Sjfv{
4072266423Sjfv	u32		reg;
4073266423Sjfv
4074270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4075266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4076266423Sjfv
4077266423Sjfv	return;
4078266423Sjfv}
4079266423Sjfv
4080266423Sjfvstatic void
4081270346Sjfvixl_enable_queue(struct i40e_hw *hw, int id)
4082266423Sjfv{
4083266423Sjfv	u32		reg;
4084266423Sjfv
4085266423Sjfv	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4086266423Sjfv	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4087270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4088266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4089266423Sjfv}
4090266423Sjfv
4091266423Sjfvstatic void
4092270346Sjfvixl_disable_queue(struct i40e_hw *hw, int id)
4093266423Sjfv{
4094266423Sjfv	u32		reg;
4095266423Sjfv
4096270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4097266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4098266423Sjfv
4099266423Sjfv	return;
4100266423Sjfv}
4101266423Sjfv
4102266423Sjfvstatic void
4103270346Sjfvixl_enable_legacy(struct i40e_hw *hw)
4104266423Sjfv{
4105266423Sjfv	u32		reg;
4106266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4107266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4108270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4109266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4110266423Sjfv}
4111266423Sjfv
4112266423Sjfvstatic void
4113270346Sjfvixl_disable_legacy(struct i40e_hw *hw)
4114266423Sjfv{
4115266423Sjfv	u32		reg;
4116266423Sjfv
4117270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4118266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4119266423Sjfv
4120266423Sjfv	return;
4121266423Sjfv}
4122266423Sjfv
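/*
** Collect the port-level hardware statistics, then refresh the
** PF VSI stats and the stats of any enabled VFs.  Registers are
** running counters; the first reading is saved as an offset so
** reported values count from zero.
*/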
4123266423Sjfvstatic void
4124270346Sjfvixl_update_stats_counters(struct ixl_pf *pf)
4125266423Sjfv{
4126266423Sjfv	struct i40e_hw	*hw = &pf->hw;
4127279858Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
4128279858Sjfv	struct ixl_vf	*vf;
4129269198Sjfv
4130266423Sjfv	struct i40e_hw_port_stats *nsd = &pf->stats;
4131266423Sjfv	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4132266423Sjfv
4133266423Sjfv	/* Update hw stats */
4134270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4135266423Sjfv			   pf->stat_offsets_loaded,
4136266423Sjfv			   &osd->crc_errors, &nsd->crc_errors);
4137270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4138266423Sjfv			   pf->stat_offsets_loaded,
4139266423Sjfv			   &osd->illegal_bytes, &nsd->illegal_bytes);
4140270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4141266423Sjfv			   I40E_GLPRT_GORCL(hw->port),
4142266423Sjfv			   pf->stat_offsets_loaded,
4143266423Sjfv			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4144270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4145266423Sjfv			   I40E_GLPRT_GOTCL(hw->port),
4146266423Sjfv			   pf->stat_offsets_loaded,
4147266423Sjfv			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4148270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4149266423Sjfv			   pf->stat_offsets_loaded,
4150266423Sjfv			   &osd->eth.rx_discards,
4151266423Sjfv			   &nsd->eth.rx_discards);
4152270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4153266423Sjfv			   I40E_GLPRT_UPRCL(hw->port),
4154266423Sjfv			   pf->stat_offsets_loaded,
4155266423Sjfv			   &osd->eth.rx_unicast,
4156266423Sjfv			   &nsd->eth.rx_unicast);
4157270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4158266423Sjfv			   I40E_GLPRT_UPTCL(hw->port),
4159266423Sjfv			   pf->stat_offsets_loaded,
4160266423Sjfv			   &osd->eth.tx_unicast,
4161266423Sjfv			   &nsd->eth.tx_unicast);
4162270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4163266423Sjfv			   I40E_GLPRT_MPRCL(hw->port),
4164266423Sjfv			   pf->stat_offsets_loaded,
4165266423Sjfv			   &osd->eth.rx_multicast,
4166266423Sjfv			   &nsd->eth.rx_multicast);
4167270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4168266423Sjfv			   I40E_GLPRT_MPTCL(hw->port),
4169266423Sjfv			   pf->stat_offsets_loaded,
4170266423Sjfv			   &osd->eth.tx_multicast,
4171266423Sjfv			   &nsd->eth.tx_multicast);
4172270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4173266423Sjfv			   I40E_GLPRT_BPRCL(hw->port),
4174266423Sjfv			   pf->stat_offsets_loaded,
4175266423Sjfv			   &osd->eth.rx_broadcast,
4176266423Sjfv			   &nsd->eth.rx_broadcast);
4177270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4178266423Sjfv			   I40E_GLPRT_BPTCL(hw->port),
4179266423Sjfv			   pf->stat_offsets_loaded,
4180266423Sjfv			   &osd->eth.tx_broadcast,
4181266423Sjfv			   &nsd->eth.tx_broadcast);
4182266423Sjfv
4183270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4184266423Sjfv			   pf->stat_offsets_loaded,
4185266423Sjfv			   &osd->tx_dropped_link_down,
4186266423Sjfv			   &nsd->tx_dropped_link_down);
4187270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4188266423Sjfv			   pf->stat_offsets_loaded,
4189266423Sjfv			   &osd->mac_local_faults,
4190266423Sjfv			   &nsd->mac_local_faults);
4191270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4192266423Sjfv			   pf->stat_offsets_loaded,
4193266423Sjfv			   &osd->mac_remote_faults,
4194266423Sjfv			   &nsd->mac_remote_faults);
4195270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4196266423Sjfv			   pf->stat_offsets_loaded,
4197266423Sjfv			   &osd->rx_length_errors,
4198266423Sjfv			   &nsd->rx_length_errors);
4199266423Sjfv
4200269198Sjfv	/* Flow control (LFC) stats */
4201270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4202266423Sjfv			   pf->stat_offsets_loaded,
4203266423Sjfv			   &osd->link_xon_rx, &nsd->link_xon_rx);
4204270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4205266423Sjfv			   pf->stat_offsets_loaded,
4206266423Sjfv			   &osd->link_xon_tx, &nsd->link_xon_tx);
4207270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4208266423Sjfv			   pf->stat_offsets_loaded,
4209266423Sjfv			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
4210270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4211266423Sjfv			   pf->stat_offsets_loaded,
4212266423Sjfv			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
4213266423Sjfv
4214269198Sjfv	/* Packet size stats rx */
4215270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4216266423Sjfv			   I40E_GLPRT_PRC64L(hw->port),
4217266423Sjfv			   pf->stat_offsets_loaded,
4218266423Sjfv			   &osd->rx_size_64, &nsd->rx_size_64);
4219270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4220266423Sjfv			   I40E_GLPRT_PRC127L(hw->port),
4221266423Sjfv			   pf->stat_offsets_loaded,
4222266423Sjfv			   &osd->rx_size_127, &nsd->rx_size_127);
4223270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4224266423Sjfv			   I40E_GLPRT_PRC255L(hw->port),
4225266423Sjfv			   pf->stat_offsets_loaded,
4226266423Sjfv			   &osd->rx_size_255, &nsd->rx_size_255);
4227270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4228266423Sjfv			   I40E_GLPRT_PRC511L(hw->port),
4229266423Sjfv			   pf->stat_offsets_loaded,
4230266423Sjfv			   &osd->rx_size_511, &nsd->rx_size_511);
4231270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4232266423Sjfv			   I40E_GLPRT_PRC1023L(hw->port),
4233266423Sjfv			   pf->stat_offsets_loaded,
4234266423Sjfv			   &osd->rx_size_1023, &nsd->rx_size_1023);
4235270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4236266423Sjfv			   I40E_GLPRT_PRC1522L(hw->port),
4237266423Sjfv			   pf->stat_offsets_loaded,
4238266423Sjfv			   &osd->rx_size_1522, &nsd->rx_size_1522);
4239270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4240266423Sjfv			   I40E_GLPRT_PRC9522L(hw->port),
4241266423Sjfv			   pf->stat_offsets_loaded,
4242266423Sjfv			   &osd->rx_size_big, &nsd->rx_size_big);
4243266423Sjfv
4244269198Sjfv	/* Packet size stats tx */
4245270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4246266423Sjfv			   I40E_GLPRT_PTC64L(hw->port),
4247266423Sjfv			   pf->stat_offsets_loaded,
4248266423Sjfv			   &osd->tx_size_64, &nsd->tx_size_64);
4249270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4250266423Sjfv			   I40E_GLPRT_PTC127L(hw->port),
4251266423Sjfv			   pf->stat_offsets_loaded,
4252266423Sjfv			   &osd->tx_size_127, &nsd->tx_size_127);
4253270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4254266423Sjfv			   I40E_GLPRT_PTC255L(hw->port),
4255266423Sjfv			   pf->stat_offsets_loaded,
4256266423Sjfv			   &osd->tx_size_255, &nsd->tx_size_255);
4257270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4258266423Sjfv			   I40E_GLPRT_PTC511L(hw->port),
4259266423Sjfv			   pf->stat_offsets_loaded,
4260266423Sjfv			   &osd->tx_size_511, &nsd->tx_size_511);
4261270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4262266423Sjfv			   I40E_GLPRT_PTC1023L(hw->port),
4263266423Sjfv			   pf->stat_offsets_loaded,
4264266423Sjfv			   &osd->tx_size_1023, &nsd->tx_size_1023);
4265270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4266266423Sjfv			   I40E_GLPRT_PTC1522L(hw->port),
4267266423Sjfv			   pf->stat_offsets_loaded,
4268266423Sjfv			   &osd->tx_size_1522, &nsd->tx_size_1522);
4269270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4270266423Sjfv			   I40E_GLPRT_PTC9522L(hw->port),
4271266423Sjfv			   pf->stat_offsets_loaded,
4272266423Sjfv			   &osd->tx_size_big, &nsd->tx_size_big);
4273266423Sjfv
4274270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4275266423Sjfv			   pf->stat_offsets_loaded,
4276266423Sjfv			   &osd->rx_undersize, &nsd->rx_undersize);
4277270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4278266423Sjfv			   pf->stat_offsets_loaded,
4279266423Sjfv			   &osd->rx_fragments, &nsd->rx_fragments);
4280270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4281266423Sjfv			   pf->stat_offsets_loaded,
4282266423Sjfv			   &osd->rx_oversize, &nsd->rx_oversize);
4283270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4284266423Sjfv			   pf->stat_offsets_loaded,
4285266423Sjfv			   &osd->rx_jabber, &nsd->rx_jabber);
4286266423Sjfv	pf->stat_offsets_loaded = true;
4287269198Sjfv	/* End hw stats */
4288266423Sjfv
4289266423Sjfv	/* Update vsi stats */
4290279858Sjfv	ixl_update_vsi_stats(vsi);
4291266423Sjfv
4292279858Sjfv	for (int i = 0; i < pf->num_vfs; i++) {
4293279858Sjfv		vf = &pf->vfs[i];
4294279858Sjfv		if (vf->vf_flags & VF_FLAG_ENABLED)
4295279858Sjfv			ixl_update_eth_stats(&pf->vfs[i].vsi);
4296279858Sjfv	}
4297266423Sjfv}
4298266423Sjfv
4299266423Sjfv/*
4300266423Sjfv** Tasklet handler for MSIX Adminq interrupts
4301266423Sjfv**  - do outside interrupt since it might sleep
4302266423Sjfv*/
4303266423Sjfvstatic void
4304270346Sjfvixl_do_adminq(void *context, int pending)
4305266423Sjfv{
4306270346Sjfv	struct ixl_pf			*pf = context;
4307266423Sjfv	struct i40e_hw			*hw = &pf->hw;
4308270346Sjfv	struct ixl_vsi			*vsi = &pf->vsi;
4309266423Sjfv	struct i40e_arq_event_info	event;
4310266423Sjfv	i40e_status			ret;
4311266423Sjfv	u32				reg, loop = 0;
4312266423Sjfv	u16				opcode, result;
4313266423Sjfv
4314274205Sjfv	event.buf_len = IXL_AQ_BUF_SZ;
4315274205Sjfv	event.msg_buf = malloc(event.buf_len,
4316266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
4317266423Sjfv	if (!event.msg_buf) {
4318266423Sjfv		printf("Unable to allocate adminq memory\n");
4319266423Sjfv		return;
4320266423Sjfv	}
4321266423Sjfv
4322279858Sjfv	IXL_PF_LOCK(pf);
4323266423Sjfv	/* clean and process any events */
4324266423Sjfv	do {
4325266423Sjfv		ret = i40e_clean_arq_element(hw, &event, &result);
4326266423Sjfv		if (ret)
4327266423Sjfv			break;
4328266423Sjfv		opcode = LE16_TO_CPU(event.desc.opcode);
4329266423Sjfv		switch (opcode) {
4330266423Sjfv		case i40e_aqc_opc_get_link_status:
4331279858Sjfv			ixl_link_event(pf, &event);
4332270346Sjfv			ixl_update_link_status(pf);
4333266423Sjfv			break;
4334266423Sjfv		case i40e_aqc_opc_send_msg_to_pf:
4335279858Sjfv#ifdef PCI_IOV
4336279858Sjfv			ixl_handle_vf_msg(pf, &event);
4337279858Sjfv#endif
4338266423Sjfv			break;
4339266423Sjfv		case i40e_aqc_opc_event_lan_overflow:
4340266423Sjfv			break;
4341266423Sjfv		default:
4342270346Sjfv#ifdef IXL_DEBUG
4343266423Sjfv			printf("AdminQ unknown event %x\n", opcode);
4344266423Sjfv#endif
4345266423Sjfv			break;
4346266423Sjfv		}
4347266423Sjfv
4348270346Sjfv	} while (result && (loop++ < IXL_ADM_LIMIT));
4349266423Sjfv
4350266423Sjfv	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4351269198Sjfv	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4352266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4353266423Sjfv	free(event.msg_buf, M_DEVBUF);
4354266423Sjfv
4355279858Sjfv	/*
4356279858Sjfv	 * If there are still messages to process, reschedule ourselves.
4357279858Sjfv	 * Otherwise, re-enable our interrupt and go to sleep.
4358279858Sjfv	 */
4359279858Sjfv	if (result > 0)
4360279858Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
4361266423Sjfv	else
4362270346Sjfv		ixl_enable_intr(vsi);
4363279858Sjfv
4364279858Sjfv	IXL_PF_UNLOCK(pf);
4365266423Sjfv}
4366266423Sjfv
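/*
** Sysctl handler for the debug dump: writing a '1' to the node
** calls ixl_print_debug_info().
*/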
4367266423Sjfvstatic int
4368270346Sjfvixl_debug_info(SYSCTL_HANDLER_ARGS)
4369266423Sjfv{
4370270346Sjfv	struct ixl_pf	*pf;
4371266423Sjfv	int		error, input = 0;
4372266423Sjfv
4373266423Sjfv	error = sysctl_handle_int(oidp, &input, 0, req);
4374266423Sjfv
4375266423Sjfv	if (error || !req->newptr)
4376266423Sjfv		return (error);
4377266423Sjfv
4378266423Sjfv	if (input == 1) {
4379270346Sjfv		pf = (struct ixl_pf *)arg1;
4380270346Sjfv		ixl_print_debug_info(pf);
4381266423Sjfv	}
4382266423Sjfv
4383266423Sjfv	return (error);
4384266423Sjfv}
4385266423Sjfv
4386266423Sjfvstatic void
4387270346Sjfvixl_print_debug_info(struct ixl_pf *pf)
4388266423Sjfv{
4389266423Sjfv	struct i40e_hw		*hw = &pf->hw;
4390270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
4391270346Sjfv	struct ixl_queue	*que = vsi->queues;
4392266423Sjfv	struct rx_ring		*rxr = &que->rxr;
4393266423Sjfv	struct tx_ring		*txr = &que->txr;
4394266423Sjfv	u32			reg;
4395266423Sjfv
4396266423Sjfv
4397270799Sbz	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
4398270799Sbz	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
4399266423Sjfv	printf("RX next check = %x\n", rxr->next_check);
4400270799Sbz	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
4401270799Sbz	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
4402266423Sjfv	printf("TX desc avail = %x\n", txr->avail);
4403266423Sjfv
4404266423Sjfv	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4405266423Sjfv	printf("RX Bytes = %x\n", reg);
4406266423Sjfv	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4407266423Sjfv	printf("Port RX Bytes = %x\n", reg);
4408266423Sjfv	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4409266423Sjfv	printf("RX discard = %x\n", reg);
4410266423Sjfv	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4411266423Sjfv	printf("Port RX discard = %x\n", reg);
4412266423Sjfv
4413266423Sjfv	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4414266423Sjfv	printf("TX errors = %x\n", reg);
4415266423Sjfv	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4416266423Sjfv	printf("TX Bytes = %x\n", reg);
4417266423Sjfv
4418266423Sjfv	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4419266423Sjfv	printf("RX undersize = %x\n", reg);
4420266423Sjfv	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4421266423Sjfv	printf("RX fragments = %x\n", reg);
4422266423Sjfv	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4423266423Sjfv	printf("RX oversize = %x\n", reg);
4424266423Sjfv	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4425266423Sjfv	printf("RX length error = %x\n", reg);
4426266423Sjfv	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4427266423Sjfv	printf("mac remote fault = %x\n", reg);
4428266423Sjfv	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4429266423Sjfv	printf("mac local fault = %x\n", reg);
4430266423Sjfv}
4431266423Sjfv
4432266423Sjfv/**
4433266423Sjfv * Update VSI-specific ethernet statistics counters.
4434266423Sjfv **/
4435270346Sjfvvoid ixl_update_eth_stats(struct ixl_vsi *vsi)
4436266423Sjfv{
4437270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4438266423Sjfv	struct i40e_hw *hw = &pf->hw;
4439266423Sjfv	struct i40e_eth_stats *es;
4440266423Sjfv	struct i40e_eth_stats *oes;
4441272227Sglebius	struct i40e_hw_port_stats *nsd;
4442266423Sjfv	u16 stat_idx = vsi->info.stat_counter_idx;
4443266423Sjfv
4444266423Sjfv	es = &vsi->eth_stats;
4445266423Sjfv	oes = &vsi->eth_stats_offsets;
4446272227Sglebius	nsd = &pf->stats;
4447266423Sjfv
4448266423Sjfv	/* Gather up the stats that the hw collects */
4449270346Sjfv	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4450266423Sjfv			   vsi->stat_offsets_loaded,
4451266423Sjfv			   &oes->tx_errors, &es->tx_errors);
4452270346Sjfv	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4453266423Sjfv			   vsi->stat_offsets_loaded,
4454266423Sjfv			   &oes->rx_discards, &es->rx_discards);
4455266423Sjfv
4456270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4457266423Sjfv			   I40E_GLV_GORCL(stat_idx),
4458266423Sjfv			   vsi->stat_offsets_loaded,
4459266423Sjfv			   &oes->rx_bytes, &es->rx_bytes);
4460270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4461266423Sjfv			   I40E_GLV_UPRCL(stat_idx),
4462266423Sjfv			   vsi->stat_offsets_loaded,
4463266423Sjfv			   &oes->rx_unicast, &es->rx_unicast);
4464270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4465266423Sjfv			   I40E_GLV_MPRCL(stat_idx),
4466266423Sjfv			   vsi->stat_offsets_loaded,
4467266423Sjfv			   &oes->rx_multicast, &es->rx_multicast);
4468270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4469266423Sjfv			   I40E_GLV_BPRCL(stat_idx),
4470266423Sjfv			   vsi->stat_offsets_loaded,
4471266423Sjfv			   &oes->rx_broadcast, &es->rx_broadcast);
4472266423Sjfv
4473270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4474266423Sjfv			   I40E_GLV_GOTCL(stat_idx),
4475266423Sjfv			   vsi->stat_offsets_loaded,
4476266423Sjfv			   &oes->tx_bytes, &es->tx_bytes);
4477270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4478266423Sjfv			   I40E_GLV_UPTCL(stat_idx),
4479266423Sjfv			   vsi->stat_offsets_loaded,
4480266423Sjfv			   &oes->tx_unicast, &es->tx_unicast);
4481270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4482266423Sjfv			   I40E_GLV_MPTCL(stat_idx),
4483266423Sjfv			   vsi->stat_offsets_loaded,
4484266423Sjfv			   &oes->tx_multicast, &es->tx_multicast);
4485270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4486266423Sjfv			   I40E_GLV_BPTCL(stat_idx),
4487266423Sjfv			   vsi->stat_offsets_loaded,
4488266423Sjfv			   &oes->tx_broadcast, &es->tx_broadcast);
4489266423Sjfv	vsi->stat_offsets_loaded = true;
4490279858Sjfv}
4491269198Sjfv
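/*
** Refresh the VSI ethernet stats and fold them, together with the
** relevant port counters, into the ifnet statistics.
*/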
4492279858Sjfvstatic void
4493279858Sjfvixl_update_vsi_stats(struct ixl_vsi *vsi)
4494279858Sjfv{
4495279858Sjfv	struct ixl_pf		*pf;
4496279858Sjfv	struct ifnet		*ifp;
4497279858Sjfv	struct i40e_eth_stats	*es;
4498279858Sjfv	u64			tx_discards;
4499279858Sjfv
4500279858Sjfv	struct i40e_hw_port_stats *nsd;
4501279858Sjfv
4502279858Sjfv	pf = vsi->back;
4503279858Sjfv	ifp = vsi->ifp;
4504279858Sjfv	es = &vsi->eth_stats;
4505279858Sjfv	nsd = &pf->stats;
4506279858Sjfv
4507279858Sjfv	ixl_update_eth_stats(vsi);
4508279858Sjfv
4509272227Sglebius	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4510279858Sjfv	for (int i = 0; i < vsi->num_queues; i++)
4511272227Sglebius		tx_discards += vsi->queues[i].txr.br->br_drops;
4512272227Sglebius
4513269198Sjfv	/* Update ifnet stats */
4514272227Sglebius	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4515269198Sjfv	                   es->rx_multicast +
4516272227Sglebius			   es->rx_broadcast);
4517272227Sglebius	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4518269198Sjfv	                   es->tx_multicast +
4519272227Sglebius			   es->tx_broadcast);
4520272227Sglebius	IXL_SET_IBYTES(vsi, es->rx_bytes);
4521272227Sglebius	IXL_SET_OBYTES(vsi, es->tx_bytes);
4522272227Sglebius	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4523272227Sglebius	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4524269198Sjfv
4525279858Sjfv	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4526279858Sjfv	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4527279858Sjfv	    nsd->rx_jabber);
4528272227Sglebius	IXL_SET_OERRORS(vsi, es->tx_errors);
4529272227Sglebius	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4530272227Sglebius	IXL_SET_OQDROPS(vsi, tx_discards);
4531272227Sglebius	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4532272227Sglebius	IXL_SET_COLLISIONS(vsi, 0);
4533266423Sjfv}
4534266423Sjfv
4535266423Sjfv/**
4536266423Sjfv * Reset all of the stats for the given pf
4537266423Sjfv **/
4538270346Sjfvvoid ixl_pf_reset_stats(struct ixl_pf *pf)
4539266423Sjfv{
4540266423Sjfv	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4541266423Sjfv	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4542266423Sjfv	pf->stat_offsets_loaded = false;
4543266423Sjfv}
4544266423Sjfv
4545266423Sjfv/**
4546266423Sjfv * Resets all stats of the given vsi
4547266423Sjfv **/
4548270346Sjfvvoid ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4549266423Sjfv{
4550266423Sjfv	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4551266423Sjfv	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4552266423Sjfv	vsi->stat_offsets_loaded = false;
4553266423Sjfv}
4554266423Sjfv
4555266423Sjfv/**
4556266423Sjfv * Read and update a 48 bit stat from the hw
4557266423Sjfv *
4558266423Sjfv * Since the device stats are not reset at PFReset, they likely will not
4559266423Sjfv * be zeroed when the driver starts.  We'll save the first values read
4560266423Sjfv * and use them as offsets to be subtracted from the raw values in order
4561266423Sjfv * to report stats that count from zero.
4562266423Sjfv **/
4563266423Sjfvstatic void
4564270346Sjfvixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4565266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4566266423Sjfv{
4567266423Sjfv	u64 new_data;
4568266423Sjfv
4569270799Sbz#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4570266423Sjfv	new_data = rd64(hw, loreg);
4571266423Sjfv#else
4572266423Sjfv	/*
4573269198Sjfv	 * Use two rd32's instead of one rd64; FreeBSD versions before
4574266423Sjfv	 * 10 don't support 8 byte bus reads/writes.
4575266423Sjfv	 */
4576266423Sjfv	new_data = rd32(hw, loreg);
4577266423Sjfv	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4578266423Sjfv#endif
4579266423Sjfv
4580266423Sjfv	if (!offset_loaded)
4581266423Sjfv		*offset = new_data;
4582266423Sjfv	if (new_data >= *offset)
4583266423Sjfv		*stat = new_data - *offset;
4584266423Sjfv	else
4585266423Sjfv		*stat = (new_data + ((u64)1 << 48)) - *offset;
4586266423Sjfv	*stat &= 0xFFFFFFFFFFFFULL;
4587266423Sjfv}
4588266423Sjfv
4589266423Sjfv/**
4590266423Sjfv * Read and update a 32 bit stat from the hw
4591266423Sjfv **/
4592266423Sjfvstatic void
4593270346Sjfvixl_stat_update32(struct i40e_hw *hw, u32 reg,
4594266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4595266423Sjfv{
4596266423Sjfv	u32 new_data;
4597266423Sjfv
4598266423Sjfv	new_data = rd32(hw, reg);
4599266423Sjfv	if (!offset_loaded)
4600266423Sjfv		*offset = new_data;
4601266423Sjfv	if (new_data >= *offset)
4602266423Sjfv		*stat = (u32)(new_data - *offset);
4603266423Sjfv	else
4604266423Sjfv		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4605266423Sjfv}
4606266423Sjfv
4607266423Sjfv/*
4608266423Sjfv** Set flow control using sysctl:
4609266423Sjfv** 	0 - off
4610266423Sjfv**	1 - rx pause
4611266423Sjfv**	2 - tx pause
4612266423Sjfv**	3 - full
4613266423Sjfv*/
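/*
** Example (assuming the handler is attached as dev.ixl.<unit>.fc):
**	sysctl dev.ixl.0.fc=3	requests full (rx+tx) flow control
*/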
4614266423Sjfvstatic int
4615270346Sjfvixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4616266423Sjfv{
4617266423Sjfv	/*
4618266423Sjfv	 * TODO: ensure flow control is disabled if
4619266423Sjfv	 * priority flow control is enabled
4620266423Sjfv	 *
4621266423Sjfv	 * TODO: ensure tx CRC by hardware should be enabled
4622266423Sjfv	 * if tx flow control is enabled.
4623266423Sjfv	 */
4624270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4625266423Sjfv	struct i40e_hw *hw = &pf->hw;
4626266423Sjfv	device_t dev = pf->dev;
4627279033Sjfv	int error = 0;
4628266423Sjfv	enum i40e_status_code aq_error = 0;
4629266423Sjfv	u8 fc_aq_err = 0;
4630266423Sjfv
4631279033Sjfv	/* Get request */
4632279033Sjfv	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4633266423Sjfv	if ((error) || (req->newptr == NULL))
4634269198Sjfv		return (error);
4635279033Sjfv	if (pf->fc < 0 || pf->fc > 3) {
4636266423Sjfv		device_printf(dev,
4637266423Sjfv		    "Invalid fc mode; valid modes are 0 through 3\n");
4638266423Sjfv		return (EINVAL);
4639266423Sjfv	}
4640266423Sjfv
4641269198Sjfv	/*
4642269198Sjfv	** Changing flow control mode currently does not work on
4643269198Sjfv	** 40GBASE-CR4 PHYs
4644269198Sjfv	*/
4645269198Sjfv	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4646269198Sjfv	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4647269198Sjfv		device_printf(dev, "Changing flow control mode unsupported"
4648269198Sjfv		    " on 40GBase-CR4 media.\n");
4649269198Sjfv		return (ENODEV);
4650269198Sjfv	}
4651269198Sjfv
4652266423Sjfv	/* Set fc ability for port */
4653279033Sjfv	hw->fc.requested_mode = pf->fc;
4654269198Sjfv	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4655269198Sjfv	if (aq_error) {
4656269198Sjfv		device_printf(dev,
4657269198Sjfv		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4658269198Sjfv		    __func__, aq_error, fc_aq_err);
4659269198Sjfv		return (EAGAIN);
4660269198Sjfv	}
4661266423Sjfv
4662269198Sjfv	return (0);
4663269198Sjfv}
4664266423Sjfv
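/*
** Read-only sysctl reporting the current link speed as a string.
*/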
4665270346Sjfvstatic int
4666270346Sjfvixl_current_speed(SYSCTL_HANDLER_ARGS)
4667270346Sjfv{
4668270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4669270346Sjfv	struct i40e_hw *hw = &pf->hw;
4670270346Sjfv	int error = 0, index = 0;
4671270346Sjfv
4672270346Sjfv	char *speeds[] = {
4673270346Sjfv		"Unknown",
4674270346Sjfv		"100M",
4675270346Sjfv		"1G",
4676270346Sjfv		"10G",
4677270346Sjfv		"40G",
4678270346Sjfv		"20G"
4679270346Sjfv	};
4680270346Sjfv
4681270346Sjfv	ixl_update_link_status(pf);
4682270346Sjfv
4683270346Sjfv	switch (hw->phy.link_info.link_speed) {
4684270346Sjfv	case I40E_LINK_SPEED_100MB:
4685270346Sjfv		index = 1;
4686270346Sjfv		break;
4687270346Sjfv	case I40E_LINK_SPEED_1GB:
4688270346Sjfv		index = 2;
4689270346Sjfv		break;
4690270346Sjfv	case I40E_LINK_SPEED_10GB:
4691270346Sjfv		index = 3;
4692270346Sjfv		break;
4693270346Sjfv	case I40E_LINK_SPEED_40GB:
4694270346Sjfv		index = 4;
4695270346Sjfv		break;
4696270346Sjfv	case I40E_LINK_SPEED_20GB:
4697270346Sjfv		index = 5;
4698270346Sjfv		break;
4699270346Sjfv	case I40E_LINK_SPEED_UNKNOWN:
4700270346Sjfv	default:
4701270346Sjfv		index = 0;
4702270346Sjfv		break;
4703270346Sjfv	}
4704270346Sjfv
4705270346Sjfv	error = sysctl_handle_string(oidp, speeds[index],
4706270346Sjfv	    strlen(speeds[index]), req);
4707270346Sjfv	return (error);
4708270346Sjfv}
4709270346Sjfv
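/*
** Read the current PHY abilities, merge in the requested advertised
** speeds, program the new PHY config, and restart the interface so
** the change takes effect.
*/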
4710274205Sjfvstatic int
4711274205Sjfvixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4712274205Sjfv{
4713274205Sjfv	struct i40e_hw *hw = &pf->hw;
4714274205Sjfv	device_t dev = pf->dev;
4715274205Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
4716274205Sjfv	struct i40e_aq_set_phy_config config;
4717274205Sjfv	enum i40e_status_code aq_error = 0;
4718274205Sjfv
4719274205Sjfv	/* Get current capability information */
4720279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
4721279033Sjfv	    FALSE, FALSE, &abilities, NULL);
4722274205Sjfv	if (aq_error) {
4723279033Sjfv		device_printf(dev,
4724279033Sjfv		    "%s: Error getting phy capabilities %d,"
4725274205Sjfv		    " aq error: %d\n", __func__, aq_error,
4726274205Sjfv		    hw->aq.asq_last_status);
4727274205Sjfv		return (EAGAIN);
4728274205Sjfv	}
4729274205Sjfv
4730274205Sjfv	/* Prepare new config */
4731274205Sjfv	bzero(&config, sizeof(config));
4732274205Sjfv	config.phy_type = abilities.phy_type;
4733274205Sjfv	config.abilities = abilities.abilities
4734274205Sjfv	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4735274205Sjfv	config.eee_capability = abilities.eee_capability;
4736274205Sjfv	config.eeer = abilities.eeer_val;
4737274205Sjfv	config.low_power_ctrl = abilities.d3_lpan;
4738274205Sjfv	/* Translate into aq cmd link_speed */
4739279858Sjfv	if (speeds & 0x8)
4740279858Sjfv		config.link_speed |= I40E_LINK_SPEED_20GB;
4741274205Sjfv	if (speeds & 0x4)
4742274205Sjfv		config.link_speed |= I40E_LINK_SPEED_10GB;
4743274205Sjfv	if (speeds & 0x2)
4744274205Sjfv		config.link_speed |= I40E_LINK_SPEED_1GB;
4745274205Sjfv	if (speeds & 0x1)
4746274205Sjfv		config.link_speed |= I40E_LINK_SPEED_100MB;
4747274205Sjfv
4748274205Sjfv	/* Do aq command & restart link */
4749274205Sjfv	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4750274205Sjfv	if (aq_error) {
4751279033Sjfv		device_printf(dev,
4752279033Sjfv		    "%s: Error setting new phy config %d,"
4753274205Sjfv		    " aq error: %d\n", __func__, aq_error,
4754274205Sjfv		    hw->aq.asq_last_status);
4755274205Sjfv		return (EAGAIN);
4756274205Sjfv	}
4757274205Sjfv
4758277084Sjfv	/*
4759277084Sjfv	** This seems a bit heavy handed, but we
4760277084Sjfv	** need to get a reinit on some devices
4761277084Sjfv	*/
4762277084Sjfv	IXL_PF_LOCK(pf);
4763277084Sjfv	ixl_stop(pf);
4764277084Sjfv	ixl_init_locked(pf);
4765277084Sjfv	IXL_PF_UNLOCK(pf);
4766277084Sjfv
4767274205Sjfv	return (0);
4768274205Sjfv}
4769274205Sjfv
4770269198Sjfv/*
4771269198Sjfv** Control link advertise speed:
4772270346Sjfv**	Flags:
4773270346Sjfv**	0x1 - advertise 100 Mb
4774270346Sjfv**	0x2 - advertise 1G
4775270346Sjfv**	0x4 - advertise 10G
4776279858Sjfv**	0x8 - advertise 20G
4777269198Sjfv**
4778269198Sjfv** Does not work on 40G devices.
4779269198Sjfv*/
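/*
** Example (assuming the handler is attached as
** dev.ixl.<unit>.advertise_speed):
**	sysctl dev.ixl.0.advertise_speed=0x6	advertises 1G and 10G
*/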
4780269198Sjfvstatic int
4781270346Sjfvixl_set_advertise(SYSCTL_HANDLER_ARGS)
4782269198Sjfv{
4783270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4784269198Sjfv	struct i40e_hw *hw = &pf->hw;
4785269198Sjfv	device_t dev = pf->dev;
4786270346Sjfv	int requested_ls = 0;
4787269198Sjfv	int error = 0;
4788266423Sjfv
4789269198Sjfv	/*
4790269198Sjfv	** FW doesn't support changing advertised speed
4791269198Sjfv	** for 40G devices; speed is always 40G.
4792269198Sjfv	*/
4793269198Sjfv	if (i40e_is_40G_device(hw->device_id))
4794269198Sjfv		return (ENODEV);
4795266423Sjfv
4796269198Sjfv	/* Read in new mode */
4797270346Sjfv	requested_ls = pf->advertised_speed;
4798269198Sjfv	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4799269198Sjfv	if ((error) || (req->newptr == NULL))
4800269198Sjfv		return (error);
4801279858Sjfv	/* Check for sane value */
4802279858Sjfv	if (requested_ls < 0x1 || requested_ls > 0xE) {
4803279858Sjfv		device_printf(dev, "Invalid advertised speed; "
4804279858Sjfv		    "valid modes are 0x1 through 0xE\n");
4805269198Sjfv		return (EINVAL);
4806266423Sjfv	}
4807279858Sjfv	/* Then check for validity based on adapter type */
4808279858Sjfv	switch (hw->device_id) {
4809279858Sjfv	case I40E_DEV_ID_10G_BASE_T:
4810299545Serj	case I40E_DEV_ID_10G_BASE_T4:
4811279858Sjfv		if (requested_ls & 0x8) {
4812279858Sjfv			device_printf(dev,
4813279858Sjfv			    "20Gbps speed not supported on this device.\n");
4814279858Sjfv			return (EINVAL);
4815279858Sjfv		}
4816279858Sjfv		break;
4817279858Sjfv	case I40E_DEV_ID_20G_KR2:
4818299545Serj	case I40E_DEV_ID_20G_KR2_A:
4819279858Sjfv		if (requested_ls & 0x1) {
4820279858Sjfv			device_printf(dev,
4821279858Sjfv			    "100Mbps speed not supported on this device.\n");
4822279858Sjfv			return (EINVAL);
4823279858Sjfv		}
4824279858Sjfv		break;
4825279858Sjfv	default:
4826279858Sjfv		if (requested_ls & ~0x6) {
4827279858Sjfv			device_printf(dev,
4828279858Sjfv			    "Only 1Gbps/10Gbps speeds are supported on this device.\n");
4829279858Sjfv			return (EINVAL);
4830279858Sjfv		}
4831279858Sjfv		break;
4832279858Sjfv	}
4833269198Sjfv
4834269198Sjfv	/* Exit if no change */
4835270346Sjfv	if (pf->advertised_speed == requested_ls)
4836269198Sjfv		return (0);
4837269198Sjfv
4838274205Sjfv	error = ixl_set_advertised_speeds(pf, requested_ls);
4839274205Sjfv	if (error)
4840274205Sjfv		return (error);
4841270346Sjfv
4842270346Sjfv	pf->advertised_speed = requested_ls;
4843270346Sjfv	ixl_update_link_status(pf);
4844269198Sjfv	return (0);
4845266423Sjfv}
4846266423Sjfv
4847266423Sjfv/*
4848266423Sjfv** Get the width and transaction speed of
4849266423Sjfv** the bus this adapter is plugged into.
4850266423Sjfv*/
4851266423Sjfvstatic u16
4852270346Sjfvixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4853266423Sjfv{
4854266423Sjfv        u16                     link;
4855266423Sjfv        u32                     offset;
4856266423Sjfv
4857266423Sjfv        /* Get the PCI Express Capabilities offset */
4858266423Sjfv        pci_find_cap(dev, PCIY_EXPRESS, &offset);
4859266423Sjfv
4860266423Sjfv        /* ...and read the Link Status Register */
4861266423Sjfv        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4862266423Sjfv
4863266423Sjfv        switch (link & I40E_PCI_LINK_WIDTH) {
4864266423Sjfv        case I40E_PCI_LINK_WIDTH_1:
4865266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x1;
4866266423Sjfv                break;
4867266423Sjfv        case I40E_PCI_LINK_WIDTH_2:
4868266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x2;
4869266423Sjfv                break;
4870266423Sjfv        case I40E_PCI_LINK_WIDTH_4:
4871266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x4;
4872266423Sjfv                break;
4873266423Sjfv        case I40E_PCI_LINK_WIDTH_8:
4874266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x8;
4875266423Sjfv                break;
4876266423Sjfv        default:
4877266423Sjfv                hw->bus.width = i40e_bus_width_unknown;
4878266423Sjfv                break;
4879266423Sjfv        }
4880266423Sjfv
4881266423Sjfv        switch (link & I40E_PCI_LINK_SPEED) {
4882266423Sjfv        case I40E_PCI_LINK_SPEED_2500:
4883266423Sjfv                hw->bus.speed = i40e_bus_speed_2500;
4884266423Sjfv                break;
4885266423Sjfv        case I40E_PCI_LINK_SPEED_5000:
4886266423Sjfv                hw->bus.speed = i40e_bus_speed_5000;
4887266423Sjfv                break;
4888266423Sjfv        case I40E_PCI_LINK_SPEED_8000:
4889266423Sjfv                hw->bus.speed = i40e_bus_speed_8000;
4890266423Sjfv                break;
4891266423Sjfv        default:
4892266423Sjfv                hw->bus.speed = i40e_bus_speed_unknown;
4893266423Sjfv                break;
4894266423Sjfv        }
4895266423Sjfv
4896266423Sjfv
4897266423Sjfv        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4898266423Sjfv            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4899266423Sjfv            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4900266423Sjfv            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4901266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4902266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4903266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4904266423Sjfv            ("Unknown"));
4905266423Sjfv
4906266423Sjfv        if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4907266423Sjfv            (hw->bus.speed < i40e_bus_speed_8000)) {
4908266423Sjfv                device_printf(dev, "PCI-Express bandwidth available"
4909279858Sjfv                    " for this device\n     may be insufficient for"
4910279858Sjfv                    " optimal performance.\n");
4911266423Sjfv                device_printf(dev, "For expected performance a x8 "
4912266423Sjfv                    "PCIE Gen3 slot is required.\n");
4913266423Sjfv        }
4914266423Sjfv
4915266423Sjfv        return (link);
4916266423Sjfv}
4917266423Sjfv
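/*
** Read-only sysctl reporting firmware, AdminQ API, and NVM versions.
*/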
4918274205Sjfvstatic int
4919274205Sjfvixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
4920274205Sjfv{
4921274205Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
4922274205Sjfv	struct i40e_hw	*hw = &pf->hw;
4923274205Sjfv	char		buf[32];
4924274205Sjfv
4925274205Sjfv	snprintf(buf, sizeof(buf),
4926274205Sjfv	    "f%d.%d a%d.%d n%02x.%02x e%08x",
4927274205Sjfv	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
4928274205Sjfv	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
4929274205Sjfv	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
4930274205Sjfv	    IXL_NVM_VERSION_HI_SHIFT,
4931274205Sjfv	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
4932274205Sjfv	    IXL_NVM_VERSION_LO_SHIFT,
4933274205Sjfv	    hw->nvm.eetrack);
4934274205Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4935274205Sjfv}
4936274205Sjfv
4937274205Sjfv
4938277084Sjfv#ifdef IXL_DEBUG_SYSCTL
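/*
** Debug sysctl: fetch and display the raw link status from the adminq.
*/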
4939266423Sjfvstatic int
4940270346Sjfvixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4941266423Sjfv{
4942270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4943266423Sjfv	struct i40e_hw *hw = &pf->hw;
4944266423Sjfv	struct i40e_link_status link_status;
4945266423Sjfv	char buf[512];
4946266423Sjfv
4947266423Sjfv	enum i40e_status_code aq_error = 0;
4948266423Sjfv
4949266423Sjfv	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4950266423Sjfv	if (aq_error) {
4951266423Sjfv		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4952266423Sjfv		return (EPERM);
4953266423Sjfv	}
4954266423Sjfv
4955266423Sjfv	sprintf(buf, "\n"
4956266423Sjfv	    "PHY Type : %#04x\n"
4957266423Sjfv	    "Speed    : %#04x\n"
4958266423Sjfv	    "Link info: %#04x\n"
4959266423Sjfv	    "AN info  : %#04x\n"
4960266423Sjfv	    "Ext info : %#04x",
4961266423Sjfv	    link_status.phy_type, link_status.link_speed,
4962266423Sjfv	    link_status.link_info, link_status.an_info,
4963266423Sjfv	    link_status.ext_info);
4964266423Sjfv
4965266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4966266423Sjfv}
4967266423Sjfv
4968266423Sjfvstatic int
4969270346Sjfvixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4970266423Sjfv{
4971279858Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
4972279858Sjfv	struct i40e_hw		*hw = &pf->hw;
4973279858Sjfv	char			buf[512];
4974279858Sjfv	enum i40e_status_code	aq_error = 0;
4975266423Sjfv
4976279858Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
4977266423Sjfv
4978279858Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
4979279858Sjfv	    TRUE, FALSE, &abilities, NULL);
4980266423Sjfv	if (aq_error) {
4981266423Sjfv		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4982266423Sjfv		return (EPERM);
4983266423Sjfv	}
4984266423Sjfv
4985266423Sjfv	sprintf(buf, "\n"
4986266423Sjfv	    "PHY Type : %#010x\n"
4987266423Sjfv	    "Speed    : %#04x\n"
4988266423Sjfv	    "Abilities: %#04x\n"
4989266423Sjfv	    "EEE cap  : %#06x\n"
4990266423Sjfv	    "EEER reg : %#010x\n"
4991266423Sjfv	    "D3 Lpan  : %#04x",
4992279858Sjfv	    abilities.phy_type, abilities.link_speed,
4993279858Sjfv	    abilities.abilities, abilities.eee_capability,
4994279858Sjfv	    abilities.eeer_val, abilities.d3_lpan);
4995266423Sjfv
4996266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4997266423Sjfv}
4998266423Sjfv
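/*
** Debug sysctl: list the software MAC/VLAN filter table of the PF VSI.
*/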
4999266423Sjfvstatic int
5000270346Sjfvixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5001266423Sjfv{
5002270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5003270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
5004270346Sjfv	struct ixl_mac_filter *f;
5005266423Sjfv	char *buf, *buf_i;
5006266423Sjfv
5007266423Sjfv	int error = 0;
5008266423Sjfv	int ftl_len = 0;
5009266423Sjfv	int ftl_counter = 0;
5010266423Sjfv	int buf_len = 0;
5011266423Sjfv	int entry_len = 42;
5012266423Sjfv
5013266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
5014266423Sjfv		ftl_len++;
5015266423Sjfv	}
5016266423Sjfv
5017266423Sjfv	if (ftl_len < 1) {
5018266423Sjfv		sysctl_handle_string(oidp, "(none)", 6, req);
5019266423Sjfv		return (0);
5020266423Sjfv	}
5021266423Sjfv
5022266423Sjfv	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5023266423Sjfv	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (!buf) {
		device_printf(pf->dev,
		    "Could not allocate filter list buffer\n");
		return (ENOMEM);
	}
5024266423Sjfv
5025266423Sjfv	sprintf(buf_i++, "\n");
5026266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
5027266423Sjfv		sprintf(buf_i,
5028266423Sjfv		    MAC_FORMAT ", vlan %4d, flags %#06x",
5029266423Sjfv		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5030266423Sjfv		buf_i += entry_len;
5031266423Sjfv		/* don't print '\n' for last entry */
5032266423Sjfv		if (++ftl_counter != ftl_len) {
5033266423Sjfv			sprintf(buf_i, "\n");
5034266423Sjfv			buf_i++;
5035266423Sjfv		}
5036266423Sjfv	}
5037266423Sjfv
5038266423Sjfv	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5039266423Sjfv	if (error)
5040266423Sjfv		printf("sysctl error: %d\n", error);
5041266423Sjfv	free(buf, M_DEVBUF);
5042266423Sjfv	return error;
5043266423Sjfv}
5044269198Sjfv
5045270346Sjfv#define IXL_SW_RES_SIZE 0x14
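/*
** qsort(3) comparator: order switch resource entries by resource type.
*/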
5046269198Sjfvstatic int
5047277084Sjfvixl_res_alloc_cmp(const void *a, const void *b)
5048277084Sjfv{
5049277084Sjfv	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5050284049Sjfv	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5051284049Sjfv	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5052277084Sjfv
5053277084Sjfv	return ((int)one->resource_type - (int)two->resource_type);
5054277084Sjfv}
5055277084Sjfv
5056277084Sjfvstatic int
5057274205Sjfvixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5058269198Sjfv{
5059270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5060269198Sjfv	struct i40e_hw *hw = &pf->hw;
5061269198Sjfv	device_t dev = pf->dev;
5062269198Sjfv	struct sbuf *buf;
5063269198Sjfv	int error = 0;
5064269198Sjfv
5065269198Sjfv	u8 num_entries;
5066270346Sjfv	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5067269198Sjfv
5068284049Sjfv	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5069269198Sjfv	if (!buf) {
5070269198Sjfv		device_printf(dev, "Could not allocate sbuf for output.\n");
5071269198Sjfv		return (ENOMEM);
5072269198Sjfv	}
5073269198Sjfv
5074277084Sjfv	bzero(resp, sizeof(resp));
5075269198Sjfv	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5076269198Sjfv				resp,
5077270346Sjfv				IXL_SW_RES_SIZE,
5078269198Sjfv				NULL);
5079269198Sjfv	if (error) {
5080279858Sjfv		device_printf(dev,
5081279858Sjfv		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5082269198Sjfv		    __func__, error, hw->aq.asq_last_status);
5083269198Sjfv		sbuf_delete(buf);
5084269198Sjfv		return error;
5085269198Sjfv	}
5086269198Sjfv
5087277084Sjfv	/* Sort entries by type for display */
5088277084Sjfv	qsort(resp, num_entries,
5089277084Sjfv	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5090277084Sjfv	    &ixl_res_alloc_cmp);
5091277084Sjfv
5092269198Sjfv	sbuf_cat(buf, "\n");
5093277084Sjfv	sbuf_printf(buf, "# of entries: %d\n", num_entries);
5094269198Sjfv	sbuf_printf(buf,
5095269198Sjfv	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
5096269198Sjfv	    "     | (this)     | (all) | (this) | (all)       \n");
5097269198Sjfv	for (int i = 0; i < num_entries; i++) {
5098269198Sjfv		sbuf_printf(buf,
5099269198Sjfv		    "%#4x | %10d   %5d   %6d   %12d",
5100269198Sjfv		    resp[i].resource_type,
5101269198Sjfv		    resp[i].guaranteed,
5102269198Sjfv		    resp[i].total,
5103269198Sjfv		    resp[i].used,
5104269198Sjfv		    resp[i].total_unalloced);
5105269198Sjfv		if (i < num_entries - 1)
5106269198Sjfv			sbuf_cat(buf, "\n");
5107269198Sjfv	}
5108269198Sjfv
5109269198Sjfv	error = sbuf_finish(buf);
5110299545Serj	if (error) {
5111299545Serj		device_printf(dev, "Error finishing sbuf: %d\n", error);
5112299545Serj		sbuf_delete(buf);
5113299545Serj		return error;
5114299545Serj	}
5115299545Serj
5116299545Serj	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5117299545Serj	if (error)
5118299545Serj		device_printf(dev, "sysctl error: %d\n", error);
5119290708Ssmh	sbuf_delete(buf);
5120299545Serj	return error;
5121274205Sjfv}
5122269198Sjfv
5123274205Sjfv/*
5124274205Sjfv** Caller must init and delete sbuf; this function will clear and
5125274205Sjfv** finish it for caller.
5126274205Sjfv*/
5127274205Sjfvstatic char *
5128274205Sjfvixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
5129274205Sjfv{
5130274205Sjfv	sbuf_clear(s);
5131274205Sjfv
5132274205Sjfv	if (seid == 0 && uplink)
5133274205Sjfv		sbuf_cat(s, "Network");
5134274205Sjfv	else if (seid == 0)
5135274205Sjfv		sbuf_cat(s, "Host");
5136274205Sjfv	else if (seid == 1)
5137274205Sjfv		sbuf_cat(s, "EMP");
5138274205Sjfv	else if (seid <= 5)
5139274205Sjfv		sbuf_printf(s, "MAC %d", seid - 2);
5140274205Sjfv	else if (seid <= 15)
5141274205Sjfv		sbuf_cat(s, "Reserved");
5142274205Sjfv	else if (seid <= 31)
5143274205Sjfv		sbuf_printf(s, "PF %d", seid - 16);
5144274205Sjfv	else if (seid <= 159)
5145274205Sjfv		sbuf_printf(s, "VF %d", seid - 32);
5146274205Sjfv	else if (seid <= 287)
5147274205Sjfv		sbuf_cat(s, "Reserved");
5148274205Sjfv	else if (seid <= 511)
5149274205Sjfv		sbuf_cat(s, "Other"); // for other structures
5150274205Sjfv	else if (seid <= 895)
5151274205Sjfv		sbuf_printf(s, "VSI %d", seid - 512);
5152274205Sjfv	else if (seid <= 1023)
5153274205Sjfv		sbuf_printf(s, "Reserved");
5154274205Sjfv	else
5155274205Sjfv		sbuf_cat(s, "Invalid");
5156274205Sjfv
5157274205Sjfv	sbuf_finish(s);
5158274205Sjfv	return sbuf_data(s);
5159269198Sjfv}
5160269198Sjfv
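/*
** Debug sysctl: dump the hardware switch element table
** (SEID, uplink, downlink and connection type).
*/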
5161274205Sjfvstatic int
5162274205Sjfvixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5163274205Sjfv{
5164274205Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5165274205Sjfv	struct i40e_hw *hw = &pf->hw;
5166274205Sjfv	device_t dev = pf->dev;
5167274205Sjfv	struct sbuf *buf;
5168274205Sjfv	struct sbuf *nmbuf;
5169274205Sjfv	int error = 0;
5170274205Sjfv	u8 aq_buf[I40E_AQ_LARGE_BUF];
5171274205Sjfv
5172274205Sjfv	u16 next = 0;
5173274205Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
5174274205Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5175274205Sjfv
5176284049Sjfv	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
5177274205Sjfv	if (!buf) {
5178274205Sjfv		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5179274205Sjfv		return (ENOMEM);
5180274205Sjfv	}
5181274205Sjfv
5182274205Sjfv	error = i40e_aq_get_switch_config(hw, sw_config,
5183274205Sjfv	    sizeof(aq_buf), &next, NULL);
5184274205Sjfv	if (error) {
5185279858Sjfv		device_printf(dev,
5186279858Sjfv		    "%s: aq_get_switch_config() error %d, aq error %d\n",
5187274205Sjfv		    __func__, error, hw->aq.asq_last_status);
5188274205Sjfv		sbuf_delete(buf);
5189274205Sjfv		return error;
5190274205Sjfv	}
5191274205Sjfv
5192274205Sjfv	nmbuf = sbuf_new_auto();
5193274205Sjfv	if (!nmbuf) {
5194274205Sjfv		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
5195274205Sjfv		return (ENOMEM);
5196274205Sjfv	}
5197274205Sjfv
5198274205Sjfv	sbuf_cat(buf, "\n");
5199274205Sjfv	// Assuming <= 255 elements in switch
5200274205Sjfv	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
5201274205Sjfv	/* Exclude:
5202274205Sjfv	** Revision -- all elements are revision 1 for now
5203274205Sjfv	*/
5204274205Sjfv	sbuf_printf(buf,
5205274205Sjfv	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5206274205Sjfv	    "                |          |          | (uplink)\n");
5207274205Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
5208274205Sjfv		// "%4d (%8s) | %8s   %8s   %#8x",
5209274205Sjfv		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5210274205Sjfv		sbuf_cat(buf, " ");
5211279858Sjfv		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5212279858Sjfv		    sw_config->element[i].seid, false));
5213274205Sjfv		sbuf_cat(buf, " | ");
5214279858Sjfv		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5215279858Sjfv		    sw_config->element[i].uplink_seid, true));
5216274205Sjfv		sbuf_cat(buf, "   ");
5217279858Sjfv		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
5218279858Sjfv		    sw_config->element[i].downlink_seid, false));
5219274205Sjfv		sbuf_cat(buf, "   ");
5220274205Sjfv		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5221274205Sjfv		if (i < sw_config->header.num_reported - 1)
5222274205Sjfv			sbuf_cat(buf, "\n");
5223274205Sjfv	}
5224274205Sjfv	sbuf_delete(nmbuf);
5225274205Sjfv
5226274205Sjfv	error = sbuf_finish(buf);
5227299545Serj	if (error) {
5228299545Serj		device_printf(dev, "Error finishing sbuf: %d\n", error);
5229299545Serj		sbuf_delete(buf);
5230299545Serj		return error;
5231299545Serj	}
5232299545Serj
5233299545Serj	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
5234299545Serj	if (error)
5235299545Serj		device_printf(dev, "sysctl error: %d\n", error);
5236274205Sjfv	sbuf_delete(buf);
5237274205Sjfv
5238274205Sjfv	return (error);
5239274205Sjfv}
5240279858Sjfv#endif /* IXL_DEBUG_SYSCTL */
5241274205Sjfv
5242279858Sjfv
5243279858Sjfv#ifdef PCI_IOV
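/*
** Create a VSI for the given VF: build the VSI context, add it
** through the adminq, and record the assigned SEID, VSI number
** and queue range.
*/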
5244269198Sjfvstatic int
5245279858Sjfvixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5246269198Sjfv{
5247279858Sjfv	struct i40e_hw *hw;
5248279858Sjfv	struct ixl_vsi *vsi;
5249279858Sjfv	struct i40e_vsi_context vsi_ctx;
5250279858Sjfv	int i;
5251279858Sjfv	uint16_t first_queue;
5252279858Sjfv	enum i40e_status_code code;
5253269198Sjfv
5254279858Sjfv	hw = &pf->hw;
5255279858Sjfv	vsi = &pf->vsi;
5256269198Sjfv
5257279858Sjfv	vsi_ctx.pf_num = hw->pf_id;
5258279858Sjfv	vsi_ctx.uplink_seid = pf->veb_seid;
5259279858Sjfv	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5260279858Sjfv	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5261279858Sjfv	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5262279858Sjfv
5263279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5264279858Sjfv
5265279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5266279858Sjfv	vsi_ctx.info.switch_id = htole16(0);
5267279858Sjfv
5268279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5269279858Sjfv	vsi_ctx.info.sec_flags = 0;
5270279858Sjfv	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5271279858Sjfv		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5272279858Sjfv
5273279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5274279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5275279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5276279858Sjfv
5277279858Sjfv	vsi_ctx.info.valid_sections |=
5278279858Sjfv	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5279279858Sjfv	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5280279858Sjfv	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5281279858Sjfv	for (i = 0; i < IXLV_MAX_QUEUES; i++)
5282279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
5283279858Sjfv	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5284279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5285279858Sjfv
5286279858Sjfv	vsi_ctx.info.tc_mapping[0] = htole16(
5287279858Sjfv	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5288279858Sjfv	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5289279858Sjfv
5290279858Sjfv	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5291279858Sjfv	if (code != I40E_SUCCESS)
5292279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5293279858Sjfv	vf->vsi.seid = vsi_ctx.seid;
5294279858Sjfv	vf->vsi.vsi_num = vsi_ctx.vsi_number;
5295279858Sjfv	vf->vsi.first_queue = first_queue;
5296279858Sjfv	vf->vsi.num_queues = IXLV_MAX_QUEUES;
5297279858Sjfv
5298279858Sjfv	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5299279858Sjfv	if (code != I40E_SUCCESS)
5300279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5301279858Sjfv
5302279858Sjfv	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5303279858Sjfv	if (code != I40E_SUCCESS) {
5304279858Sjfv		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5305279858Sjfv		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5306279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5307269198Sjfv	}
5308269198Sjfv
5309279858Sjfv	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
5310279858Sjfv	return (0);
5311279858Sjfv}
5312279858Sjfv
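/*
** Allocate the VF's VSI and install its default broadcast filter.
*/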
5313279858Sjfvstatic int
5314279858Sjfvixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5315279858Sjfv{
5316279858Sjfv	struct i40e_hw *hw;
5317279858Sjfv	int error;
5318279858Sjfv
5319279858Sjfv	hw = &pf->hw;
5320279858Sjfv
5321279858Sjfv	error = ixl_vf_alloc_vsi(pf, vf);
5322279858Sjfv	if (error != 0)
5323269198Sjfv		return (error);
5324279858Sjfv
5325279858Sjfv	vf->vsi.hw_filters_add = 0;
5326279858Sjfv	vf->vsi.hw_filters_del = 0;
5327279858Sjfv	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5328279858Sjfv	ixl_reconfigure_filters(&vf->vsi);
5329279858Sjfv
5330279858Sjfv	return (0);
5331279858Sjfv}
5332279858Sjfv
5333279858Sjfvstatic void
5334279858Sjfvixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5335279858Sjfv    uint32_t val)
5336279858Sjfv{
5337279858Sjfv	uint32_t qtable;
5338279858Sjfv	int index, shift;
5339279858Sjfv
5340279858Sjfv	/*
5341279858Sjfv	 * Two queues are mapped in a single register, so we have to do some
5342279858Sjfv	 * gymnastics to convert the queue number into a register index and
5343279858Sjfv	 * shift.
5344279858Sjfv	 */
5345279858Sjfv	index = qnum / 2;
5346279858Sjfv	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5347279858Sjfv
5348279858Sjfv	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5349279858Sjfv	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5350279858Sjfv	qtable |= val << shift;
5351279858Sjfv	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5352279858Sjfv}
5353279858Sjfv
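/*
** Program the (non-contiguous) queue-to-VSI mapping for a VF and
** mark the remaining VSI queue slots as unused.
*/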
5354279858Sjfvstatic void
5355279858Sjfvixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5356279858Sjfv{
5357279858Sjfv	struct i40e_hw *hw;
5358279858Sjfv	uint32_t qtable;
5359279858Sjfv	int i;
5360279858Sjfv
5361279858Sjfv	hw = &pf->hw;
5362279858Sjfv
5363279858Sjfv	/*
5364279858Sjfv	 * Contiguous mappings aren't actually supported by the hardware,
5365279858Sjfv	 * so we have to use non-contiguous mappings.
5366279858Sjfv	 */
5367279858Sjfv	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5368279858Sjfv	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5369279858Sjfv
5370279858Sjfv	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5371279858Sjfv	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
5372279858Sjfv
5373279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++) {
5374279858Sjfv		qtable = (vf->vsi.first_queue + i) <<
5375279858Sjfv		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5376279858Sjfv
5377279858Sjfv		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5378279858Sjfv	}
5379279858Sjfv
5380279858Sjfv	/* Map queues allocated to VF to its VSI. */
5381279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++)
5382279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5383279858Sjfv
5384279858Sjfv	/* Set rest of VSI queues as unused. */
5385279858Sjfv	for (; i < IXL_MAX_VSI_QUEUES; i++)
5386279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i,
5387279858Sjfv		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
5388279858Sjfv
5389279858Sjfv	ixl_flush(hw);
5390279858Sjfv}
5391279858Sjfv
5392279858Sjfvstatic void
5393279858Sjfvixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5394279858Sjfv{
5395279858Sjfv	struct i40e_hw *hw;
5396279858Sjfv
5397279858Sjfv	hw = &pf->hw;
5398279858Sjfv
5399279858Sjfv	if (vsi->seid == 0)
5400279858Sjfv		return;
5401279858Sjfv
5402279858Sjfv	i40e_aq_delete_element(hw, vsi->seid, NULL);
5403279858Sjfv}
5404279858Sjfv
5405279858Sjfvstatic void
5406279858Sjfvixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5407279858Sjfv{
5408279858Sjfv
5409279858Sjfv	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
5410279858Sjfv	ixl_flush(hw);
5411279858Sjfv}
5412279858Sjfv
5413279858Sjfvstatic void
5414279858Sjfvixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5415279858Sjfv{
5416279858Sjfv
5417279858Sjfv	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5418279858Sjfv	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
5419279858Sjfv	ixl_flush(hw);
5420279858Sjfv}
5421279858Sjfv
5422279858Sjfvstatic void
5423279858Sjfvixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5424279858Sjfv{
5425279858Sjfv	struct i40e_hw *hw;
5426279858Sjfv	uint32_t vfint_reg, vpint_reg;
5427279858Sjfv	int i;
5428279858Sjfv
5429279858Sjfv	hw = &pf->hw;
5430279858Sjfv
5431279858Sjfv	ixl_vf_vsi_release(pf, &vf->vsi);
5432279858Sjfv
5433279858Sjfv	/* Index 0 has a special register. */
5434279858Sjfv	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5435279858Sjfv
5436279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5437279858Sjfv		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
5438279858Sjfv		ixl_vf_disable_queue_intr(hw, vfint_reg);
5439279858Sjfv	}
5440279858Sjfv
5441279858Sjfv	/* Index 0 has a special register. */
5442279858Sjfv	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5443279858Sjfv
5444279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5445279858Sjfv		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5446279858Sjfv		ixl_vf_unregister_intr(hw, vpint_reg);
5447279858Sjfv	}
5448279858Sjfv
5449279858Sjfv	vf->vsi.num_queues = 0;
5450279858Sjfv}
5451279858Sjfv
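/*
 * Poll the VF's device status until the hardware reports no pending PCIe
 * transactions for it; returns ETIMEDOUT if they never drain.
 */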
5452279858Sjfvstatic int
5453279858Sjfvixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5454279858Sjfv{
5455279858Sjfv	struct i40e_hw *hw;
5456279858Sjfv	int i;
5457279858Sjfv	uint16_t global_vf_num;
5458279858Sjfv	uint32_t ciad;
5459279858Sjfv
5460279858Sjfv	hw = &pf->hw;
5461279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5462279858Sjfv
5463279858Sjfv	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5464279858Sjfv	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5465279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5466279858Sjfv		ciad = rd32(hw, I40E_PF_PCI_CIAD);
5467279858Sjfv		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5468279858Sjfv			return (0);
5469279858Sjfv		DELAY(1);
5470279858Sjfv	}
5471279858Sjfv
5472279858Sjfv	return (ETIMEDOUT);
5473279858Sjfv}
5474279858Sjfv
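/*
 * Trigger a software reset of the VF via VPGEN_VFRTRIG, then run the full
 * reinitialization sequence.
 */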
5475279858Sjfvstatic void
5476279858Sjfvixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5477279858Sjfv{
5478279858Sjfv	struct i40e_hw *hw;
5479279858Sjfv	uint32_t vfrtrig;
5480279858Sjfv
5481279858Sjfv	hw = &pf->hw;
5482279858Sjfv
5483279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5484279858Sjfv	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5485279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5486279858Sjfv	ixl_flush(hw);
5487279858Sjfv
5488279858Sjfv	ixl_reinit_vf(pf, vf);
5489279858Sjfv}
5490279858Sjfv
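/*
 * Complete a VF reset: wait for pending PCIe activity and for the hardware
 * to acknowledge the reset, then rebuild the VF's VSI and queue mappings
 * and mark the VF active again.
 */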
5491279858Sjfvstatic void
5492279858Sjfvixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5493279858Sjfv{
5494279858Sjfv	struct i40e_hw *hw;
5495279858Sjfv	uint32_t vfrstat, vfrtrig;
5496279858Sjfv	int i, error;
5497279858Sjfv
5498279858Sjfv	hw = &pf->hw;
5499279858Sjfv
5500279858Sjfv	error = ixl_flush_pcie(pf, vf);
5501279858Sjfv	if (error != 0)
5502279858Sjfv		device_printf(pf->dev,
5503279858Sjfv		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
5504279858Sjfv		    vf->vf_num);
5505279858Sjfv
5506279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5507279858Sjfv		DELAY(10);
5508279858Sjfv
5509279858Sjfv		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5510279858Sjfv		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5511279858Sjfv			break;
5512279858Sjfv	}
5513279858Sjfv
5514279858Sjfv	if (i == IXL_VF_RESET_TIMEOUT)
5515279858Sjfv		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5516279858Sjfv
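	/* Report the reset as completed to the VF before reprovisioning it. */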
5517279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5518279858Sjfv
5519279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5520279858Sjfv	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5521279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5522279858Sjfv
5523279858Sjfv	if (vf->vsi.seid != 0)
5524279858Sjfv		ixl_disable_rings(&vf->vsi);
5525279858Sjfv
5526279858Sjfv	ixl_vf_release_resources(pf, vf);
5527279858Sjfv	ixl_vf_setup_vsi(pf, vf);
5528279858Sjfv	ixl_vf_map_queues(pf, vf);
5529279858Sjfv
5530279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
5531279858Sjfv	ixl_flush(hw);
5532279858Sjfv}
5533279858Sjfv
5534279858Sjfvstatic const char *
5535279858Sjfvixl_vc_opcode_str(uint16_t op)
5536279858Sjfv{
5537279858Sjfv
5538279858Sjfv	switch (op) {
5539279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
5540279858Sjfv		return ("VERSION");
5541279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
5542279858Sjfv		return ("RESET_VF");
5543279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5544279858Sjfv		return ("GET_VF_RESOURCES");
5545279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5546279858Sjfv		return ("CONFIG_TX_QUEUE");
5547279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5548279858Sjfv		return ("CONFIG_RX_QUEUE");
5549279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5550279858Sjfv		return ("CONFIG_VSI_QUEUES");
5551279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5552279858Sjfv		return ("CONFIG_IRQ_MAP");
5553279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5554279858Sjfv		return ("ENABLE_QUEUES");
5555279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5556279858Sjfv		return ("DISABLE_QUEUES");
5557279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5558279858Sjfv		return ("ADD_ETHER_ADDRESS");
5559279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5560279858Sjfv		return ("DEL_ETHER_ADDRESS");
5561279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
5562279858Sjfv		return ("ADD_VLAN");
5563279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
5564279858Sjfv		return ("DEL_VLAN");
5565279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5566279858Sjfv		return ("CONFIG_PROMISCUOUS_MODE");
5567279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5568279858Sjfv		return ("GET_STATS");
5569279858Sjfv	case I40E_VIRTCHNL_OP_FCOE:
5570279858Sjfv		return ("FCOE");
5571279858Sjfv	case I40E_VIRTCHNL_OP_EVENT:
5572279858Sjfv		return ("EVENT");
5573279858Sjfv	default:
5574279858Sjfv		return ("UNKNOWN");
5575279858Sjfv	}
5576279858Sjfv}
5577279858Sjfv
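/*
 * Debug verbosity required to log a given virtchnl opcode.  GET_STATS is
 * sent frequently by VF drivers, so it is only logged at a higher level.
 */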
5578279858Sjfvstatic int
5579279858Sjfvixl_vc_opcode_level(uint16_t opcode)
5580279858Sjfv{
5581279858Sjfv
5582279858Sjfv	switch (opcode) {
5583279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5584279858Sjfv		return (10);
5585279858Sjfv	default:
5586279858Sjfv		return (5);
5587279858Sjfv	}
5588279858Sjfv}
5589279858Sjfv
5590279858Sjfvstatic void
5591279858Sjfvixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5592279858Sjfv    enum i40e_status_code status, void *msg, uint16_t len)
5593279858Sjfv{
5594279858Sjfv	struct i40e_hw *hw;
5595279858Sjfv	int global_vf_id;
5596279858Sjfv
5597279858Sjfv	hw = &pf->hw;
5598279858Sjfv	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
5599279858Sjfv
5600279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
5601279858Sjfv	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
5602279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
5603279858Sjfv
5604279858Sjfv	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
5605279858Sjfv}
5606279858Sjfv
5607279858Sjfvstatic void
5608279858Sjfvixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
5609279858Sjfv{
5610279858Sjfv
5611279858Sjfv	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
5612279858Sjfv}
5613279858Sjfv
5614279858Sjfvstatic void
5615279858Sjfvixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
5616279858Sjfv    enum i40e_status_code status, const char *file, int line)
5617279858Sjfv{
5618279858Sjfv
5619279858Sjfv	I40E_VC_DEBUG(pf, 1,
5620279858Sjfv	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
5621279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
5622279858Sjfv	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
5623279858Sjfv}
5624279858Sjfv
5625279858Sjfvstatic void
5626279858Sjfvixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5627279858Sjfv    uint16_t msg_size)
5628279858Sjfv{
5629279858Sjfv	struct i40e_virtchnl_version_info reply;
5630279858Sjfv
5631279858Sjfv	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
5632279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
5633279858Sjfv		    I40E_ERR_PARAM);
5634279858Sjfv		return;
5635279858Sjfv	}
5636279858Sjfv
5637279858Sjfv	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
5638279858Sjfv	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
5639279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
5640279858Sjfv	    sizeof(reply));
5641279858Sjfv}
5642279858Sjfv
5643279858Sjfvstatic void
5644279858Sjfvixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5645279858Sjfv    uint16_t msg_size)
5646279858Sjfv{
5647279858Sjfv
5648279858Sjfv	if (msg_size != 0) {
5649279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
5650279858Sjfv		    I40E_ERR_PARAM);
5651279858Sjfv		return;
5652279858Sjfv	}
5653279858Sjfv
5654279858Sjfv	ixl_reset_vf(pf, vf);
5655279858Sjfv
5656279858Sjfv	/* No response to a reset message. */
5657279858Sjfv}
5658279858Sjfv
5659279858Sjfvstatic void
5660279858Sjfvixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5661279858Sjfv    uint16_t msg_size)
5662279858Sjfv{
5663279858Sjfv	struct i40e_virtchnl_vf_resource reply;
5664279858Sjfv
5665279858Sjfv	if (msg_size != 0) {
5666279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5667279858Sjfv		    I40E_ERR_PARAM);
5668279858Sjfv		return;
5669279858Sjfv	}
5670279858Sjfv
5671279858Sjfv	bzero(&reply, sizeof(reply));
5672279858Sjfv
5673279858Sjfv	reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
5674279858Sjfv
5675279858Sjfv	reply.num_vsis = 1;
5676279858Sjfv	reply.num_queue_pairs = vf->vsi.num_queues;
5677279858Sjfv	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
5678279858Sjfv	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
5679279858Sjfv	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
5680279858Sjfv	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
5681279858Sjfv	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
5682279858Sjfv
5683279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
5684279858Sjfv	    I40E_SUCCESS, &reply, sizeof(reply));
5685279858Sjfv}
5686279858Sjfv
5687279858Sjfvstatic int
5688279858Sjfvixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5689279858Sjfv    struct i40e_virtchnl_txq_info *info)
5690279858Sjfv{
5691279858Sjfv	struct i40e_hw *hw;
5692279858Sjfv	struct i40e_hmc_obj_txq txq;
5693279858Sjfv	uint16_t global_queue_num, global_vf_num;
5694279858Sjfv	enum i40e_status_code status;
5695279858Sjfv	uint32_t qtx_ctl;
5696279858Sjfv
5697279858Sjfv	hw = &pf->hw;
5698279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
5699279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5700279858Sjfv	bzero(&txq, sizeof(txq));
5701279858Sjfv
5702279858Sjfv	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
5703279858Sjfv	if (status != I40E_SUCCESS)
5704269198Sjfv		return (EINVAL);
5705279858Sjfv
5706279858Sjfv	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
5707279858Sjfv
5708279858Sjfv	txq.head_wb_ena = info->headwb_enabled;
5709279858Sjfv	txq.head_wb_addr = info->dma_headwb_addr;
5710279858Sjfv	txq.qlen = info->ring_len;
5711279858Sjfv	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
5712279858Sjfv	txq.rdylist_act = 0;
5713279858Sjfv
5714279858Sjfv	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
5715279858Sjfv	if (status != I40E_SUCCESS)
5716279858Sjfv		return (EINVAL);
5717279858Sjfv
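	/* Associate this global queue with the VF in QTX_CTL. */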
5718279858Sjfv	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
5719279858Sjfv	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
5720279858Sjfv	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
5721279858Sjfv	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
5722279858Sjfv	ixl_flush(hw);
5723279858Sjfv
5724279858Sjfv	return (0);
5725279858Sjfv}
5726279858Sjfv
5727279858Sjfvstatic int
5728279858Sjfvixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
5729279858Sjfv    struct i40e_virtchnl_rxq_info *info)
5730279858Sjfv{
5731279858Sjfv	struct i40e_hw *hw;
5732279858Sjfv	struct i40e_hmc_obj_rxq rxq;
5733279858Sjfv	uint16_t global_queue_num;
5734279858Sjfv	enum i40e_status_code status;
5735279858Sjfv
5736279858Sjfv	hw = &pf->hw;
5737279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
5738279858Sjfv	bzero(&rxq, sizeof(rxq));
5739279858Sjfv
5740279858Sjfv	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
5741279858Sjfv		return (EINVAL);
5742279858Sjfv
5743279858Sjfv	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
5744279858Sjfv	    info->max_pkt_size < ETHER_MIN_LEN)
5745279858Sjfv		return (EINVAL);
5746279858Sjfv
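	/*
	 * If the VF requested header split, validate the header buffer size
	 * and program which header types may be split out.
	 */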
5747279858Sjfv	if (info->splithdr_enabled) {
5748279858Sjfv		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
5749279858Sjfv			return (EINVAL);
5750279858Sjfv
5751279858Sjfv		rxq.hsplit_0 = info->rx_split_pos &
5752279858Sjfv		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
5753279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
5754279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
5755279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
5756279858Sjfv		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
5757279858Sjfv
5758279858Sjfv		rxq.dtype = 2;
5759269198Sjfv	}
5760269198Sjfv
5761279858Sjfv	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
5762279858Sjfv	if (status != I40E_SUCCESS)
5763279858Sjfv		return (EINVAL);
5764269198Sjfv
5765279858Sjfv	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
5766279858Sjfv	rxq.qlen = info->ring_len;
5767269198Sjfv
5768279858Sjfv	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
5769269198Sjfv
5770279858Sjfv	rxq.dsize = 1;
5771279858Sjfv	rxq.crcstrip = 1;
5772279858Sjfv	rxq.l2tsel = 1;
5773269198Sjfv
5774279858Sjfv	rxq.rxmax = info->max_pkt_size;
5775279858Sjfv	rxq.tphrdesc_ena = 1;
5776279858Sjfv	rxq.tphwdesc_ena = 1;
5777279858Sjfv	rxq.tphdata_ena = 1;
5778279858Sjfv	rxq.tphhead_ena = 1;
5779279858Sjfv	rxq.lrxqthresh = 2;
5780279858Sjfv	rxq.prefena = 1;
5781279858Sjfv
5782279858Sjfv	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
5783279858Sjfv	if (status != I40E_SUCCESS)
5784279858Sjfv		return (EINVAL);
5785279858Sjfv
5786279858Sjfv	return (0);
5787279858Sjfv}
5788279858Sjfv
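/*
 * Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the request and program the
 * HMC context for each TX/RX queue pair the VF configured.
 */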
5789279858Sjfvstatic void
5790279858Sjfvixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5791279858Sjfv    uint16_t msg_size)
5792279858Sjfv{
5793279858Sjfv	struct i40e_virtchnl_vsi_queue_config_info *info;
5794279858Sjfv	struct i40e_virtchnl_queue_pair_info *pair;
5795279858Sjfv	int i;
5796279858Sjfv
5797279858Sjfv	if (msg_size < sizeof(*info)) {
5798279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5799279858Sjfv		    I40E_ERR_PARAM);
5800279858Sjfv		return;
5801279858Sjfv	}
5802279858Sjfv
5803279858Sjfv	info = msg;
5804279858Sjfv	if (info->num_queue_pairs == 0) {
5805279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5806279858Sjfv		    I40E_ERR_PARAM);
5807279858Sjfv		return;
5808279858Sjfv	}
5809279858Sjfv
5810279858Sjfv	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
5811279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5812279858Sjfv		    I40E_ERR_PARAM);
5813279858Sjfv		return;
5814279858Sjfv	}
5815279858Sjfv
5816279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
5817279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
5818279858Sjfv		    I40E_ERR_PARAM);
5819279858Sjfv		return;
5820279858Sjfv	}
5821279858Sjfv
5822279858Sjfv	for (i = 0; i < info->num_queue_pairs; i++) {
5823279858Sjfv		pair = &info->qpair[i];
5824279858Sjfv
5825279858Sjfv		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
5826279858Sjfv		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
5827279858Sjfv		    pair->txq.queue_id != pair->rxq.queue_id ||
5828279858Sjfv		    pair->txq.queue_id >= vf->vsi.num_queues) {
5829279858Sjfv
5830279858Sjfv			i40e_send_vf_nack(pf, vf,
5831279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5832279858Sjfv			return;
5833279858Sjfv		}
5834279858Sjfv
5835279858Sjfv		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
5836279858Sjfv			i40e_send_vf_nack(pf, vf,
5837279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5838279858Sjfv			return;
5839279858Sjfv		}
5840279858Sjfv
5841279858Sjfv		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
5842279858Sjfv			i40e_send_vf_nack(pf, vf,
5843279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
5844279858Sjfv			return;
5845279858Sjfv		}
5846279858Sjfv	}
5847279858Sjfv
5848279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
5849279858Sjfv}
5850279858Sjfv
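/*
 * Link one queue into an interrupt vector's linked list by programming its
 * cause-control register.  The RQCTL shift/mask macros are reused for TX
 * queues here because the RX and TX cause-control registers share a layout.
 */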
5851279858Sjfvstatic void
5852279858Sjfvixl_vf_set_qctl(struct ixl_pf *pf,
5853279858Sjfv    const struct i40e_virtchnl_vector_map *vector,
5854279858Sjfv    enum i40e_queue_type cur_type, uint16_t cur_queue,
5855279858Sjfv    enum i40e_queue_type *last_type, uint16_t *last_queue)
5856279858Sjfv{
5857279858Sjfv	uint32_t offset, qctl;
5858279858Sjfv	uint16_t itr_indx;
5859279858Sjfv
5860279858Sjfv	if (cur_type == I40E_QUEUE_TYPE_RX) {
5861279858Sjfv		offset = I40E_QINT_RQCTL(cur_queue);
5862279858Sjfv		itr_indx = vector->rxitr_idx;
5863279858Sjfv	} else {
5864279858Sjfv		offset = I40E_QINT_TQCTL(cur_queue);
5865279858Sjfv		itr_indx = vector->txitr_idx;
5866279858Sjfv	}
5867279858Sjfv
5868279858Sjfv	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5869279858Sjfv	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5870279858Sjfv	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5871279858Sjfv	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5872279858Sjfv	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
5873279858Sjfv
5874279858Sjfv	wr32(&pf->hw, offset, qctl);
5875279858Sjfv
5876279858Sjfv	*last_type = cur_type;
5877279858Sjfv	*last_queue = cur_queue;
5878279858Sjfv}
5879279858Sjfv
5880279858Sjfvstatic void
5881279858Sjfvixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
5882279858Sjfv    const struct i40e_virtchnl_vector_map *vector)
5883279858Sjfv{
5884279858Sjfv	struct i40e_hw *hw;
5885279858Sjfv	u_int qindex;
5886279858Sjfv	enum i40e_queue_type type, last_type;
5887279858Sjfv	uint32_t lnklst_reg;
5888279858Sjfv	uint16_t rxq_map, txq_map, cur_queue, last_queue;
5889279858Sjfv
5890279858Sjfv	hw = &pf->hw;
5891279858Sjfv
5892279858Sjfv	rxq_map = vector->rxq_map;
5893279858Sjfv	txq_map = vector->txq_map;
5894279858Sjfv
5895279858Sjfv	last_queue = IXL_END_OF_INTR_LNKLST;
5896279858Sjfv	last_type = I40E_QUEUE_TYPE_RX;
5897279858Sjfv
5898279858Sjfv	/*
5899279858Sjfv	 * The datasheet says that, to optimize performance, RX and TX queues
5900279858Sjfv	 * should be interleaved in the interrupt linked list, so we process
5901279858Sjfv	 * both queue maps at once here.
5902279858Sjfv	 */
5903279858Sjfv	while ((rxq_map != 0) || (txq_map != 0)) {
5904279858Sjfv		if (txq_map != 0) {
5905279858Sjfv			qindex = ffs(txq_map) - 1;
5906279858Sjfv			type = I40E_QUEUE_TYPE_TX;
5907279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
5908279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5909279858Sjfv			    &last_type, &last_queue);
5910279858Sjfv			txq_map &= ~(1 << qindex);
5911279858Sjfv		}
5912279858Sjfv
5913279858Sjfv		if (rxq_map != 0) {
5914279858Sjfv			qindex = ffs(rxq_map) - 1;
5915279858Sjfv			type = I40E_QUEUE_TYPE_RX;
5916279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
5917279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
5918279858Sjfv			    &last_type, &last_queue);
5919279858Sjfv			rxq_map &= ~(1 << qindex);
5920279858Sjfv		}
5921279858Sjfv	}
5922279858Sjfv
5923279858Sjfv	if (vector->vector_id == 0)
5924279858Sjfv		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
5925279858Sjfv	else
5926279858Sjfv		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
5927279858Sjfv		    vf->vf_num);
5928279858Sjfv	wr32(hw, lnklst_reg,
5929279858Sjfv	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5930279858Sjfv	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
5931279858Sjfv
5932279858Sjfv	ixl_flush(hw);
5933279858Sjfv}
5934279858Sjfv
5935279858Sjfvstatic void
5936279858Sjfvixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
5937279858Sjfv    uint16_t msg_size)
5938279858Sjfv{
5939279858Sjfv	struct i40e_virtchnl_irq_map_info *map;
5940279858Sjfv	struct i40e_virtchnl_vector_map *vector;
5941279858Sjfv	struct i40e_hw *hw;
5942279858Sjfv	int i, largest_txq, largest_rxq;
5943279858Sjfv
5944279858Sjfv	hw = &pf->hw;
5945279858Sjfv
5946279858Sjfv	if (msg_size < sizeof(*map)) {
5947279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5948279858Sjfv		    I40E_ERR_PARAM);
5949279858Sjfv		return;
5950279858Sjfv	}
5951279858Sjfv
5952279858Sjfv	map = msg;
5953279858Sjfv	if (map->num_vectors == 0) {
5954279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5955279858Sjfv		    I40E_ERR_PARAM);
5956279858Sjfv		return;
5957279858Sjfv	}
5958279858Sjfv
5959279858Sjfv	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
5960279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5961279858Sjfv		    I40E_ERR_PARAM);
5962279858Sjfv		return;
5963279858Sjfv	}
5964279858Sjfv
5965279858Sjfv	for (i = 0; i < map->num_vectors; i++) {
5966279858Sjfv		vector = &map->vecmap[i];
5967279858Sjfv
5968279858Sjfv		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
5969279858Sjfv		    vector->vsi_id != vf->vsi.vsi_num) {
5970279858Sjfv			i40e_send_vf_nack(pf, vf,
5971279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
5972279858Sjfv			return;
5973279858Sjfv		}
5974279858Sjfv
5975279858Sjfv		if (vector->rxq_map != 0) {
5976279858Sjfv			largest_rxq = fls(vector->rxq_map) - 1;
5977279858Sjfv			if (largest_rxq >= vf->vsi.num_queues) {
5978279858Sjfv				i40e_send_vf_nack(pf, vf,
5979279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5980279858Sjfv				    I40E_ERR_PARAM);
5981279858Sjfv				return;
5982279858Sjfv			}
5983279858Sjfv		}
5984279858Sjfv
5985279858Sjfv		if (vector->txq_map != 0) {
5986279858Sjfv			largest_txq = fls(vector->txq_map) - 1;
5987279858Sjfv			if (largest_txq >= vf->vsi.num_queues) {
5988279858Sjfv				i40e_send_vf_nack(pf, vf,
5989279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5990279858Sjfv				    I40E_ERR_PARAM);
5991279858Sjfv				return;
5992279858Sjfv			}
5993279858Sjfv		}
5994279858Sjfv
5995279858Sjfv		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
5996279858Sjfv		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
5997279858Sjfv			i40e_send_vf_nack(pf, vf,
5998279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
5999279858Sjfv			    I40E_ERR_PARAM);
6000279858Sjfv			return;
6001279858Sjfv		}
6002279858Sjfv
6003279858Sjfv		ixl_vf_config_vector(pf, vf, vector);
6004279858Sjfv	}
6005279858Sjfv
6006279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
6007279858Sjfv}
6008279858Sjfv
6009279858Sjfvstatic void
6010279858Sjfvixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6011279858Sjfv    uint16_t msg_size)
6012279858Sjfv{
6013279858Sjfv	struct i40e_virtchnl_queue_select *select;
6014279858Sjfv	int error;
6015279858Sjfv
6016279858Sjfv	if (msg_size != sizeof(*select)) {
6017279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6018279858Sjfv		    I40E_ERR_PARAM);
6019279858Sjfv		return;
6020279858Sjfv	}
6021279858Sjfv
6022279858Sjfv	select = msg;
6023279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
6024279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
6025279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6026279858Sjfv		    I40E_ERR_PARAM);
6027279858Sjfv		return;
6028279858Sjfv	}
6029279858Sjfv
6030279858Sjfv	error = ixl_enable_rings(&vf->vsi);
6031269198Sjfv	if (error) {
6032279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6033279858Sjfv		    I40E_ERR_TIMEOUT);
6034279858Sjfv		return;
6035269198Sjfv	}
6036269198Sjfv
6037279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6038269198Sjfv}
6039266423Sjfv
6040279858Sjfvstatic void
6041279858Sjfvixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6042279858Sjfv    void *msg, uint16_t msg_size)
6043279858Sjfv{
6044279858Sjfv	struct i40e_virtchnl_queue_select *select;
6045279858Sjfv	int error;
6046279858Sjfv
6047279858Sjfv	if (msg_size != sizeof(*select)) {
6048279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6049279858Sjfv		    I40E_ERR_PARAM);
6050279858Sjfv		return;
6051279858Sjfv	}
6052279858Sjfv
6053279858Sjfv	select = msg;
6054279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
6055279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
6056279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6057279858Sjfv		    I40E_ERR_PARAM);
6058279858Sjfv		return;
6059279858Sjfv	}
6060279858Sjfv
6061279858Sjfv	error = ixl_disable_rings(&vf->vsi);
6062279858Sjfv	if (error) {
6063279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6064279858Sjfv		    I40E_ERR_TIMEOUT);
6065279858Sjfv		return;
6066279858Sjfv	}
6067279858Sjfv
6068279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6069279858Sjfv}
6070279858Sjfv
6071279858Sjfvstatic boolean_t
6072279858Sjfvixl_zero_mac(const uint8_t *addr)
6073279858Sjfv{
6074279858Sjfv	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6075279858Sjfv
6076279858Sjfv	return (cmp_etheraddr(addr, zero));
6077279858Sjfv}
6078279858Sjfv
6079279858Sjfvstatic boolean_t
6080279858Sjfvixl_bcast_mac(const uint8_t *addr)
6081279858Sjfv{
6082279858Sjfv
6083279858Sjfv	return (cmp_etheraddr(addr, ixl_bcast_addr));
6084279858Sjfv}
6085279858Sjfv
6086279858Sjfvstatic int
6087279858Sjfvixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6088279858Sjfv{
6089279858Sjfv
6090279858Sjfv	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6091279858Sjfv		return (EINVAL);
6092279858Sjfv
6093279858Sjfv	/*
6094279858Sjfv	 * If the VF is not allowed to change its MAC address, don't let it
6095279858Sjfv	 * set a MAC filter for an address that is not a multicast address and
6096279858Sjfv	 * is not its assigned MAC.
6097279858Sjfv	 */
6098279858Sjfv	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6099279858Sjfv	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6100279858Sjfv		return (EPERM);
6101279858Sjfv
6102279858Sjfv	return (0);
6103279858Sjfv}
6104279858Sjfv
6105279858Sjfvstatic void
6106279858Sjfvixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6107279858Sjfv    uint16_t msg_size)
6108279858Sjfv{
6109279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6110279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6111279858Sjfv	struct ixl_vsi *vsi;
6112279858Sjfv	int i;
6113279858Sjfv	size_t expected_size;
6114279858Sjfv
6115279858Sjfv	vsi = &vf->vsi;
6116279858Sjfv
6117279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6118279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6119279858Sjfv		    I40E_ERR_PARAM);
6120279858Sjfv		return;
6121279858Sjfv	}
6122279858Sjfv
6123279858Sjfv	addr_list = msg;
6124279858Sjfv	expected_size = sizeof(*addr_list) +
6125279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6126279858Sjfv
6127279858Sjfv	if (addr_list->num_elements == 0 ||
6128279858Sjfv	    addr_list->vsi_id != vsi->vsi_num ||
6129279858Sjfv	    msg_size != expected_size) {
6130279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6131279858Sjfv		    I40E_ERR_PARAM);
6132279858Sjfv		return;
6133279858Sjfv	}
6134279858Sjfv
6135279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6136279858Sjfv		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6137279858Sjfv			i40e_send_vf_nack(pf, vf,
6138279858Sjfv			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6139279858Sjfv			return;
6140279858Sjfv		}
6141279858Sjfv	}
6142279858Sjfv
6143279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6144279858Sjfv		addr = &addr_list->list[i];
6145279858Sjfv		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6146279858Sjfv	}
6147279858Sjfv
6148279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6149279858Sjfv}
6150279858Sjfv
6151279858Sjfvstatic void
6152279858Sjfvixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6153279858Sjfv    uint16_t msg_size)
6154279858Sjfv{
6155279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6156279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6157279858Sjfv	size_t expected_size;
6158279858Sjfv	int i;
6159279858Sjfv
6160279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6161279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6162279858Sjfv		    I40E_ERR_PARAM);
6163279858Sjfv		return;
6164279858Sjfv	}
6165279858Sjfv
6166279858Sjfv	addr_list = msg;
6167279858Sjfv	expected_size = sizeof(*addr_list) +
6168279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6169279858Sjfv
6170279858Sjfv	if (addr_list->num_elements == 0 ||
6171279858Sjfv	    addr_list->vsi_id != vf->vsi.vsi_num ||
6172279858Sjfv	    msg_size != expected_size) {
6173279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6174279858Sjfv		    I40E_ERR_PARAM);
6175279858Sjfv		return;
6176279858Sjfv	}
6177279858Sjfv
6178279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6179279858Sjfv		addr = &addr_list->list[i];
6180279858Sjfv		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6181279858Sjfv			i40e_send_vf_nack(pf, vf,
6182279858Sjfv			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
6183279858Sjfv			return;
6184279858Sjfv		}
6185279858Sjfv	}
6186279858Sjfv
6187279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6188279858Sjfv		addr = &addr_list->list[i];
6189279858Sjfv		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6190279858Sjfv	}
6191279858Sjfv
6192279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6193279858Sjfv}
6194279858Sjfv
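/*
 * Enable VLAN stripping on the VF's VSI by updating its VSI context through
 * the admin queue.
 */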
6195279858Sjfvstatic enum i40e_status_code
6196279858Sjfvixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6197279858Sjfv{
6198279858Sjfv	struct i40e_vsi_context vsi_ctx;
6199279858Sjfv
6200279858Sjfv	vsi_ctx.seid = vf->vsi.seid;
6201279858Sjfv
6202279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6203279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6204279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6205279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6206279858Sjfv	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6207279858Sjfv}
6208279858Sjfv
6209279858Sjfvstatic void
6210279858Sjfvixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6211279858Sjfv    uint16_t msg_size)
6212279858Sjfv{
6213279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6214279858Sjfv	enum i40e_status_code code;
6215279858Sjfv	size_t expected_size;
6216279858Sjfv	int i;
6217279858Sjfv
6218279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6219279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6220279858Sjfv		    I40E_ERR_PARAM);
6221279858Sjfv		return;
6222279858Sjfv	}
6223279858Sjfv
6224279858Sjfv	filter_list = msg;
6225279858Sjfv	expected_size = sizeof(*filter_list) +
6226279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6227279858Sjfv	if (filter_list->num_elements == 0 ||
6228279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6229279858Sjfv	    msg_size != expected_size) {
6230279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6231279858Sjfv		    I40E_ERR_PARAM);
6232279858Sjfv		return;
6233279858Sjfv	}
6234279858Sjfv
6235279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6236279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6237279858Sjfv		    I40E_ERR_PARAM);
6238279858Sjfv		return;
6239279858Sjfv	}
6240279858Sjfv
6241279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6242279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6243279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6244279858Sjfv			    I40E_ERR_PARAM);
6245279858Sjfv			return;
6246279858Sjfv		}
6247279858Sjfv	}
6248279858Sjfv
6249279858Sjfv	code = ixl_vf_enable_vlan_strip(pf, vf);
6250279858Sjfv	if (code != I40E_SUCCESS) {
6251279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6252279858Sjfv		    I40E_ERR_PARAM);
		return;
6253279858Sjfv	}
6254279858Sjfv
6255279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6256279858Sjfv		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6257279858Sjfv
6258279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6259279858Sjfv}
6260279858Sjfv
6261279858Sjfvstatic void
6262279858Sjfvixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6263279858Sjfv    uint16_t msg_size)
6264279858Sjfv{
6265279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6266279858Sjfv	int i;
6267279858Sjfv	size_t expected_size;
6268279858Sjfv
6269279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6270279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6271279858Sjfv		    I40E_ERR_PARAM);
6272279858Sjfv		return;
6273279858Sjfv	}
6274279858Sjfv
6275279858Sjfv	filter_list = msg;
6276279858Sjfv	expected_size = sizeof(*filter_list) +
6277279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6278279858Sjfv	if (filter_list->num_elements == 0 ||
6279279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6280279858Sjfv	    msg_size != expected_size) {
6281279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6282279858Sjfv		    I40E_ERR_PARAM);
6283279858Sjfv		return;
6284279858Sjfv	}
6285279858Sjfv
6286279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6287279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6288279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6289279858Sjfv			    I40E_ERR_PARAM);
6290279858Sjfv			return;
6291279858Sjfv		}
6292279858Sjfv	}
6293279858Sjfv
6294279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6295279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6296279858Sjfv		    I40E_ERR_PARAM);
6297279858Sjfv		return;
6298279858Sjfv	}
6299279858Sjfv
6300279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6301279858Sjfv		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6302279858Sjfv
6303279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6304279858Sjfv}
6305279858Sjfv
6306279858Sjfvstatic void
6307279858Sjfvixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6308279858Sjfv    void *msg, uint16_t msg_size)
6309279858Sjfv{
6310279858Sjfv	struct i40e_virtchnl_promisc_info *info;
6311279858Sjfv	enum i40e_status_code code;
6312279858Sjfv
6313279858Sjfv	if (msg_size != sizeof(*info)) {
6314279858Sjfv		i40e_send_vf_nack(pf, vf,
6315279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6316279858Sjfv		return;
6317279858Sjfv	}
6318279858Sjfv
6319295787Skevlo	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
6320279858Sjfv		i40e_send_vf_nack(pf, vf,
6321279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6322279858Sjfv		return;
6323279858Sjfv	}
6324279858Sjfv
6325279858Sjfv	info = msg;
6326279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
6327279858Sjfv		i40e_send_vf_nack(pf, vf,
6328279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6329279858Sjfv		return;
6330279858Sjfv	}
6331279858Sjfv
6332279858Sjfv	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6333279858Sjfv	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6334279858Sjfv	if (code != I40E_SUCCESS) {
6335279858Sjfv		i40e_send_vf_nack(pf, vf,
6336279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6337279858Sjfv		return;
6338279858Sjfv	}
6339279858Sjfv
6340279858Sjfv	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6341279858Sjfv	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6342279858Sjfv	if (code != I40E_SUCCESS) {
6343279858Sjfv		i40e_send_vf_nack(pf, vf,
6344279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6345279858Sjfv		return;
6346279858Sjfv	}
6347279858Sjfv
6348279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6349279858Sjfv}
6350279858Sjfv
6351279858Sjfvstatic void
6352279858Sjfvixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6353279858Sjfv    uint16_t msg_size)
6354279858Sjfv{
6355279858Sjfv	struct i40e_virtchnl_queue_select *queue;
6356279858Sjfv
6357279858Sjfv	if (msg_size != sizeof(*queue)) {
6358279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6359279858Sjfv		    I40E_ERR_PARAM);
6360279858Sjfv		return;
6361279858Sjfv	}
6362279858Sjfv
6363279858Sjfv	queue = msg;
6364279858Sjfv	if (queue->vsi_id != vf->vsi.vsi_num) {
6365279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6366279858Sjfv		    I40E_ERR_PARAM);
6367279858Sjfv		return;
6368279858Sjfv	}
6369279858Sjfv
6370279858Sjfv	ixl_update_eth_stats(&vf->vsi);
6371279858Sjfv
6372279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6373279858Sjfv	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6374279858Sjfv}
6375279858Sjfv
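/*
 * Dispatch an admin queue event carrying a virtchnl message from a VF to
 * the handler for its opcode.
 */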
6376279858Sjfvstatic void
6377279858Sjfvixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6378279858Sjfv{
6379279858Sjfv	struct ixl_vf *vf;
6380279858Sjfv	void *msg;
6381279858Sjfv	uint16_t vf_num, msg_size;
6382279858Sjfv	uint32_t opcode;
6383279858Sjfv
6384279858Sjfv	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6385279858Sjfv	opcode = le32toh(event->desc.cookie_high);
6386279858Sjfv
6387279858Sjfv	if (vf_num >= pf->num_vfs) {
6388279858Sjfv		device_printf(pf->dev, "Got msg from invalid VF: %d\n", vf_num);
6389279858Sjfv		return;
6390279858Sjfv	}
6391279858Sjfv
6392279858Sjfv	vf = &pf->vfs[vf_num];
6393279858Sjfv	msg = event->msg_buf;
6394279858Sjfv	msg_size = event->msg_len;
6395279858Sjfv
6396279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6397279858Sjfv	    "Got msg %s(%d) from VF-%d of size %d\n",
6398279858Sjfv	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6399279858Sjfv
6400279858Sjfv	switch (opcode) {
6401279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
6402279858Sjfv		ixl_vf_version_msg(pf, vf, msg, msg_size);
6403279858Sjfv		break;
6404279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
6405279858Sjfv		ixl_vf_reset_msg(pf, vf, msg, msg_size);
6406279858Sjfv		break;
6407279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6408279858Sjfv		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6409279858Sjfv		break;
6410279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6411279858Sjfv		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6412279858Sjfv		break;
6413279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6414279858Sjfv		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6415279858Sjfv		break;
6416279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6417279858Sjfv		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6418279858Sjfv		break;
6419279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6420279858Sjfv		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6421279858Sjfv		break;
6422279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6423279858Sjfv		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6424279858Sjfv		break;
6425279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6426279858Sjfv		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6427279858Sjfv		break;
6428279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
6429279858Sjfv		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6430279858Sjfv		break;
6431279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
6432279858Sjfv		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6433279858Sjfv		break;
6434279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6435279858Sjfv		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6436279858Sjfv		break;
6437279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
6438279858Sjfv		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6439279858Sjfv		break;
6440279858Sjfv
6441279858Sjfv	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6442279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6443279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6444279858Sjfv	default:
6445279858Sjfv		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6446279858Sjfv		break;
6447279858Sjfv	}
6448279858Sjfv}
6449279858Sjfv
6450279858Sjfv/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
6451279858Sjfvstatic void
6452279858Sjfvixl_handle_vflr(void *arg, int pending)
6453279858Sjfv{
6454279858Sjfv	struct ixl_pf *pf;
6455279858Sjfv	struct i40e_hw *hw;
6456279858Sjfv	uint16_t global_vf_num;
6457279858Sjfv	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6458279858Sjfv	int i;
6459279858Sjfv
6460279858Sjfv	pf = arg;
6461279858Sjfv	hw = &pf->hw;
6462279858Sjfv
6463279858Sjfv	IXL_PF_LOCK(pf);
6464279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
6465279858Sjfv		global_vf_num = hw->func_caps.vf_base_id + i;
6466279858Sjfv
6467279858Sjfv		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6468279858Sjfv		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6469279858Sjfv		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6470279858Sjfv		if (vflrstat & vflrstat_mask) {
6471279858Sjfv			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6472279858Sjfv			    vflrstat_mask);
6473279858Sjfv
6474279858Sjfv			ixl_reinit_vf(pf, &pf->vfs[i]);
6475279858Sjfv		}
6476279858Sjfv	}
6477279858Sjfv
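	/*
	 * Re-enable the VFLR cause in PFINT_ICR0_ENA now that all pending
	 * FLR events have been handled.
	 */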
6478279858Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6479279858Sjfv	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6480279858Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6481279858Sjfv	ixl_flush(hw);
6482279858Sjfv
6483279858Sjfv	IXL_PF_UNLOCK(pf);
6484279858Sjfv}
6485279858Sjfv
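/* Translate an admin queue error code into the closest errno(2) value. */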
6486279858Sjfvstatic int
6487279858Sjfvixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6488279858Sjfv{
6489279858Sjfv
6490279858Sjfv	switch (err) {
6491279858Sjfv	case I40E_AQ_RC_EPERM:
6492279858Sjfv		return (EPERM);
6493279858Sjfv	case I40E_AQ_RC_ENOENT:
6494279858Sjfv		return (ENOENT);
6495279858Sjfv	case I40E_AQ_RC_ESRCH:
6496279858Sjfv		return (ESRCH);
6497279858Sjfv	case I40E_AQ_RC_EINTR:
6498279858Sjfv		return (EINTR);
6499279858Sjfv	case I40E_AQ_RC_EIO:
6500279858Sjfv		return (EIO);
6501279858Sjfv	case I40E_AQ_RC_ENXIO:
6502279858Sjfv		return (ENXIO);
6503279858Sjfv	case I40E_AQ_RC_E2BIG:
6504279858Sjfv		return (E2BIG);
6505279858Sjfv	case I40E_AQ_RC_EAGAIN:
6506279858Sjfv		return (EAGAIN);
6507279858Sjfv	case I40E_AQ_RC_ENOMEM:
6508279858Sjfv		return (ENOMEM);
6509279858Sjfv	case I40E_AQ_RC_EACCES:
6510279858Sjfv		return (EACCES);
6511279858Sjfv	case I40E_AQ_RC_EFAULT:
6512279858Sjfv		return (EFAULT);
6513279858Sjfv	case I40E_AQ_RC_EBUSY:
6514279858Sjfv		return (EBUSY);
6515279858Sjfv	case I40E_AQ_RC_EEXIST:
6516279858Sjfv		return (EEXIST);
6517279858Sjfv	case I40E_AQ_RC_EINVAL:
6518279858Sjfv		return (EINVAL);
6519279858Sjfv	case I40E_AQ_RC_ENOTTY:
6520279858Sjfv		return (ENOTTY);
6521279858Sjfv	case I40E_AQ_RC_ENOSPC:
6522279858Sjfv		return (ENOSPC);
6523279858Sjfv	case I40E_AQ_RC_ENOSYS:
6524279858Sjfv		return (ENOSYS);
6525279858Sjfv	case I40E_AQ_RC_ERANGE:
6526279858Sjfv		return (ERANGE);
6527279858Sjfv	case I40E_AQ_RC_EFLUSHED:
6528279858Sjfv		return (EINVAL);	/* No exact equivalent in errno.h */
6529279858Sjfv	case I40E_AQ_RC_BAD_ADDR:
6530279858Sjfv		return (EFAULT);
6531279858Sjfv	case I40E_AQ_RC_EMODE:
6532279858Sjfv		return (EPERM);
6533279858Sjfv	case I40E_AQ_RC_EFBIG:
6534279858Sjfv		return (EFBIG);
6535279858Sjfv	default:
6536279858Sjfv		return (EINVAL);
6537279858Sjfv	}
6538279858Sjfv}
6539279858Sjfv
6540279858Sjfvstatic int
6541299545Serjixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6542279858Sjfv{
6543279858Sjfv	struct ixl_pf *pf;
6544279858Sjfv	struct i40e_hw *hw;
6545279858Sjfv	struct ixl_vsi *pf_vsi;
6546279858Sjfv	enum i40e_status_code ret;
6547279858Sjfv	int i, error;
6548279858Sjfv
6549279858Sjfv	pf = device_get_softc(dev);
6550279858Sjfv	hw = &pf->hw;
6551279858Sjfv	pf_vsi = &pf->vsi;
6552279858Sjfv
6553279858Sjfv	IXL_PF_LOCK(pf);
6554279858Sjfv	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6555279858Sjfv	    M_ZERO);
6556279858Sjfv
6557279858Sjfv	if (pf->vfs == NULL) {
6558279858Sjfv		error = ENOMEM;
6559279858Sjfv		goto fail;
6560279858Sjfv	}
6561279858Sjfv
6562279858Sjfv	for (i = 0; i < num_vfs; i++)
6563279858Sjfv		sysctl_ctx_init(&pf->vfs[i].ctx);
6564279858Sjfv
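	/*
	 * Create a virtual switch (VEB) between the PF's VSI and its uplink;
	 * the VF VSIs will be attached to it.
	 */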
6565279858Sjfv	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
6566279858Sjfv	    1, FALSE, FALSE, &pf->veb_seid, NULL);
6567279858Sjfv	if (ret != I40E_SUCCESS) {
6568279858Sjfv		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
6569279858Sjfv		device_printf(dev, "add_veb failed; code=%d error=%d\n", ret,
6570279858Sjfv		    error);
6571279858Sjfv		goto fail;
6572279858Sjfv	}
6573279858Sjfv
6574279858Sjfv	ixl_configure_msix(pf);
6575279858Sjfv	ixl_enable_adminq(hw);
6576279858Sjfv
6577279858Sjfv	pf->num_vfs = num_vfs;
6578279858Sjfv	IXL_PF_UNLOCK(pf);
6579279858Sjfv	return (0);
6580279858Sjfv
6581279858Sjfvfail:
6582279858Sjfv	free(pf->vfs, M_IXL);
6583279858Sjfv	pf->vfs = NULL;
6584279858Sjfv	IXL_PF_UNLOCK(pf);
6585279858Sjfv	return (error);
6586279858Sjfv}
6587279858Sjfv
6588279858Sjfvstatic void
6589299545Serjixl_uninit_iov(device_t dev)
6590279858Sjfv{
6591279858Sjfv	struct ixl_pf *pf;
6592279858Sjfv	struct i40e_hw *hw;
6593279858Sjfv	struct ixl_vsi *vsi;
6594279858Sjfv	struct ifnet *ifp;
6595279858Sjfv	struct ixl_vf *vfs;
6596279858Sjfv	int i, num_vfs;
6597279858Sjfv
6598279858Sjfv	pf = device_get_softc(dev);
6599279858Sjfv	hw = &pf->hw;
6600279858Sjfv	vsi = &pf->vsi;
6601279858Sjfv	ifp = vsi->ifp;
6602279858Sjfv
6603279858Sjfv	IXL_PF_LOCK(pf);
6604279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
6605279858Sjfv		if (pf->vfs[i].vsi.seid != 0)
6606279858Sjfv			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
6607279858Sjfv	}
6608279858Sjfv
6609279858Sjfv	if (pf->veb_seid != 0) {
6610279858Sjfv		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
6611279858Sjfv		pf->veb_seid = 0;
6612279858Sjfv	}
6613279858Sjfv
6614279858Sjfv	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
6615279858Sjfv		ixl_disable_intr(vsi);
6616279858Sjfv
6617279858Sjfv	vfs = pf->vfs;
6618279858Sjfv	num_vfs = pf->num_vfs;
6619279858Sjfv
6620279858Sjfv	pf->vfs = NULL;
6621279858Sjfv	pf->num_vfs = 0;
6622279858Sjfv	IXL_PF_UNLOCK(pf);
6623279858Sjfv
6624279858Sjfv	/* Do this after the unlock as sysctl_ctx_free might sleep. */
6625279858Sjfv	for (i = 0; i < num_vfs; i++)
6626279858Sjfv		sysctl_ctx_free(&vfs[i].ctx);
6627279858Sjfv	free(vfs, M_IXL);
6628279858Sjfv}
6629279858Sjfv
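/*
 * Set up a newly created VF: allocate its VSI and apply the capability flags
 * requested in the configuration nvlist.
 */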
6630279858Sjfvstatic int
6631279858Sjfvixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
6632279858Sjfv{
6633279858Sjfv	char sysctl_name[QUEUE_NAME_LEN];
6634279858Sjfv	struct ixl_pf *pf;
6635279858Sjfv	struct ixl_vf *vf;
6636279858Sjfv	const void *mac;
6637279858Sjfv	size_t size;
6638279858Sjfv	int error;
6639279858Sjfv
6640279858Sjfv	pf = device_get_softc(dev);
6641279858Sjfv	vf = &pf->vfs[vfnum];
6642279858Sjfv
6643279858Sjfv	IXL_PF_LOCK(pf);
6644279858Sjfv	vf->vf_num = vfnum;
6645279858Sjfv
6646279858Sjfv	vf->vsi.back = pf;
6647279858Sjfv	vf->vf_flags = VF_FLAG_ENABLED;
6648279858Sjfv	SLIST_INIT(&vf->vsi.ftl);
6649279858Sjfv
6650279858Sjfv	error = ixl_vf_setup_vsi(pf, vf);
6651279858Sjfv	if (error != 0)
6652279858Sjfv		goto out;
6653279858Sjfv
6654279858Sjfv	if (nvlist_exists_binary(params, "mac-addr")) {
6655279858Sjfv		mac = nvlist_get_binary(params, "mac-addr", &size);
6656279858Sjfv		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
6657279858Sjfv
6658279858Sjfv		if (nvlist_get_bool(params, "allow-set-mac"))
6659279858Sjfv			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6660279858Sjfv	} else
6661279858Sjfv		/*
6662279858Sjfv		 * If the administrator has not specified a MAC address then
6663279858Sjfv		 * we must allow the VF to choose one.
6664279858Sjfv		 */
6665279858Sjfv		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
6666279858Sjfv
6667279858Sjfv	if (nvlist_get_bool(params, "mac-anti-spoof"))
6668279858Sjfv		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
6669279858Sjfv
6670279858Sjfv	if (nvlist_get_bool(params, "allow-promisc"))
6671279858Sjfv		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
6672279858Sjfv
6673279858Sjfv	vf->vf_flags |= VF_FLAG_VLAN_CAP;
6674279858Sjfv
6675279858Sjfv	ixl_reset_vf(pf, vf);
6676279858Sjfvout:
6677279858Sjfv	IXL_PF_UNLOCK(pf);
6678279858Sjfv	if (error == 0) {
6679279858Sjfv		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
6680279858Sjfv		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
6681279858Sjfv	}
6682279858Sjfv
6683279858Sjfv	return (error);
6684279858Sjfv}
6685279858Sjfv#endif /* PCI_IOV */
6686