if_ixl.c revision 299552
1266423Sjfv/******************************************************************************
2266423Sjfv
3279033Sjfv  Copyright (c) 2013-2015, Intel Corporation
4266423Sjfv  All rights reserved.
5266423Sjfv
6266423Sjfv  Redistribution and use in source and binary forms, with or without
7266423Sjfv  modification, are permitted provided that the following conditions are met:
8266423Sjfv
9266423Sjfv   1. Redistributions of source code must retain the above copyright notice,
10266423Sjfv      this list of conditions and the following disclaimer.
11266423Sjfv
12266423Sjfv   2. Redistributions in binary form must reproduce the above copyright
13266423Sjfv      notice, this list of conditions and the following disclaimer in the
14266423Sjfv      documentation and/or other materials provided with the distribution.
15266423Sjfv
16266423Sjfv   3. Neither the name of the Intel Corporation nor the names of its
17266423Sjfv      contributors may be used to endorse or promote products derived from
18266423Sjfv      this software without specific prior written permission.
19266423Sjfv
20266423Sjfv  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21266423Sjfv  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22266423Sjfv  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23266423Sjfv  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24266423Sjfv  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25266423Sjfv  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26266423Sjfv  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27266423Sjfv  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28266423Sjfv  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29266423Sjfv  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30266423Sjfv  POSSIBILITY OF SUCH DAMAGE.
31266423Sjfv
32266423Sjfv******************************************************************************/
33266423Sjfv/*$FreeBSD: head/sys/dev/ixl/if_ixl.c 299552 2016-05-12 18:21:17Z erj $*/
34266423Sjfv
35279033Sjfv#ifndef IXL_STANDALONE_BUILD
36266423Sjfv#include "opt_inet.h"
37266423Sjfv#include "opt_inet6.h"
38277084Sjfv#include "opt_rss.h"
39279033Sjfv#endif
40279033Sjfv
41270346Sjfv#include "ixl.h"
42270346Sjfv#include "ixl_pf.h"
43269198Sjfv
44277262Sjfv#ifdef RSS
45277262Sjfv#include <net/rss_config.h>
46277262Sjfv#endif
47277262Sjfv
48266423Sjfv/*********************************************************************
49266423Sjfv *  Driver version
50266423Sjfv *********************************************************************/
51299552Serjchar ixl_driver_version[] = "1.4.17-k";
52266423Sjfv
53266423Sjfv/*********************************************************************
54266423Sjfv *  PCI Device ID Table
55266423Sjfv *
56266423Sjfv *  Used by probe to select devices to load on
57270346Sjfv *  Last field stores an index into ixl_strings
58266423Sjfv *  Last entry must be all 0s
59266423Sjfv *
60266423Sjfv *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61266423Sjfv *********************************************************************/
62266423Sjfv
63270346Sjfvstatic ixl_vendor_info_t ixl_vendor_info_array[] =
64266423Sjfv{
65266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
66266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
67266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
68266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
69266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
70266423Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
71270346Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
72284049Sjfv	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
73266423Sjfv	/* required last entry */
74266423Sjfv	{0, 0, 0, 0, 0}
75266423Sjfv};
76266423Sjfv
77266423Sjfv/*********************************************************************
78266423Sjfv *  Table of branding strings
79266423Sjfv *********************************************************************/
80266423Sjfv
81270346Sjfvstatic char    *ixl_strings[] = {
82266423Sjfv	"Intel(R) Ethernet Connection XL710 Driver"
83266423Sjfv};
84266423Sjfv
85266423Sjfv
86266423Sjfv/*********************************************************************
87266423Sjfv *  Function prototypes
88266423Sjfv *********************************************************************/
89270346Sjfvstatic int      ixl_probe(device_t);
90270346Sjfvstatic int      ixl_attach(device_t);
91270346Sjfvstatic int      ixl_detach(device_t);
92270346Sjfvstatic int      ixl_shutdown(device_t);
93270346Sjfvstatic int	ixl_get_hw_capabilities(struct ixl_pf *);
94270346Sjfvstatic void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
95270346Sjfvstatic int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
96270346Sjfvstatic void	ixl_init(void *);
97270346Sjfvstatic void	ixl_init_locked(struct ixl_pf *);
98270346Sjfvstatic void     ixl_stop(struct ixl_pf *);
99299547Serjstatic void	ixl_stop_locked(struct ixl_pf *);
100270346Sjfvstatic void     ixl_media_status(struct ifnet *, struct ifmediareq *);
101270346Sjfvstatic int      ixl_media_change(struct ifnet *);
102270346Sjfvstatic void     ixl_update_link_status(struct ixl_pf *);
103270346Sjfvstatic int      ixl_allocate_pci_resources(struct ixl_pf *);
104270346Sjfvstatic u16	ixl_get_bus_info(struct i40e_hw *, device_t);
105270346Sjfvstatic int	ixl_setup_stations(struct ixl_pf *);
106279033Sjfvstatic int	ixl_switch_config(struct ixl_pf *);
107270346Sjfvstatic int	ixl_initialize_vsi(struct ixl_vsi *);
108270346Sjfvstatic int	ixl_assign_vsi_msix(struct ixl_pf *);
109270346Sjfvstatic int	ixl_assign_vsi_legacy(struct ixl_pf *);
110270346Sjfvstatic int	ixl_init_msix(struct ixl_pf *);
111270346Sjfvstatic void	ixl_configure_msix(struct ixl_pf *);
112270346Sjfvstatic void	ixl_configure_itr(struct ixl_pf *);
113270346Sjfvstatic void	ixl_configure_legacy(struct ixl_pf *);
114299546Serjstatic void	ixl_init_taskqueues(struct ixl_pf *);
115299546Serjstatic void	ixl_free_taskqueues(struct ixl_pf *);
116299547Serjstatic void	ixl_free_interrupt_resources(struct ixl_pf *);
117270346Sjfvstatic void	ixl_free_pci_resources(struct ixl_pf *);
118270346Sjfvstatic void	ixl_local_timer(void *);
119270346Sjfvstatic int	ixl_setup_interface(device_t, struct ixl_vsi *);
120279858Sjfvstatic void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
121270346Sjfvstatic void	ixl_config_rss(struct ixl_vsi *);
122270346Sjfvstatic void	ixl_set_queue_rx_itr(struct ixl_queue *);
123270346Sjfvstatic void	ixl_set_queue_tx_itr(struct ixl_queue *);
124274205Sjfvstatic int	ixl_set_advertised_speeds(struct ixl_pf *, int);
125266423Sjfv
126279858Sjfvstatic int	ixl_enable_rings(struct ixl_vsi *);
127279858Sjfvstatic int	ixl_disable_rings(struct ixl_vsi *);
128279858Sjfvstatic void	ixl_enable_intr(struct ixl_vsi *);
129279858Sjfvstatic void	ixl_disable_intr(struct ixl_vsi *);
130279858Sjfvstatic void	ixl_disable_rings_intr(struct ixl_vsi *);
131266423Sjfv
132270346Sjfvstatic void     ixl_enable_adminq(struct i40e_hw *);
133270346Sjfvstatic void     ixl_disable_adminq(struct i40e_hw *);
134270346Sjfvstatic void     ixl_enable_queue(struct i40e_hw *, int);
135270346Sjfvstatic void     ixl_disable_queue(struct i40e_hw *, int);
136270346Sjfvstatic void     ixl_enable_legacy(struct i40e_hw *);
137270346Sjfvstatic void     ixl_disable_legacy(struct i40e_hw *);
138266423Sjfv
139270346Sjfvstatic void     ixl_set_promisc(struct ixl_vsi *);
140270346Sjfvstatic void     ixl_add_multi(struct ixl_vsi *);
141270346Sjfvstatic void     ixl_del_multi(struct ixl_vsi *);
142270346Sjfvstatic void	ixl_register_vlan(void *, struct ifnet *, u16);
143270346Sjfvstatic void	ixl_unregister_vlan(void *, struct ifnet *, u16);
144270346Sjfvstatic void	ixl_setup_vlan_filters(struct ixl_vsi *);
145266423Sjfv
146270346Sjfvstatic void	ixl_init_filters(struct ixl_vsi *);
147279858Sjfvstatic void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
148270346Sjfvstatic void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
149270346Sjfvstatic void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
150270346Sjfvstatic void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
151270346Sjfvstatic void	ixl_del_hw_filters(struct ixl_vsi *, int);
152270346Sjfvstatic struct ixl_mac_filter *
153270346Sjfv		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
154270346Sjfvstatic void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
155279858Sjfvstatic void	ixl_free_mac_filters(struct ixl_vsi *vsi);
156266423Sjfv
157299549Serj/* Sysctls */
158299549Serjstatic void	ixl_add_device_sysctls(struct ixl_pf *);
159279858Sjfv
160299549Serjstatic int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
161299549Serjstatic int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
162299549Serjstatic int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
163299549Serjstatic int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
164299549Serj
165299549Serj#ifdef IXL_DEBUG_SYSCTL
166299552Serjstatic int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
167299552Serjstatic void	ixl_print_debug_info(struct ixl_pf *);
168299552Serj
169299549Serjstatic int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
170299549Serjstatic int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
171299549Serjstatic int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
172299549Serjstatic int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
173299549Serjstatic int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
174299549Serj#endif
175299549Serj
176266423Sjfv/* The MSI/X Interrupt handlers */
177270346Sjfvstatic void	ixl_intr(void *);
178270346Sjfvstatic void	ixl_msix_que(void *);
179270346Sjfvstatic void	ixl_msix_adminq(void *);
180270346Sjfvstatic void	ixl_handle_mdd_event(struct ixl_pf *);
181266423Sjfv
182266423Sjfv/* Deferred interrupt tasklets */
183270346Sjfvstatic void	ixl_do_adminq(void *, int);
184266423Sjfv
185266423Sjfv/* Statistics */
186270346Sjfvstatic void     ixl_add_hw_stats(struct ixl_pf *);
187270346Sjfvstatic void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
188266423Sjfv		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
189270346Sjfvstatic void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
190266423Sjfv		    struct sysctl_oid_list *,
191266423Sjfv		    struct i40e_eth_stats *);
192270346Sjfvstatic void	ixl_update_stats_counters(struct ixl_pf *);
193270346Sjfvstatic void	ixl_update_eth_stats(struct ixl_vsi *);
194279858Sjfvstatic void	ixl_update_vsi_stats(struct ixl_vsi *);
195270346Sjfvstatic void	ixl_pf_reset_stats(struct ixl_pf *);
196270346Sjfvstatic void	ixl_vsi_reset_stats(struct ixl_vsi *);
197270346Sjfvstatic void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
198266423Sjfv		    u64 *, u64 *);
199270346Sjfvstatic void	ixl_stat_update32(struct i40e_hw *, u32, bool,
200266423Sjfv		    u64 *, u64 *);
201299547Serj/* NVM update */
202299547Serjstatic int	ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
203266423Sjfv
204266423Sjfv
205279858Sjfv#ifdef PCI_IOV
206279858Sjfvstatic int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
207279858Sjfv
208299546Serjstatic int	ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
209299546Serjstatic void	ixl_iov_uninit(device_t dev);
210279858Sjfvstatic int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
211279858Sjfv
212279858Sjfvstatic void	ixl_handle_vf_msg(struct ixl_pf *,
213279858Sjfv		    struct i40e_arq_event_info *);
214279858Sjfvstatic void	ixl_handle_vflr(void *arg, int pending);
215279858Sjfv
216279858Sjfvstatic void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
217279858Sjfvstatic void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
218279858Sjfv#endif
219279858Sjfv
220266423Sjfv/*********************************************************************
221266423Sjfv *  FreeBSD Device Interface Entry Points
222266423Sjfv *********************************************************************/
223266423Sjfv
224270346Sjfvstatic device_method_t ixl_methods[] = {
225266423Sjfv	/* Device interface */
226270346Sjfv	DEVMETHOD(device_probe, ixl_probe),
227270346Sjfv	DEVMETHOD(device_attach, ixl_attach),
228270346Sjfv	DEVMETHOD(device_detach, ixl_detach),
229270346Sjfv	DEVMETHOD(device_shutdown, ixl_shutdown),
230279858Sjfv#ifdef PCI_IOV
231299546Serj	DEVMETHOD(pci_iov_init, ixl_iov_init),
232299546Serj	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
233299546Serj	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
234279858Sjfv#endif
235266423Sjfv	{0, 0}
236266423Sjfv};
237266423Sjfv
238270346Sjfvstatic driver_t ixl_driver = {
239270346Sjfv	"ixl", ixl_methods, sizeof(struct ixl_pf),
240266423Sjfv};
241266423Sjfv
242270346Sjfvdevclass_t ixl_devclass;
243270346SjfvDRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
244266423Sjfv
245270346SjfvMODULE_DEPEND(ixl, pci, 1, 1, 1);
246270346SjfvMODULE_DEPEND(ixl, ether, 1, 1, 1);
247279860Sjfv#ifdef DEV_NETMAP
248279860SjfvMODULE_DEPEND(ixl, netmap, 1, 1, 1);
249279860Sjfv#endif /* DEV_NETMAP */
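
/*
** Note: when built as a module this driver is normally if_ixl.ko, so it
** can be loaded at boot by adding (module name assumed here):
**
**     if_ixl_load="YES"
**
** to /boot/loader.conf, or at runtime with kldload if_ixl.
*/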
250279860Sjfv
251266423Sjfv/*
252269198Sjfv** Global reset mutex
253269198Sjfv*/
254270346Sjfvstatic struct mtx ixl_reset_mtx;
255269198Sjfv
256269198Sjfv/*
257270346Sjfv** TUNEABLE PARAMETERS:
258270346Sjfv*/
259270346Sjfv
260270346Sjfvstatic SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
261270346Sjfv                   "IXL driver parameters");
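
/*
** Example: the parameters below are boot-time tunables (CTLFLAG_RDTUN),
** so they are set from /boot/loader.conf and read back at runtime with
** `sysctl hw.ixl`. Hypothetical example values:
**
**     hw.ixl.enable_msix=1
**     hw.ixl.ringsz=1024
**     hw.ixl.max_queues=4
*/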
262270346Sjfv
263270346Sjfv/*
264266423Sjfv * MSIX should be the default for best performance,
265266423Sjfv * but this allows it to be forced off for testing.
266266423Sjfv */
267270346Sjfvstatic int ixl_enable_msix = 1;
268270346SjfvTUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
269270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
270270346Sjfv    "Enable MSI-X interrupts");
271266423Sjfv
272266423Sjfv/*
273266423Sjfv** Number of descriptors per ring:
274266423Sjfv**   - TX and RX are the same size
275266423Sjfv*/
276270346Sjfvstatic int ixl_ringsz = DEFAULT_RING;
277270346SjfvTUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
278270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
279270346Sjfv    &ixl_ringsz, 0, "Descriptor Ring Size");
280266423Sjfv
281266423Sjfv/*
282266423Sjfv** This can be set manually; if left as 0, the
283266423Sjfv** number of queues will be calculated based
284266423Sjfv** on the CPUs and MSI-X vectors available.
285266423Sjfv*/
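/*
** For example, on a system with 8 CPUs and enough MSI-X vectors, leaving
** this at 0 would typically result in 8 queue pairs plus one vector
** reserved for the admin queue (see ixl_init_msix() for the exact logic).
*/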
286270346Sjfvint ixl_max_queues = 0;
287270346SjfvTUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
288270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
289270346Sjfv    &ixl_max_queues, 0, "Number of Queues");
290266423Sjfv
291266423Sjfv/*
292266423Sjfv** Controls for Interrupt Throttling
293266423Sjfv**	- true/false for dynamic adjustment
294266423Sjfv** 	- default values for static ITR
295266423Sjfv*/
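/*
** Note: the static defaults below are hardware interval encodings rather
** than interrupt rates; the hardware measures ITR in 2 usec units, so
** IXL_ITR_8K corresponds roughly to 8000 interrupts/sec (~125 usec
** between interrupts) and IXL_ITR_4K to roughly 4000/sec (~250 usec).
** See ixl.h for the exact encodings.
*/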
296270346Sjfvint ixl_dynamic_rx_itr = 0;
297270346SjfvTUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
298270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
299270346Sjfv    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
300266423Sjfv
301270346Sjfvint ixl_dynamic_tx_itr = 0;
302270346SjfvTUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
303270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
304270346Sjfv    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
305266423Sjfv
306270346Sjfvint ixl_rx_itr = IXL_ITR_8K;
307270346SjfvTUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
308270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
309270346Sjfv    &ixl_rx_itr, 0, "RX Interrupt Rate");
310270346Sjfv
311270346Sjfvint ixl_tx_itr = IXL_ITR_4K;
312270346SjfvTUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
313270346SjfvSYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
314270346Sjfv    &ixl_tx_itr, 0, "TX Interrupt Rate");
315270346Sjfv
316270346Sjfv#ifdef IXL_FDIR
317270346Sjfvstatic int ixl_enable_fdir = 1;
318270346SjfvTUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
319266423Sjfv/* Rate at which we sample */
320270346Sjfvint ixl_atr_rate = 20;
321270346SjfvTUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
322266423Sjfv#endif
323266423Sjfv
324279860Sjfv#ifdef DEV_NETMAP
325279860Sjfv#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
326279860Sjfv#include <dev/netmap/if_ixl_netmap.h>
327279860Sjfv#endif /* DEV_NETMAP */
328274205Sjfv
329270346Sjfvstatic char *ixl_fc_string[6] = {
330266423Sjfv	"None",
331266423Sjfv	"Rx",
332266423Sjfv	"Tx",
333266423Sjfv	"Full",
334266423Sjfv	"Priority",
335266423Sjfv	"Default"
336266423Sjfv};
337266423Sjfv
338279858Sjfvstatic MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
339269198Sjfv
340279858Sjfvstatic uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
341279858Sjfv    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
342279858Sjfv
343266423Sjfv/*********************************************************************
344266423Sjfv *  Device identification routine
345266423Sjfv *
346270346Sjfv *  ixl_probe determines if the driver should be loaded on
347266423Sjfv *  the hardware based on PCI vendor/device id of the device.
348266423Sjfv *
349266423Sjfv *  return BUS_PROBE_DEFAULT on success, positive on failure
350266423Sjfv *********************************************************************/
351266423Sjfv
352266423Sjfvstatic int
353270346Sjfvixl_probe(device_t dev)
354266423Sjfv{
355270346Sjfv	ixl_vendor_info_t *ent;
356266423Sjfv
357266423Sjfv	u16	pci_vendor_id, pci_device_id;
358266423Sjfv	u16	pci_subvendor_id, pci_subdevice_id;
359266423Sjfv	char	device_name[256];
360269198Sjfv	static bool lock_init = FALSE;
361266423Sjfv
362299552Serj#if 0
363270346Sjfv	INIT_DEBUGOUT("ixl_probe: begin");
364299552Serj#endif
365266423Sjfv	pci_vendor_id = pci_get_vendor(dev);
366266423Sjfv	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
367266423Sjfv		return (ENXIO);
368266423Sjfv
369266423Sjfv	pci_device_id = pci_get_device(dev);
370266423Sjfv	pci_subvendor_id = pci_get_subvendor(dev);
371266423Sjfv	pci_subdevice_id = pci_get_subdevice(dev);
372266423Sjfv
373270346Sjfv	ent = ixl_vendor_info_array;
374266423Sjfv	while (ent->vendor_id != 0) {
375266423Sjfv		if ((pci_vendor_id == ent->vendor_id) &&
376266423Sjfv		    (pci_device_id == ent->device_id) &&
377266423Sjfv
378266423Sjfv		    ((pci_subvendor_id == ent->subvendor_id) ||
379266423Sjfv		     (ent->subvendor_id == 0)) &&
380266423Sjfv
381266423Sjfv		    ((pci_subdevice_id == ent->subdevice_id) ||
382266423Sjfv		     (ent->subdevice_id == 0))) {
383266423Sjfv			sprintf(device_name, "%s, Version - %s",
384270346Sjfv				ixl_strings[ent->index],
385270346Sjfv				ixl_driver_version);
386266423Sjfv			device_set_desc_copy(dev, device_name);
387269198Sjfv			/* One shot mutex init */
388269198Sjfv			if (lock_init == FALSE) {
389269198Sjfv				lock_init = TRUE;
390270346Sjfv				mtx_init(&ixl_reset_mtx,
391270346Sjfv				    "ixl_reset",
392270346Sjfv				    "IXL RESET Lock", MTX_DEF);
393269198Sjfv			}
394266423Sjfv			return (BUS_PROBE_DEFAULT);
395266423Sjfv		}
396266423Sjfv		ent++;
397266423Sjfv	}
398266423Sjfv	return (ENXIO);
399266423Sjfv}
400266423Sjfv
401266423Sjfv/*********************************************************************
402266423Sjfv *  Device initialization routine
403266423Sjfv *
404266423Sjfv *  The attach entry point is called when the driver is being loaded.
405266423Sjfv *  This routine identifies the type of hardware, allocates all resources
406266423Sjfv *  and initializes the hardware.
407266423Sjfv *
408266423Sjfv *  return 0 on success, positive on failure
409266423Sjfv *********************************************************************/
410266423Sjfv
411266423Sjfvstatic int
412270346Sjfvixl_attach(device_t dev)
413266423Sjfv{
414270346Sjfv	struct ixl_pf	*pf;
415266423Sjfv	struct i40e_hw	*hw;
416299552Serj	struct ixl_vsi  *vsi;
417266423Sjfv	u16		bus;
418266423Sjfv	int             error = 0;
419279858Sjfv#ifdef PCI_IOV
420279858Sjfv	nvlist_t	*pf_schema, *vf_schema;
421279858Sjfv	int		iov_error;
422279858Sjfv#endif
423266423Sjfv
424270346Sjfv	INIT_DEBUGOUT("ixl_attach: begin");
425266423Sjfv
426266423Sjfv	/* Allocate, clear, and link in our primary soft structure */
427266423Sjfv	pf = device_get_softc(dev);
428266423Sjfv	pf->dev = pf->osdep.dev = dev;
429266423Sjfv	hw = &pf->hw;
430266423Sjfv
431266423Sjfv	/*
432266423Sjfv	** Note this assumes we have a single embedded VSI;
433266423Sjfv	** this could be enhanced later to allocate multiple.
434266423Sjfv	*/
435266423Sjfv	vsi = &pf->vsi;
436266423Sjfv	vsi->dev = pf->dev;
437266423Sjfv
438266423Sjfv	/* Core Lock Init*/
439270346Sjfv	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
440266423Sjfv
441266423Sjfv	/* Set up the timer callout */
442266423Sjfv	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
443266423Sjfv
444274205Sjfv	/* Save off the PCI information */
445266423Sjfv	hw->vendor_id = pci_get_vendor(dev);
446266423Sjfv	hw->device_id = pci_get_device(dev);
447266423Sjfv	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
448266423Sjfv	hw->subsystem_vendor_id =
449266423Sjfv	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
450266423Sjfv	hw->subsystem_device_id =
451266423Sjfv	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
452266423Sjfv
453269198Sjfv	hw->bus.device = pci_get_slot(dev);
454266423Sjfv	hw->bus.func = pci_get_function(dev);
455266423Sjfv
456279858Sjfv	pf->vc_debug_lvl = 1;
457279858Sjfv
458266423Sjfv	/* Do PCI setup - map BAR0, etc */
459270346Sjfv	if (ixl_allocate_pci_resources(pf)) {
460266423Sjfv		device_printf(dev, "Allocation of PCI resources failed\n");
461266423Sjfv		error = ENXIO;
462266423Sjfv		goto err_out;
463266423Sjfv	}
464266423Sjfv
465266423Sjfv	/* Establish a clean starting point */
466269198Sjfv	i40e_clear_hw(hw);
467266423Sjfv	error = i40e_pf_reset(hw);
468266423Sjfv	if (error) {
469299549Serj		device_printf(dev, "PF reset failure %d\n", error);
470269198Sjfv		error = EIO;
471269198Sjfv		goto err_out;
472269198Sjfv	}
473266423Sjfv
474266423Sjfv	/* Set admin queue parameters */
475270346Sjfv	hw->aq.num_arq_entries = IXL_AQ_LEN;
476270346Sjfv	hw->aq.num_asq_entries = IXL_AQ_LEN;
477270346Sjfv	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
478270346Sjfv	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
479266423Sjfv
480299549Serj	/* Initialize mac filter list for VSI */
481299549Serj	SLIST_INIT(&vsi->ftl);
482299549Serj
483266423Sjfv	/* Initialize the shared code */
484266423Sjfv	error = i40e_init_shared_code(hw);
485266423Sjfv	if (error) {
486299549Serj		device_printf(dev, "Unable to initialize shared code, error %d\n",
487299549Serj		    error);
488266423Sjfv		error = EIO;
489266423Sjfv		goto err_out;
490266423Sjfv	}
491266423Sjfv
492266423Sjfv	/* Set up the admin queue */
493266423Sjfv	error = i40e_init_adminq(hw);
494299549Serj	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
495299549Serj		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
496299549Serj		    error);
497299549Serj		error = EIO;
498299549Serj		goto err_out;
499299549Serj	}
500299552Serj	ixl_print_nvm_version(pf);
501299552Serj
502299549Serj	if (error == I40E_ERR_FIRMWARE_API_VERSION) {
503269198Sjfv		device_printf(dev, "The driver for the device stopped "
504269198Sjfv		    "because the NVM image is newer than expected.\n"
505269198Sjfv		    "You must install the most recent version of "
506299549Serj		    "the network driver.\n");
507299549Serj		error = EIO;
508266423Sjfv		goto err_out;
509266423Sjfv	}
510266423Sjfv
511269198Sjfv        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
512269198Sjfv	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
513269198Sjfv		device_printf(dev, "The driver for the device detected "
514269198Sjfv		    "a newer version of the NVM image than expected.\n"
515269198Sjfv		    "Please install the most recent version of the network driver.\n");
516269198Sjfv	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
517269198Sjfv	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
518269198Sjfv		device_printf(dev, "The driver for the device detected "
519269198Sjfv		    "an older version of the NVM image than expected.\n"
520269198Sjfv		    "Please update the NVM image.\n");
521266423Sjfv
522266423Sjfv	/* Clear PXE mode */
523266423Sjfv	i40e_clear_pxe_mode(hw);
524266423Sjfv
525266423Sjfv	/* Get capabilities from the device */
526270346Sjfv	error = ixl_get_hw_capabilities(pf);
527266423Sjfv	if (error) {
528266423Sjfv		device_printf(dev, "HW capabilities failure!\n");
529266423Sjfv		goto err_get_cap;
530266423Sjfv	}
531266423Sjfv
532266423Sjfv	/* Set up host memory cache */
533279858Sjfv	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
534279858Sjfv	    hw->func_caps.num_rx_qp, 0, 0);
535266423Sjfv	if (error) {
536266423Sjfv		device_printf(dev, "init_lan_hmc failed: %d\n", error);
537266423Sjfv		goto err_get_cap;
538266423Sjfv	}
539266423Sjfv
540266423Sjfv	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
541266423Sjfv	if (error) {
542266423Sjfv		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
543266423Sjfv		goto err_mac_hmc;
544266423Sjfv	}
545266423Sjfv
546269198Sjfv	/* Disable LLDP from the firmware */
547269198Sjfv	i40e_aq_stop_lldp(hw, TRUE, NULL);
548269198Sjfv
549266423Sjfv	i40e_get_mac_addr(hw, hw->mac.addr);
550266423Sjfv	error = i40e_validate_mac_addr(hw->mac.addr);
551266423Sjfv	if (error) {
552266423Sjfv		device_printf(dev, "validate_mac_addr failed: %d\n", error);
553266423Sjfv		goto err_mac_hmc;
554266423Sjfv	}
555266423Sjfv	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
556266423Sjfv	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
557266423Sjfv
558274205Sjfv	/* Set up VSI and queues */
559270346Sjfv	if (ixl_setup_stations(pf) != 0) {
560266423Sjfv		device_printf(dev, "setup stations failed!\n");
561266423Sjfv		error = ENOMEM;
562266423Sjfv		goto err_mac_hmc;
563266423Sjfv	}
564266423Sjfv
565279033Sjfv	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
566279033Sjfv	    (hw->aq.fw_maj_ver < 4)) {
567279033Sjfv		i40e_msec_delay(75);
568279033Sjfv		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
569299547Serj		if (error) {
570279033Sjfv			device_printf(dev, "link restart failed, aq_err=%d\n",
571279033Sjfv			    pf->hw.aq.asq_last_status);
572299547Serj			goto err_late;
573299547Serj		}
574270346Sjfv	}
575279033Sjfv
576266423Sjfv	/* Determine link state */
577299547Serj	hw->phy.get_link_info = TRUE;
578284049Sjfv	i40e_get_link_status(hw, &pf->link_up);
579266423Sjfv
580299547Serj	/* Setup OS network interface / ifnet */
581274205Sjfv	if (ixl_setup_interface(dev, vsi) != 0) {
582274205Sjfv		device_printf(dev, "interface setup failed!\n");
583274205Sjfv		error = EIO;
584266423Sjfv		goto err_late;
585274205Sjfv	}
586266423Sjfv
587279033Sjfv	error = ixl_switch_config(pf);
588279033Sjfv	if (error) {
589299547Serj		device_printf(dev, "Initial ixl_switch_config() failed: %d\n", error);
590299546Serj		goto err_late;
591279033Sjfv	}
592279033Sjfv
593299547Serj	/* Limit PHY interrupts to link, autoneg, and module failure */
594299548Serj	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
595299547Serj	    NULL);
596299547Serj        if (error) {
597299547Serj		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %d,"
598299547Serj		    " aq_err %d\n", error, hw->aq.asq_last_status);
599299547Serj		goto err_late;
600299547Serj	}
601279033Sjfv
602266423Sjfv	/* Get the bus configuration and set the shared code */
603270346Sjfv	bus = ixl_get_bus_info(hw, dev);
604266423Sjfv	i40e_set_pci_config_data(hw, bus);
605266423Sjfv
606299546Serj	/* Initialize taskqueues */
607299546Serj	ixl_init_taskqueues(pf);
608299546Serj
609299549Serj	/* Initialize statistics & add sysctls */
610299549Serj	ixl_add_device_sysctls(pf);
611299549Serj
612270346Sjfv	ixl_pf_reset_stats(pf);
613270346Sjfv	ixl_update_stats_counters(pf);
614270346Sjfv	ixl_add_hw_stats(pf);
615266423Sjfv
616266423Sjfv	/* Register for VLAN events */
617266423Sjfv	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
618270346Sjfv	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
619266423Sjfv	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
620270346Sjfv	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
621266423Sjfv
622279858Sjfv#ifdef PCI_IOV
623279858Sjfv	/* SR-IOV is only supported when MSI-X is in use. */
624279858Sjfv	if (pf->msix > 1) {
625279858Sjfv		pf_schema = pci_iov_schema_alloc_node();
626279858Sjfv		vf_schema = pci_iov_schema_alloc_node();
627279858Sjfv		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
628279858Sjfv		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
629279858Sjfv		    IOV_SCHEMA_HASDEFAULT, TRUE);
630279858Sjfv		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
631279858Sjfv		    IOV_SCHEMA_HASDEFAULT, FALSE);
632279858Sjfv		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
633279858Sjfv		    IOV_SCHEMA_HASDEFAULT, FALSE);
634274205Sjfv
635279858Sjfv		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
636299552Serj		if (iov_error != 0) {
637279858Sjfv			device_printf(dev,
638279858Sjfv			    "Failed to initialize SR-IOV (error=%d)\n",
639279858Sjfv			    iov_error);
640299552Serj		} else
641299552Serj			device_printf(dev, "SR-IOV ready\n");
642279858Sjfv	}
643279858Sjfv#endif
644279858Sjfv
645279860Sjfv#ifdef DEV_NETMAP
646279860Sjfv	ixl_netmap_attach(vsi);
647279860Sjfv#endif /* DEV_NETMAP */
648270346Sjfv	INIT_DEBUGOUT("ixl_attach: end");
649266423Sjfv	return (0);
650266423Sjfv
651266423Sjfverr_late:
652274205Sjfv	if (vsi->ifp != NULL)
653274205Sjfv		if_free(vsi->ifp);
654266423Sjfverr_mac_hmc:
655266423Sjfv	i40e_shutdown_lan_hmc(hw);
656266423Sjfverr_get_cap:
657266423Sjfv	i40e_shutdown_adminq(hw);
658266423Sjfverr_out:
659270346Sjfv	ixl_free_pci_resources(pf);
660274205Sjfv	ixl_free_vsi(vsi);
661270346Sjfv	IXL_PF_LOCK_DESTROY(pf);
662266423Sjfv	return (error);
663266423Sjfv}
664266423Sjfv
665266423Sjfv/*********************************************************************
666266423Sjfv *  Device removal routine
667266423Sjfv *
668266423Sjfv *  The detach entry point is called when the driver is being removed.
669266423Sjfv *  This routine stops the adapter and deallocates all the resources
670266423Sjfv *  that were allocated for driver operation.
671266423Sjfv *
672266423Sjfv *  return 0 on success, positive on failure
673266423Sjfv *********************************************************************/
674266423Sjfv
675266423Sjfvstatic int
676270346Sjfvixl_detach(device_t dev)
677266423Sjfv{
678270346Sjfv	struct ixl_pf		*pf = device_get_softc(dev);
679266423Sjfv	struct i40e_hw		*hw = &pf->hw;
680270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
681266423Sjfv	i40e_status		status;
682279858Sjfv#ifdef PCI_IOV
683279858Sjfv	int			error;
684279858Sjfv#endif
685266423Sjfv
686270346Sjfv	INIT_DEBUGOUT("ixl_detach: begin");
687266423Sjfv
688266423Sjfv	/* Make sure VLANS are not using driver */
689266423Sjfv	if (vsi->ifp->if_vlantrunk != NULL) {
690266423Sjfv		device_printf(dev, "VLAN in use, detach first\n");
691266423Sjfv		return (EBUSY);
692266423Sjfv	}
693266423Sjfv
694279858Sjfv#ifdef PCI_IOV
695279858Sjfv	error = pci_iov_detach(dev);
696279858Sjfv	if (error != 0) {
697279858Sjfv		device_printf(dev, "SR-IOV in use; detach first.\n");
698279858Sjfv		return (error);
699279858Sjfv	}
700279858Sjfv#endif
701279858Sjfv
702279033Sjfv	ether_ifdetach(vsi->ifp);
703299547Serj	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
704279033Sjfv		ixl_stop(pf);
705266423Sjfv
706299546Serj	ixl_free_taskqueues(pf);
707266423Sjfv
708266423Sjfv	/* Shutdown LAN HMC */
709266423Sjfv	status = i40e_shutdown_lan_hmc(hw);
710266423Sjfv	if (status)
711266423Sjfv		device_printf(dev,
712266423Sjfv		    "Shutdown LAN HMC failed with code %d\n", status);
713266423Sjfv
714266423Sjfv	/* Shutdown admin queue */
715266423Sjfv	status = i40e_shutdown_adminq(hw);
716266423Sjfv	if (status)
717266423Sjfv		device_printf(dev,
718266423Sjfv		    "Shutdown Admin queue failed with code %d\n", status);
719266423Sjfv
720266423Sjfv	/* Unregister VLAN events */
721266423Sjfv	if (vsi->vlan_attach != NULL)
722266423Sjfv		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
723266423Sjfv	if (vsi->vlan_detach != NULL)
724266423Sjfv		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
725266423Sjfv
726266423Sjfv	callout_drain(&pf->timer);
727279860Sjfv#ifdef DEV_NETMAP
728279860Sjfv	netmap_detach(vsi->ifp);
729279860Sjfv#endif /* DEV_NETMAP */
730270346Sjfv	ixl_free_pci_resources(pf);
731266423Sjfv	bus_generic_detach(dev);
732266423Sjfv	if_free(vsi->ifp);
733270346Sjfv	ixl_free_vsi(vsi);
734270346Sjfv	IXL_PF_LOCK_DESTROY(pf);
735266423Sjfv	return (0);
736266423Sjfv}
737266423Sjfv
738266423Sjfv/*********************************************************************
739266423Sjfv *
740266423Sjfv *  Shutdown entry point
741266423Sjfv *
742266423Sjfv **********************************************************************/
743266423Sjfv
744266423Sjfvstatic int
745270346Sjfvixl_shutdown(device_t dev)
746266423Sjfv{
747270346Sjfv	struct ixl_pf *pf = device_get_softc(dev);
748270346Sjfv	ixl_stop(pf);
749266423Sjfv	return (0);
750266423Sjfv}
751266423Sjfv
752266423Sjfv
753266423Sjfv/*********************************************************************
754266423Sjfv *
755266423Sjfv *  Get the hardware capabilities
756266423Sjfv *
757266423Sjfv **********************************************************************/
758266423Sjfv
759266423Sjfvstatic int
760270346Sjfvixl_get_hw_capabilities(struct ixl_pf *pf)
761266423Sjfv{
762266423Sjfv	struct i40e_aqc_list_capabilities_element_resp *buf;
763266423Sjfv	struct i40e_hw	*hw = &pf->hw;
764266423Sjfv	device_t 	dev = pf->dev;
765266423Sjfv	int             error, len;
766266423Sjfv	u16		needed;
767266423Sjfv	bool		again = TRUE;
768266423Sjfv
769266423Sjfv	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
770266423Sjfvretry:
771266423Sjfv	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
772266423Sjfv	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
773266423Sjfv		device_printf(dev, "Unable to allocate cap memory\n");
774266423Sjfv                return (ENOMEM);
775266423Sjfv	}
776266423Sjfv
777266423Sjfv	/* This populates the hw struct */
778266423Sjfv        error = i40e_aq_discover_capabilities(hw, buf, len,
779266423Sjfv	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
780266423Sjfv	free(buf, M_DEVBUF);
781266423Sjfv	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
782266423Sjfv	    (again == TRUE)) {
783266423Sjfv		/* retry once with a larger buffer */
784266423Sjfv		again = FALSE;
785266423Sjfv		len = needed;
786266423Sjfv		goto retry;
787266423Sjfv	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
788266423Sjfv		device_printf(dev, "capability discovery failed: %d\n",
789266423Sjfv		    pf->hw.aq.asq_last_status);
790266423Sjfv		return (ENODEV);
791266423Sjfv	}
792266423Sjfv
793266423Sjfv	/* Capture this PF's starting queue pair */
794266423Sjfv	pf->qbase = hw->func_caps.base_queue;
795266423Sjfv
796270346Sjfv#ifdef IXL_DEBUG
797266423Sjfv	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
798266423Sjfv	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
799266423Sjfv	    hw->pf_id, hw->func_caps.num_vfs,
800266423Sjfv	    hw->func_caps.num_msix_vectors,
801266423Sjfv	    hw->func_caps.num_msix_vectors_vf,
802266423Sjfv	    hw->func_caps.fd_filters_guaranteed,
803266423Sjfv	    hw->func_caps.fd_filters_best_effort,
804266423Sjfv	    hw->func_caps.num_tx_qp,
805266423Sjfv	    hw->func_caps.num_rx_qp,
806266423Sjfv	    hw->func_caps.base_queue);
807266423Sjfv#endif
808266423Sjfv	return (error);
809266423Sjfv}
810266423Sjfv
811266423Sjfvstatic void
812270346Sjfvixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
813266423Sjfv{
814266423Sjfv	device_t 	dev = vsi->dev;
815266423Sjfv
816266423Sjfv	/* Enable/disable TXCSUM/TSO4 */
817266423Sjfv	if (!(ifp->if_capenable & IFCAP_TXCSUM)
818266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO4)) {
819266423Sjfv		if (mask & IFCAP_TXCSUM) {
820266423Sjfv			ifp->if_capenable |= IFCAP_TXCSUM;
821266423Sjfv			/* enable TXCSUM, restore TSO if previously enabled */
822270346Sjfv			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
823270346Sjfv				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
824266423Sjfv				ifp->if_capenable |= IFCAP_TSO4;
825266423Sjfv			}
826266423Sjfv		}
827266423Sjfv		else if (mask & IFCAP_TSO4) {
828266423Sjfv			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
829270346Sjfv			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
830266423Sjfv			device_printf(dev,
831266423Sjfv			    "TSO4 requires txcsum, enabling both...\n");
832266423Sjfv		}
833266423Sjfv	} else if((ifp->if_capenable & IFCAP_TXCSUM)
834266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO4)) {
835266423Sjfv		if (mask & IFCAP_TXCSUM)
836266423Sjfv			ifp->if_capenable &= ~IFCAP_TXCSUM;
837266423Sjfv		else if (mask & IFCAP_TSO4)
838266423Sjfv			ifp->if_capenable |= IFCAP_TSO4;
839266423Sjfv	} else if((ifp->if_capenable & IFCAP_TXCSUM)
840266423Sjfv	    && (ifp->if_capenable & IFCAP_TSO4)) {
841266423Sjfv		if (mask & IFCAP_TXCSUM) {
842270346Sjfv			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
843266423Sjfv			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
844266423Sjfv			device_printf(dev,
845266423Sjfv			    "TSO4 requires txcsum, disabling both...\n");
846266423Sjfv		} else if (mask & IFCAP_TSO4)
847266423Sjfv			ifp->if_capenable &= ~IFCAP_TSO4;
848266423Sjfv	}
849266423Sjfv
850266423Sjfv	/* Enable/disable TXCSUM_IPV6/TSO6 */
851266423Sjfv	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
852266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO6)) {
853266423Sjfv		if (mask & IFCAP_TXCSUM_IPV6) {
854266423Sjfv			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
855270346Sjfv			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
856270346Sjfv				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
857266423Sjfv				ifp->if_capenable |= IFCAP_TSO6;
858266423Sjfv			}
859266423Sjfv		} else if (mask & IFCAP_TSO6) {
860266423Sjfv			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
861270346Sjfv			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
862266423Sjfv			device_printf(dev,
863266423Sjfv			    "TSO6 requires txcsum6, enabling both...\n");
864266423Sjfv		}
865266423Sjfv	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
866266423Sjfv	    && !(ifp->if_capenable & IFCAP_TSO6)) {
867266423Sjfv		if (mask & IFCAP_TXCSUM_IPV6)
868266423Sjfv			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
869266423Sjfv		else if (mask & IFCAP_TSO6)
870266423Sjfv			ifp->if_capenable |= IFCAP_TSO6;
871266423Sjfv	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
872266423Sjfv	    && (ifp->if_capenable & IFCAP_TSO6)) {
873266423Sjfv		if (mask & IFCAP_TXCSUM_IPV6) {
874270346Sjfv			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
875266423Sjfv			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
876266423Sjfv			device_printf(dev,
877266423Sjfv			    "TSO6 requires txcsum6, disabling both...\n");
878266423Sjfv		} else if (mask & IFCAP_TSO6)
879266423Sjfv			ifp->if_capenable &= ~IFCAP_TSO6;
880266423Sjfv	}
881266423Sjfv}
882266423Sjfv
883266423Sjfv/*********************************************************************
884266423Sjfv *  Ioctl entry point
885266423Sjfv *
886270346Sjfv *  ixl_ioctl is called when the user wants to configure the
887266423Sjfv *  interface.
888266423Sjfv *
889266423Sjfv *  return 0 on success, positive on failure
890266423Sjfv **********************************************************************/
891266423Sjfv
892266423Sjfvstatic int
893270346Sjfvixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
894266423Sjfv{
895270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
896279858Sjfv	struct ixl_pf	*pf = vsi->back;
897299547Serj	struct ifreq	*ifr = (struct ifreq *)data;
898299547Serj	struct ifdrv	*ifd = (struct ifdrv *)data;
899266423Sjfv#if defined(INET) || defined(INET6)
900266423Sjfv	struct ifaddr *ifa = (struct ifaddr *)data;
901266423Sjfv	bool		avoid_reset = FALSE;
902266423Sjfv#endif
903266423Sjfv	int             error = 0;
904266423Sjfv
905266423Sjfv	switch (command) {
906266423Sjfv
907266423Sjfv        case SIOCSIFADDR:
908266423Sjfv#ifdef INET
909266423Sjfv		if (ifa->ifa_addr->sa_family == AF_INET)
910266423Sjfv			avoid_reset = TRUE;
911266423Sjfv#endif
912266423Sjfv#ifdef INET6
913266423Sjfv		if (ifa->ifa_addr->sa_family == AF_INET6)
914266423Sjfv			avoid_reset = TRUE;
915266423Sjfv#endif
916266423Sjfv#if defined(INET) || defined(INET6)
917266423Sjfv		/*
918266423Sjfv		** Calling init results in link renegotiation,
919266423Sjfv		** so we avoid doing it when possible.
920266423Sjfv		*/
921266423Sjfv		if (avoid_reset) {
922266423Sjfv			ifp->if_flags |= IFF_UP;
923266423Sjfv			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
924270346Sjfv				ixl_init(pf);
925271900Sbz#ifdef INET
926266423Sjfv			if (!(ifp->if_flags & IFF_NOARP))
927266423Sjfv				arp_ifinit(ifp, ifa);
928271900Sbz#endif
929266423Sjfv		} else
930266423Sjfv			error = ether_ioctl(ifp, command, data);
931266423Sjfv		break;
932266423Sjfv#endif
933266423Sjfv	case SIOCSIFMTU:
934266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
935270346Sjfv		if (ifr->ifr_mtu > IXL_MAX_FRAME -
936266423Sjfv		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
937266423Sjfv			error = EINVAL;
938266423Sjfv		} else {
939270346Sjfv			IXL_PF_LOCK(pf);
940266423Sjfv			ifp->if_mtu = ifr->ifr_mtu;
941266423Sjfv			vsi->max_frame_size =
942266423Sjfv				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
943266423Sjfv			    + ETHER_VLAN_ENCAP_LEN;
944270346Sjfv			ixl_init_locked(pf);
945270346Sjfv			IXL_PF_UNLOCK(pf);
946266423Sjfv		}
947266423Sjfv		break;
948266423Sjfv	case SIOCSIFFLAGS:
949266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
950270346Sjfv		IXL_PF_LOCK(pf);
951266423Sjfv		if (ifp->if_flags & IFF_UP) {
952266423Sjfv			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
953266423Sjfv				if ((ifp->if_flags ^ pf->if_flags) &
954266423Sjfv				    (IFF_PROMISC | IFF_ALLMULTI)) {
955270346Sjfv					ixl_set_promisc(vsi);
956266423Sjfv				}
957299547Serj			} else {
958299547Serj				IXL_PF_UNLOCK(pf);
959299547Serj				ixl_init(pf);
960299547Serj				IXL_PF_LOCK(pf);
961299547Serj			}
962299547Serj		} else {
963299547Serj			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
964299547Serj				IXL_PF_UNLOCK(pf);
965270346Sjfv				ixl_stop(pf);
966299547Serj				IXL_PF_LOCK(pf);
967299547Serj			}
968299547Serj		}
969266423Sjfv		pf->if_flags = ifp->if_flags;
970270346Sjfv		IXL_PF_UNLOCK(pf);
971266423Sjfv		break;
972299547Serj	case SIOCSDRVSPEC:
973299547Serj	case SIOCGDRVSPEC:
974299547Serj		IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
975299547Serj		    "Info)\n");
976299547Serj
977299547Serj		/* NVM update command */
978299547Serj		if (ifd->ifd_cmd == I40E_NVM_ACCESS)
979299547Serj			error = ixl_handle_nvmupd_cmd(pf, ifd);
980299547Serj		else
981299547Serj			error = EINVAL;
982299547Serj		break;
983266423Sjfv	case SIOCADDMULTI:
984266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
985266423Sjfv		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
986270346Sjfv			IXL_PF_LOCK(pf);
987270346Sjfv			ixl_disable_intr(vsi);
988270346Sjfv			ixl_add_multi(vsi);
989270346Sjfv			ixl_enable_intr(vsi);
990270346Sjfv			IXL_PF_UNLOCK(pf);
991266423Sjfv		}
992266423Sjfv		break;
993266423Sjfv	case SIOCDELMULTI:
994266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
995266423Sjfv		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
996270346Sjfv			IXL_PF_LOCK(pf);
997270346Sjfv			ixl_disable_intr(vsi);
998270346Sjfv			ixl_del_multi(vsi);
999270346Sjfv			ixl_enable_intr(vsi);
1000270346Sjfv			IXL_PF_UNLOCK(pf);
1001266423Sjfv		}
1002266423Sjfv		break;
1003266423Sjfv	case SIOCSIFMEDIA:
1004266423Sjfv	case SIOCGIFMEDIA:
1005284049Sjfv#ifdef IFM_ETH_XTYPE
1006284049Sjfv	case SIOCGIFXMEDIA:
1007284049Sjfv#endif
1008266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1009266423Sjfv		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
1010266423Sjfv		break;
1011266423Sjfv	case SIOCSIFCAP:
1012266423Sjfv	{
1013266423Sjfv		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1014266423Sjfv		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
1015266423Sjfv
1016270346Sjfv		ixl_cap_txcsum_tso(vsi, ifp, mask);
1017266423Sjfv
1018266423Sjfv		if (mask & IFCAP_RXCSUM)
1019266423Sjfv			ifp->if_capenable ^= IFCAP_RXCSUM;
1020266423Sjfv		if (mask & IFCAP_RXCSUM_IPV6)
1021266423Sjfv			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1022266423Sjfv		if (mask & IFCAP_LRO)
1023266423Sjfv			ifp->if_capenable ^= IFCAP_LRO;
1024266423Sjfv		if (mask & IFCAP_VLAN_HWTAGGING)
1025266423Sjfv			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1026266423Sjfv		if (mask & IFCAP_VLAN_HWFILTER)
1027266423Sjfv			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1028266423Sjfv		if (mask & IFCAP_VLAN_HWTSO)
1029266423Sjfv			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1030266423Sjfv		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1031270346Sjfv			IXL_PF_LOCK(pf);
1032270346Sjfv			ixl_init_locked(pf);
1033270346Sjfv			IXL_PF_UNLOCK(pf);
1034266423Sjfv		}
1035266423Sjfv		VLAN_CAPABILITIES(ifp);
1036266423Sjfv
1037266423Sjfv		break;
1038266423Sjfv	}
1039266423Sjfv
1040266423Sjfv	default:
1041270346Sjfv		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
1042266423Sjfv		error = ether_ioctl(ifp, command, data);
1043266423Sjfv		break;
1044266423Sjfv	}
1045266423Sjfv
1046266423Sjfv	return (error);
1047266423Sjfv}
1048266423Sjfv
1049266423Sjfv
1050266423Sjfv/*********************************************************************
1051266423Sjfv *  Init entry point
1052266423Sjfv *
1053266423Sjfv *  This routine is used in two ways. It is used by the stack as
1054266423Sjfv *  init entry point in network interface structure. It is also used
1055266423Sjfv *  by the driver as a hw/sw initialization routine to get to a
1056266423Sjfv *  consistent state.
1057266423Sjfv *
1058266423Sjfv *  return 0 on success, positive on failure
1059266423Sjfv **********************************************************************/
1060266423Sjfv
1061266423Sjfvstatic void
1062270346Sjfvixl_init_locked(struct ixl_pf *pf)
1063266423Sjfv{
1064266423Sjfv	struct i40e_hw	*hw = &pf->hw;
1065270346Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
1066266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1067266423Sjfv	device_t 	dev = pf->dev;
1068266423Sjfv	struct i40e_filter_control_settings	filter;
1069266423Sjfv	u8		tmpaddr[ETHER_ADDR_LEN];
1070266423Sjfv	int		ret;
1071266423Sjfv
1072266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1073270346Sjfv	INIT_DEBUGOUT("ixl_init: begin");
1074266423Sjfv
1075299547Serj	ixl_stop_locked(pf);
1076299547Serj
1077266423Sjfv	/* Get the latest mac address... User might use a LAA */
1078266423Sjfv	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1079266423Sjfv	      I40E_ETH_LENGTH_OF_ADDRESS);
1080299546Serj	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1081299546Serj	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
1082299546Serj		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1083266423Sjfv		bcopy(tmpaddr, hw->mac.addr,
1084266423Sjfv		    I40E_ETH_LENGTH_OF_ADDRESS);
1085266423Sjfv		ret = i40e_aq_mac_address_write(hw,
1086266423Sjfv		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
1087266423Sjfv		    hw->mac.addr, NULL);
1088266423Sjfv		if (ret) {
1089266423Sjfv			device_printf(dev,
1090266423Sjfv			    "LLA address change failed!\n");
1091266423Sjfv			return;
1092266423Sjfv		}
1093266423Sjfv	}
1094266423Sjfv
1095299551Serj	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1096299551Serj
1097266423Sjfv	/* Set the various hardware offload abilities */
1098266423Sjfv	ifp->if_hwassist = 0;
1099266423Sjfv	if (ifp->if_capenable & IFCAP_TSO)
1100266423Sjfv		ifp->if_hwassist |= CSUM_TSO;
1101266423Sjfv	if (ifp->if_capenable & IFCAP_TXCSUM)
1102266423Sjfv		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1103266423Sjfv	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1104266423Sjfv		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1105266423Sjfv
1106266423Sjfv	/* Set up the device filtering */
1107266423Sjfv	bzero(&filter, sizeof(filter));
1108266423Sjfv	filter.enable_ethtype = TRUE;
1109266423Sjfv	filter.enable_macvlan = TRUE;
1110270346Sjfv#ifdef IXL_FDIR
1111266423Sjfv	filter.enable_fdir = TRUE;
1112266423Sjfv#endif
1113299548Serj	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
1114266423Sjfv	if (i40e_set_filter_control(hw, &filter))
1115299548Serj		device_printf(dev, "i40e_set_filter_control() failed\n");
1116266423Sjfv
1117266423Sjfv	/* Set up RSS */
1118270346Sjfv	ixl_config_rss(vsi);
1119266423Sjfv
1120299548Serj	/* Prepare the VSI: rings, hmc contexts, etc... */
1121270346Sjfv	if (ixl_initialize_vsi(vsi)) {
1122270346Sjfv		device_printf(dev, "initialize vsi failed!!\n");
1123266423Sjfv		return;
1124266423Sjfv	}
1125266423Sjfv
1126266423Sjfv	/* Add protocol filters to list */
1127270346Sjfv	ixl_init_filters(vsi);
1128266423Sjfv
1129266423Sjfv	/* Set up VLANs if needed */
1130270346Sjfv	ixl_setup_vlan_filters(vsi);
1131266423Sjfv
1132266423Sjfv	/* Set up MSI/X routing and the ITR settings */
1133270346Sjfv	if (ixl_enable_msix) {
1134270346Sjfv		ixl_configure_msix(pf);
1135270346Sjfv		ixl_configure_itr(pf);
1136266423Sjfv	} else
1137270346Sjfv		ixl_configure_legacy(pf);
1138266423Sjfv
1139270346Sjfv	ixl_enable_rings(vsi);
1140266423Sjfv
1141266423Sjfv	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1142266423Sjfv
1143279858Sjfv	ixl_reconfigure_filters(vsi);
1144279858Sjfv
1145266423Sjfv	/* And now turn on interrupts */
1146270346Sjfv	ixl_enable_intr(vsi);
1147266423Sjfv
1148299547Serj	/* Get link info */
1149299547Serj	hw->phy.get_link_info = TRUE;
1150299547Serj	i40e_get_link_status(hw, &pf->link_up);
1151299547Serj	ixl_update_link_status(pf);
1152299547Serj
1153299548Serj	/* Start the local timer */
1154299548Serj	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1155299548Serj
1156266423Sjfv	/* Now inform the stack we're ready */
1157266423Sjfv	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1158266423Sjfv
1159266423Sjfv	return;
1160266423Sjfv}
1161266423Sjfv
1162299548Serjstatic int
1163299548Serjixl_teardown_hw_structs(struct ixl_pf *pf)
1164299548Serj{
1165299548Serj	enum i40e_status_code status = 0;
1166299548Serj	struct i40e_hw *hw = &pf->hw;
1167299548Serj	device_t dev = pf->dev;
1168299548Serj
1169299548Serj	/* Shutdown LAN HMC */
1170299548Serj	if (hw->hmc.hmc_obj) {
1171299548Serj		status = i40e_shutdown_lan_hmc(hw);
1172299548Serj		if (status) {
1173299548Serj			device_printf(dev,
1174299548Serj			    "init: LAN HMC shutdown failure; status %d\n", status);
1175299548Serj			goto err_out;
1176299548Serj		}
1177299548Serj	}
1178299548Serj
1179299548Serj	// XXX: This gets called when we know the adminq is inactive,
1180299548Serj	// so we already know it's set up when we get here.
1181299548Serj
1182299548Serj	/* Shutdown admin queue */
1183299548Serj	status = i40e_shutdown_adminq(hw);
1184299548Serj	if (status)
1185299548Serj		device_printf(dev,
1186299548Serj		    "init: Admin Queue shutdown failure; status %d\n", status);
1187299548Serj
1188299548Serjerr_out:
1189299548Serj	return (status);
1190299548Serj}
1191299548Serj
1192299548Serjstatic int
1193299548Serjixl_reset(struct ixl_pf *pf)
1194299548Serj{
1195299548Serj	struct i40e_hw *hw = &pf->hw;
1196299548Serj	device_t dev = pf->dev;
1197299548Serj	int error = 0;
1198299548Serj
1199299548Serj	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
1200299548Serj	i40e_clear_hw(hw);
1201299548Serj	error = i40e_pf_reset(hw);
1202299548Serj	if (error) {
1203299548Serj		device_printf(dev, "init: PF reset failure\n");
1204299548Serj		error = EIO;
1205299548Serj		goto err_out;
1206299548Serj	}
1207299548Serj
1208299548Serj	error = i40e_init_adminq(hw);
1209299548Serj	if (error) {
1210299548Serj		device_printf(dev, "init: Admin queue init failure; status code %d\n", error);
1211299548Serj		error = EIO;
1212299548Serj		goto err_out;
1213299548Serj	}
1214299548Serj
1215299548Serj	i40e_clear_pxe_mode(hw);
1216299548Serj
1217299548Serj	error = ixl_get_hw_capabilities(pf);
1218299548Serj	if (error) {
1219299548Serj		device_printf(dev, "init: Error retrieving HW capabilities; status code %d\n", error);
1220299548Serj		goto err_out;
1221299548Serj	}
1222299548Serj
1223299548Serj	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1224299548Serj	    hw->func_caps.num_rx_qp, 0, 0);
1225299548Serj	if (error) {
1226299548Serj		device_printf(dev, "init: LAN HMC init failed; status code %d\n", error);
1227299548Serj		error = EIO;
1228299548Serj		goto err_out;
1229299548Serj	}
1230299548Serj
1231299548Serj	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1232299548Serj	if (error) {
1233299552Serj		device_printf(dev, "init: LAN HMC config failed; status code %d\n", error);
1234299548Serj		error = EIO;
1235299548Serj		goto err_out;
1236299548Serj	}
1237299548Serj
1238299548Serj	// XXX: need to do switch config here?
1239299548Serj
1240299548Serj	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
1241299548Serj	    NULL);
1242299548Serj        if (error) {
1243299548Serj		device_printf(dev, "init: i40e_aq_set_phy_int_mask() failed: err %d,"
1244299548Serj		    " aq_err %d\n", error, hw->aq.asq_last_status);
1245299548Serj		error = EIO;
1246299548Serj		goto err_out;
1247299548Serj	}
1248299548Serj
1249299548Serj	u8 set_fc_err_mask;
1250299548Serj	error = i40e_set_fc(hw, &set_fc_err_mask, true);
1251299548Serj	if (error) {
1252299548Serj		device_printf(dev, "init: setting link flow control failed; retcode %d,"
1253299548Serj		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
1254299548Serj		goto err_out;
1255299548Serj	}
1256299548Serj
1257299548Serj	// XXX: (Rebuild VSIs?)
1258299548Serj
1259299552Serj	/* Firmware delay workaround */
1260299548Serj	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
1261299548Serj	    (hw->aq.fw_maj_ver < 4)) {
1262299548Serj		i40e_msec_delay(75);
1263299548Serj		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
1264299548Serj		if (error) {
1265299548Serj			device_printf(dev, "init: link restart failed, aq_err %d\n",
1266299548Serj			    hw->aq.asq_last_status);
1267299548Serj			goto err_out;
1268299548Serj		}
1269299548Serj	}
1270299548Serj
1271299548Serj
1272299548Serjerr_out:
1273299548Serj	return (error);
1274299548Serj}
1275299548Serj
1276266423Sjfvstatic void
1277270346Sjfvixl_init(void *arg)
1278266423Sjfv{
1279270346Sjfv	struct ixl_pf *pf = arg;
1280299547Serj	int ret = 0;
1281266423Sjfv
1282299548Serj	/*
1283299548Serj	 * If the aq is dead here, it probably means something outside of the driver
1284299548Serj	 * did something to the adapter, like a PF reset.
1285299548Serj	 * So rebuild the driver's state here if that occurs.
1286299548Serj	 */
1287299548Serj	if (!i40e_check_asq_alive(&pf->hw)) {
1288299548Serj		device_printf(pf->dev, "asq is not alive; rebuilding...\n");
1289299548Serj		IXL_PF_LOCK(pf);
1290299548Serj		ixl_teardown_hw_structs(pf);
1291299548Serj		ixl_reset(pf);
1292299548Serj		IXL_PF_UNLOCK(pf);
1293299548Serj	}
1294299548Serj
1295299547Serj	/* Set up interrupt routing here */
1296299547Serj	if (pf->msix > 1)
1297299547Serj		ret = ixl_assign_vsi_msix(pf);
1298299547Serj	else
1299299547Serj		ret = ixl_assign_vsi_legacy(pf);
1300299547Serj	if (ret) {
1301299547Serj		device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", ret);
1302299547Serj		return;
1303299547Serj	}
1304299547Serj
1305270346Sjfv	IXL_PF_LOCK(pf);
1306270346Sjfv	ixl_init_locked(pf);
1307270346Sjfv	IXL_PF_UNLOCK(pf);
1308266423Sjfv	return;
1309266423Sjfv}
1310266423Sjfv
1311266423Sjfv/*
1312266423Sjfv**
1313266423Sjfv** MSIX Interrupt Handlers and Tasklets
1314266423Sjfv**
1315266423Sjfv*/
1316266423Sjfvstatic void
1317270346Sjfvixl_handle_que(void *context, int pending)
1318266423Sjfv{
1319270346Sjfv	struct ixl_queue *que = context;
1320270346Sjfv	struct ixl_vsi *vsi = que->vsi;
1321266423Sjfv	struct i40e_hw  *hw = vsi->hw;
1322266423Sjfv	struct tx_ring  *txr = &que->txr;
1323266423Sjfv	struct ifnet    *ifp = vsi->ifp;
1324266423Sjfv	bool		more;
1325266423Sjfv
1326266423Sjfv	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1327270346Sjfv		more = ixl_rxeof(que, IXL_RX_LIMIT);
1328270346Sjfv		IXL_TX_LOCK(txr);
1329270346Sjfv		ixl_txeof(que);
1330266423Sjfv		if (!drbr_empty(ifp, txr->br))
1331270346Sjfv			ixl_mq_start_locked(ifp, txr);
1332270346Sjfv		IXL_TX_UNLOCK(txr);
1333266423Sjfv		if (more) {
1334266423Sjfv			taskqueue_enqueue(que->tq, &que->task);
1335266423Sjfv			return;
1336266423Sjfv		}
1337266423Sjfv	}
1338266423Sjfv
1339270346Sjfv	/* Re-enable this interrupt */
1340270346Sjfv	ixl_enable_queue(hw, que->me);
1341266423Sjfv	return;
1342266423Sjfv}
1343266423Sjfv
1344266423Sjfv
1345266423Sjfv/*********************************************************************
1346266423Sjfv *
1347266423Sjfv *  Legacy Interrupt Service routine
1348266423Sjfv *
1349266423Sjfv **********************************************************************/
1350266423Sjfvvoid
1351270346Sjfvixl_intr(void *arg)
1352266423Sjfv{
1353270346Sjfv	struct ixl_pf		*pf = arg;
1354266423Sjfv	struct i40e_hw		*hw =  &pf->hw;
1355270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1356270346Sjfv	struct ixl_queue	*que = vsi->queues;
1357266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1358266423Sjfv	struct tx_ring		*txr = &que->txr;
1359266423Sjfv	u32			reg, icr0, mask;
1360266423Sjfv	bool			more_tx, more_rx;
1361266423Sjfv
1362266423Sjfv	++que->irqs;
1363266423Sjfv
1364266423Sjfv	/* Protect against spurious interrupts */
1365266423Sjfv	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1366266423Sjfv		return;
1367266423Sjfv
1368266423Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0);
1369266423Sjfv
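	/* Setting CLEARPBA acknowledges the interrupt by clearing this vector's pending bit */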
1370266423Sjfv	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1371266423Sjfv	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1372266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1373266423Sjfv
1374266423Sjfv        mask = rd32(hw, I40E_PFINT_ICR0_ENA);
1375266423Sjfv
1376279858Sjfv#ifdef PCI_IOV
1377279858Sjfv	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
1378279858Sjfv		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1379279858Sjfv#endif
1380279858Sjfv
1381266423Sjfv	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
1382266423Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
1383266423Sjfv		return;
1384266423Sjfv	}
1385266423Sjfv
1386270346Sjfv	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1387266423Sjfv
1388270346Sjfv	IXL_TX_LOCK(txr);
1389270346Sjfv	more_tx = ixl_txeof(que);
1390266423Sjfv	if (!drbr_empty(vsi->ifp, txr->br))
1391266423Sjfv		more_tx = 1;
1392270346Sjfv	IXL_TX_UNLOCK(txr);
1393266423Sjfv
1394266423Sjfv	/* re-enable other interrupt causes */
1395266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
1396266423Sjfv
1397266423Sjfv	/* And now the queues */
1398266423Sjfv	reg = rd32(hw, I40E_QINT_RQCTL(0));
1399266423Sjfv	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1400266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
1401266423Sjfv
1402266423Sjfv	reg = rd32(hw, I40E_QINT_TQCTL(0));
1403266423Sjfv	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1404266423Sjfv	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
1405266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
1406266423Sjfv
1407270346Sjfv	ixl_enable_legacy(hw);
1408266423Sjfv
1409266423Sjfv	return;
1410266423Sjfv}
1411266423Sjfv
1412266423Sjfv
1413266423Sjfv/*********************************************************************
1414266423Sjfv *
1415266423Sjfv *  MSIX VSI Interrupt Service routine
1416266423Sjfv *
1417266423Sjfv **********************************************************************/
1418266423Sjfvvoid
1419270346Sjfvixl_msix_que(void *arg)
1420266423Sjfv{
1421270346Sjfv	struct ixl_queue	*que = arg;
1422270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
1423266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1424266423Sjfv	struct tx_ring	*txr = &que->txr;
1425266423Sjfv	bool		more_tx, more_rx;
1426266423Sjfv
1427269198Sjfv	/* Protect against spurious interrupts */
1428269198Sjfv	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1429269198Sjfv		return;
1430269198Sjfv
1431266423Sjfv	++que->irqs;
1432266423Sjfv
1433270346Sjfv	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1434266423Sjfv
1435270346Sjfv	IXL_TX_LOCK(txr);
1436270346Sjfv	more_tx = ixl_txeof(que);
1437266423Sjfv	/*
1438266423Sjfv	** Make certain that if the stack
1439266423Sjfv	** has anything queued the task gets
1440266423Sjfv	** scheduled to handle it.
1441266423Sjfv	*/
1442266423Sjfv	if (!drbr_empty(vsi->ifp, txr->br))
1443266423Sjfv		more_tx = 1;
1444270346Sjfv	IXL_TX_UNLOCK(txr);
1445266423Sjfv
1446270346Sjfv	ixl_set_queue_rx_itr(que);
1447270346Sjfv	ixl_set_queue_tx_itr(que);
1448266423Sjfv
1449266423Sjfv	if (more_tx || more_rx)
1450266423Sjfv		taskqueue_enqueue(que->tq, &que->task);
1451266423Sjfv	else
1452270346Sjfv		ixl_enable_queue(hw, que->me);
1453266423Sjfv
1454266423Sjfv	return;
1455266423Sjfv}
1456266423Sjfv
1457266423Sjfv
1458266423Sjfv/*********************************************************************
1459266423Sjfv *
1460266423Sjfv *  MSIX Admin Queue Interrupt Service routine
1461266423Sjfv *
1462266423Sjfv **********************************************************************/
1463266423Sjfvstatic void
1464270346Sjfvixl_msix_adminq(void *arg)
1465266423Sjfv{
1466270346Sjfv	struct ixl_pf	*pf = arg;
1467266423Sjfv	struct i40e_hw	*hw = &pf->hw;
1468299549Serj	u32		reg, mask, rstat_reg;
1469299549Serj	bool		do_task = FALSE;
1470266423Sjfv
1471266423Sjfv	++pf->admin_irq;
1472266423Sjfv
1473266423Sjfv	reg = rd32(hw, I40E_PFINT_ICR0);
1474266423Sjfv	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
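	/* Note: ICR0 cause bits are cleared on read, so handle everything captured in 'reg' here */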
1475266423Sjfv
1476266423Sjfv	/* Check on the cause */
1477299549Serj	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
1478299549Serj		mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
1479299549Serj		do_task = TRUE;
1480299549Serj	}
1481266423Sjfv
1482269198Sjfv	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
1483270346Sjfv		ixl_handle_mdd_event(pf);
1484299549Serj		mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
1485269198Sjfv	}
1486266423Sjfv
1487299549Serj	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
1488299549Serj		device_printf(pf->dev, "Reset Requested!\n");
1489299549Serj		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
1490299549Serj		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
1491299549Serj		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
1492299549Serj		device_printf(pf->dev, "Reset type: ");
1493299549Serj		switch (rstat_reg) {
1494299549Serj		/* These others might be handled similarly to an EMPR reset */
1495299549Serj		case I40E_RESET_CORER:
1496299549Serj			printf("CORER\n");
1497299549Serj			break;
1498299549Serj		case I40E_RESET_GLOBR:
1499299549Serj			printf("GLOBR\n");
1500299549Serj			break;
1501299549Serj		case I40E_RESET_EMPR:
1502299549Serj			printf("EMPR\n");
1503299549Serj			atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
1504299549Serj			break;
1505299549Serj		default:
1506299549Serj			printf("?\n");
1507299549Serj			break;
1508299549Serj		}
1509299549Serj		// overload admin queue task to check reset progress?
1510299549Serj		do_task = TRUE;
1511299549Serj	}
1512299549Serj
1513299549Serj	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
1514299549Serj		device_printf(pf->dev, "ECC Error detected!\n");
1515299549Serj	}
1516299549Serj
1517299549Serj	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
1518299549Serj		device_printf(pf->dev, "HMC Error detected!\n");
1519299549Serj	}
1520299549Serj
1521299549Serj	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
1522299549Serj		device_printf(pf->dev, "PCI Exception detected!\n");
1523299549Serj	}
1524299549Serj
1525279858Sjfv#ifdef PCI_IOV
1526279858Sjfv	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
1527266423Sjfv		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
1528279858Sjfv		taskqueue_enqueue(pf->tq, &pf->vflr_task);
1529279858Sjfv	}
1530279858Sjfv#endif
1531266423Sjfv
1532266423Sjfv	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
1533266423Sjfv	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1534266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1535266423Sjfv
1536299549Serj	if (do_task)
1537299549Serj		taskqueue_enqueue(pf->tq, &pf->adminq);
1538266423Sjfv}
1539266423Sjfv
1540266423Sjfv/*********************************************************************
1541266423Sjfv *
1542266423Sjfv *  Media Ioctl callback
1543266423Sjfv *
1544266423Sjfv *  This routine is called whenever the user queries the status of
1545266423Sjfv *  the interface using ifconfig.
1546266423Sjfv *
1547266423Sjfv **********************************************************************/
1548266423Sjfvstatic void
1549270346Sjfvixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1550266423Sjfv{
1551270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
1552279858Sjfv	struct ixl_pf	*pf = vsi->back;
1553266423Sjfv	struct i40e_hw  *hw = &pf->hw;
1554266423Sjfv
1555270346Sjfv	INIT_DEBUGOUT("ixl_media_status: begin");
1556270346Sjfv	IXL_PF_LOCK(pf);
1557266423Sjfv
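	/* Force a fresh link status query from firmware rather than using cached info */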
1558279858Sjfv	hw->phy.get_link_info = TRUE;
1559284049Sjfv	i40e_get_link_status(hw, &pf->link_up);
1560270346Sjfv	ixl_update_link_status(pf);
1561266423Sjfv
1562266423Sjfv	ifmr->ifm_status = IFM_AVALID;
1563266423Sjfv	ifmr->ifm_active = IFM_ETHER;
1564266423Sjfv
1565279858Sjfv	if (!pf->link_up) {
1566270346Sjfv		IXL_PF_UNLOCK(pf);
1567266423Sjfv		return;
1568266423Sjfv	}
1569266423Sjfv
1570266423Sjfv	ifmr->ifm_status |= IFM_ACTIVE;
1571299545Serj
1572299545Serj	/* Hardware always does full-duplex */
1573266423Sjfv	ifmr->ifm_active |= IFM_FDX;
1574266423Sjfv
1575266423Sjfv	switch (hw->phy.link_info.phy_type) {
1576266423Sjfv		/* 100 M */
1577266423Sjfv		case I40E_PHY_TYPE_100BASE_TX:
1578266423Sjfv			ifmr->ifm_active |= IFM_100_TX;
1579266423Sjfv			break;
1580266423Sjfv		/* 1 G */
1581266423Sjfv		case I40E_PHY_TYPE_1000BASE_T:
1582266423Sjfv			ifmr->ifm_active |= IFM_1000_T;
1583266423Sjfv			break;
1584269198Sjfv		case I40E_PHY_TYPE_1000BASE_SX:
1585269198Sjfv			ifmr->ifm_active |= IFM_1000_SX;
1586269198Sjfv			break;
1587269198Sjfv		case I40E_PHY_TYPE_1000BASE_LX:
1588269198Sjfv			ifmr->ifm_active |= IFM_1000_LX;
1589269198Sjfv			break;
1590299552Serj		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
1591299552Serj			ifmr->ifm_active |= IFM_OTHER;
1592299552Serj			break;
1593266423Sjfv		/* 10 G */
1594266423Sjfv		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1595266423Sjfv			ifmr->ifm_active |= IFM_10G_TWINAX;
1596266423Sjfv			break;
1597266423Sjfv		case I40E_PHY_TYPE_10GBASE_SR:
1598266423Sjfv			ifmr->ifm_active |= IFM_10G_SR;
1599266423Sjfv			break;
1600266423Sjfv		case I40E_PHY_TYPE_10GBASE_LR:
1601266423Sjfv			ifmr->ifm_active |= IFM_10G_LR;
1602266423Sjfv			break;
1603270346Sjfv		case I40E_PHY_TYPE_10GBASE_T:
1604270346Sjfv			ifmr->ifm_active |= IFM_10G_T;
1605270346Sjfv			break;
1606299552Serj		case I40E_PHY_TYPE_XAUI:
1607299552Serj		case I40E_PHY_TYPE_XFI:
1608299552Serj		case I40E_PHY_TYPE_10GBASE_AOC:
1609299552Serj			ifmr->ifm_active |= IFM_OTHER;
1610299552Serj			break;
1611266423Sjfv		/* 40 G */
1612266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4:
1613266423Sjfv		case I40E_PHY_TYPE_40GBASE_CR4_CU:
1614266423Sjfv			ifmr->ifm_active |= IFM_40G_CR4;
1615266423Sjfv			break;
1616266423Sjfv		case I40E_PHY_TYPE_40GBASE_SR4:
1617266423Sjfv			ifmr->ifm_active |= IFM_40G_SR4;
1618266423Sjfv			break;
1619266423Sjfv		case I40E_PHY_TYPE_40GBASE_LR4:
1620266423Sjfv			ifmr->ifm_active |= IFM_40G_LR4;
1621266423Sjfv			break;
1622299552Serj		case I40E_PHY_TYPE_XLAUI:
1623299552Serj			ifmr->ifm_active |= IFM_OTHER;
1624299552Serj			break;
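		/*
		** Backplane (KX/KR) and CR1 media: without IFM_ETH_XTYPE the
		** closest legacy ifmedia equivalents are reported instead.
		*/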
1625284049Sjfv#ifndef IFM_ETH_XTYPE
1626284049Sjfv		case I40E_PHY_TYPE_1000BASE_KX:
1627284049Sjfv			ifmr->ifm_active |= IFM_1000_CX;
1628284049Sjfv			break;
1629299552Serj		case I40E_PHY_TYPE_SGMII:
1630299552Serj			ifmr->ifm_active |= IFM_OTHER;
1631299552Serj			break;
1632284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1633284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1:
1634284049Sjfv			ifmr->ifm_active |= IFM_10G_TWINAX;
1635284049Sjfv			break;
1636284049Sjfv		case I40E_PHY_TYPE_10GBASE_KX4:
1637284049Sjfv			ifmr->ifm_active |= IFM_10G_CX4;
1638284049Sjfv			break;
1639284049Sjfv		case I40E_PHY_TYPE_10GBASE_KR:
1640284049Sjfv			ifmr->ifm_active |= IFM_10G_SR;
1641284049Sjfv			break;
1642299552Serj		case I40E_PHY_TYPE_SFI:
1643299552Serj			ifmr->ifm_active |= IFM_OTHER;
1644299552Serj			break;
1645279033Sjfv		case I40E_PHY_TYPE_40GBASE_KR4:
1646279033Sjfv		case I40E_PHY_TYPE_XLPPI:
1647299552Serj		case I40E_PHY_TYPE_40GBASE_AOC:
1648284049Sjfv			ifmr->ifm_active |= IFM_40G_SR4;
1649279033Sjfv			break;
1650284049Sjfv#else
1651284049Sjfv		case I40E_PHY_TYPE_1000BASE_KX:
1652284049Sjfv			ifmr->ifm_active |= IFM_1000_KX;
1653284049Sjfv			break;
1654299552Serj		case I40E_PHY_TYPE_SGMII:
1655299552Serj			ifmr->ifm_active |= IFM_1000_SGMII;
1656299552Serj			break;
1657284049Sjfv		/* ERJ: What's the difference between these? */
1658284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1_CU:
1659284049Sjfv		case I40E_PHY_TYPE_10GBASE_CR1:
1660284049Sjfv			ifmr->ifm_active |= IFM_10G_CR1;
1661284049Sjfv			break;
1662284049Sjfv		case I40E_PHY_TYPE_10GBASE_KX4:
1663284049Sjfv			ifmr->ifm_active |= IFM_10G_KX4;
1664284049Sjfv			break;
1665284049Sjfv		case I40E_PHY_TYPE_10GBASE_KR:
1666284049Sjfv			ifmr->ifm_active |= IFM_10G_KR;
1667284049Sjfv			break;
1668299552Serj		case I40E_PHY_TYPE_SFI:
1669299552Serj			ifmr->ifm_active |= IFM_10G_SFI;
1670299552Serj			break;
1671299545Serj		/* Our single 20G media type */
1672284049Sjfv		case I40E_PHY_TYPE_20GBASE_KR2:
1673284049Sjfv			ifmr->ifm_active |= IFM_20G_KR2;
1674284049Sjfv			break;
1675284049Sjfv		case I40E_PHY_TYPE_40GBASE_KR4:
1676284049Sjfv			ifmr->ifm_active |= IFM_40G_KR4;
1677284049Sjfv			break;
1678284049Sjfv		case I40E_PHY_TYPE_XLPPI:
1679299552Serj		case I40E_PHY_TYPE_40GBASE_AOC:
1680284049Sjfv			ifmr->ifm_active |= IFM_40G_XLPPI;
1681284049Sjfv			break;
1682284049Sjfv#endif
1683299552Serj		/* Unknown to driver */
1684266423Sjfv		default:
1685266423Sjfv			ifmr->ifm_active |= IFM_UNKNOWN;
1686266423Sjfv			break;
1687266423Sjfv	}
1688266423Sjfv	/* Report flow control status as well */
1689266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
1690266423Sjfv		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1691266423Sjfv	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
1692266423Sjfv		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1693266423Sjfv
1694270346Sjfv	IXL_PF_UNLOCK(pf);
1695266423Sjfv
1696266423Sjfv	return;
1697266423Sjfv}
1698266423Sjfv
1699299545Serj/*
1700299545Serj * NOTE: Fortville does not support forcing media speeds. Instead,
1701299545Serj * use the set_advertise sysctl to set the speeds Fortville
1702299545Serj * will advertise or be allowed to operate at.
1703299545Serj */
1704266423Sjfvstatic int
1705270346Sjfvixl_media_change(struct ifnet * ifp)
1706266423Sjfv{
1707270346Sjfv	struct ixl_vsi *vsi = ifp->if_softc;
1708266423Sjfv	struct ifmedia *ifm = &vsi->media;
1709266423Sjfv
1710270346Sjfv	INIT_DEBUGOUT("ixl_media_change: begin");
1711266423Sjfv
1712266423Sjfv	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1713266423Sjfv		return (EINVAL);
1714266423Sjfv
1715299545Serj	if_printf(ifp, "Media change is not supported.\n");
1716269198Sjfv
1717269198Sjfv	return (ENODEV);
1718266423Sjfv}
1719266423Sjfv
1720266423Sjfv
1721270346Sjfv#ifdef IXL_FDIR
1722266423Sjfv/*
1723266423Sjfv** ATR: Application Targeted Receive - creates a filter
1724266423Sjfv**	based on TX flow info that will keep the receive
1725266423Sjfv**	portion of the flow on the same queue. In this
1726266423Sjfv**	implementation it is only available for TCP connections.
1727266423Sjfv*/
1728266423Sjfvvoid
1729270346Sjfvixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
1730266423Sjfv{
1731270346Sjfv	struct ixl_vsi			*vsi = que->vsi;
1732266423Sjfv	struct tx_ring			*txr = &que->txr;
1733266423Sjfv	struct i40e_filter_program_desc	*FDIR;
1734266423Sjfv	u32				ptype, dtype;
1735266423Sjfv	int				idx;
1736266423Sjfv
1737266423Sjfv	/* check if ATR is enabled and sample rate */
1738270346Sjfv	if ((!ixl_enable_fdir) || (!txr->atr_rate))
1739266423Sjfv		return;
1740266423Sjfv	/*
1741266423Sjfv	** We sample all TCP SYN/FIN packets,
1742266423Sjfv	** or at the selected sample rate
1743266423Sjfv	*/
1744266423Sjfv	txr->atr_count++;
1745266423Sjfv	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
1746266423Sjfv	    (txr->atr_count < txr->atr_rate))
1747266423Sjfv                return;
1748266423Sjfv	txr->atr_count = 0;
1749266423Sjfv
1750266423Sjfv	/* Get a descriptor to use */
1751266423Sjfv	idx = txr->next_avail;
1752266423Sjfv	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
1753266423Sjfv	if (++idx == que->num_desc)
1754266423Sjfv		idx = 0;
1755266423Sjfv	txr->avail--;
1756266423Sjfv	txr->next_avail = idx;
1757266423Sjfv
1758266423Sjfv	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1759266423Sjfv	    I40E_TXD_FLTR_QW0_QINDEX_MASK;
1760266423Sjfv
1761266423Sjfv	ptype |= (etype == ETHERTYPE_IP) ?
1762266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1763266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1764266423Sjfv	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1765266423Sjfv	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1766266423Sjfv
1767266423Sjfv	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1768266423Sjfv
1769266423Sjfv	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
1770266423Sjfv
1771266423Sjfv	/*
1772266423Sjfv	** We use the TCP TH_FIN as a trigger to remove
1773266423Sjfv	** the filter; otherwise it is an update.
1774266423Sjfv	*/
1775266423Sjfv	dtype |= (th->th_flags & TH_FIN) ?
1776266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1777266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1778266423Sjfv	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1779266423Sjfv	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1780266423Sjfv
1781266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1782266423Sjfv	    I40E_TXD_FLTR_QW1_DEST_SHIFT;
1783266423Sjfv
1784266423Sjfv	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1785266423Sjfv	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1786266423Sjfv
1787266423Sjfv	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
1788266423Sjfv	FDIR->dtype_cmd_cntindex = htole32(dtype);
1789266423Sjfv	return;
1790266423Sjfv}
1791266423Sjfv#endif
1792266423Sjfv
1793266423Sjfv
1794266423Sjfvstatic void
1795270346Sjfvixl_set_promisc(struct ixl_vsi *vsi)
1796266423Sjfv{
1797266423Sjfv	struct ifnet	*ifp = vsi->ifp;
1798266423Sjfv	struct i40e_hw	*hw = vsi->hw;
1799266423Sjfv	int		err, mcnt = 0;
1800266423Sjfv	bool		uni = FALSE, multi = FALSE;
1801266423Sjfv
1802266423Sjfv	if (ifp->if_flags & IFF_ALLMULTI)
1803266423Sjfv                multi = TRUE;
1804266423Sjfv	else { /* Need to count the multicast addresses */
1805266423Sjfv		struct  ifmultiaddr *ifma;
1806266423Sjfv		if_maddr_rlock(ifp);
1807266423Sjfv		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1808266423Sjfv                        if (ifma->ifma_addr->sa_family != AF_LINK)
1809266423Sjfv                                continue;
1810266423Sjfv                        if (mcnt == MAX_MULTICAST_ADDR)
1811266423Sjfv                                break;
1812266423Sjfv                        mcnt++;
1813266423Sjfv		}
1814266423Sjfv		if_maddr_runlock(ifp);
1815266423Sjfv	}
1816266423Sjfv
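	/* If the multicast list exceeds what the filter table can hold, use multicast promiscuous */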
1817266423Sjfv	if (mcnt >= MAX_MULTICAST_ADDR)
1818266423Sjfv                multi = TRUE;
1819266423Sjfv        if (ifp->if_flags & IFF_PROMISC)
1820266423Sjfv		uni = TRUE;
1821266423Sjfv
1822266423Sjfv	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1823266423Sjfv	    vsi->seid, uni, NULL);
1824266423Sjfv	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1825266423Sjfv	    vsi->seid, multi, NULL);
1826266423Sjfv	return;
1827266423Sjfv}
1828266423Sjfv
1829266423Sjfv/*********************************************************************
1830266423Sjfv * 	Filter Routines
1831266423Sjfv *
1832266423Sjfv *	Routines for multicast and vlan filter management.
1833266423Sjfv *
1834266423Sjfv *********************************************************************/
1835266423Sjfvstatic void
1836270346Sjfvixl_add_multi(struct ixl_vsi *vsi)
1837266423Sjfv{
1838266423Sjfv	struct	ifmultiaddr	*ifma;
1839266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1840266423Sjfv	struct i40e_hw		*hw = vsi->hw;
1841266423Sjfv	int			mcnt = 0, flags;
1842266423Sjfv
1843270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: begin");
1844266423Sjfv
1845266423Sjfv	if_maddr_rlock(ifp);
1846266423Sjfv	/*
1847266423Sjfv	** First just get a count, to decide if we
1848266423Sjfv	** First just get a count, to decide whether
1849266423Sjfv	** we simply use multicast promiscuous.
1850266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1851266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1852266423Sjfv			continue;
1853266423Sjfv		mcnt++;
1854266423Sjfv	}
1855266423Sjfv	if_maddr_runlock(ifp);
1856266423Sjfv
1857266423Sjfv	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
1858266423Sjfv		/* delete existing MC filters */
1859270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1860266423Sjfv		i40e_aq_set_vsi_multicast_promiscuous(hw,
1861266423Sjfv		    vsi->seid, TRUE, NULL);
1862266423Sjfv		return;
1863266423Sjfv	}
1864266423Sjfv
1865266423Sjfv	mcnt = 0;
1866266423Sjfv	if_maddr_rlock(ifp);
1867266423Sjfv	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1868266423Sjfv		if (ifma->ifma_addr->sa_family != AF_LINK)
1869266423Sjfv			continue;
1870270346Sjfv		ixl_add_mc_filter(vsi,
1871266423Sjfv		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
1872266423Sjfv		mcnt++;
1873266423Sjfv	}
1874266423Sjfv	if_maddr_runlock(ifp);
1875266423Sjfv	if (mcnt > 0) {
1876270346Sjfv		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
1877270346Sjfv		ixl_add_hw_filters(vsi, flags, mcnt);
1878266423Sjfv	}
1879266423Sjfv
1880270346Sjfv	IOCTL_DEBUGOUT("ixl_add_multi: end");
1881266423Sjfv	return;
1882266423Sjfv}
1883266423Sjfv
1884266423Sjfvstatic void
1885270346Sjfvixl_del_multi(struct ixl_vsi *vsi)
1886266423Sjfv{
1887266423Sjfv	struct ifnet		*ifp = vsi->ifp;
1888266423Sjfv	struct ifmultiaddr	*ifma;
1889270346Sjfv	struct ixl_mac_filter	*f;
1890266423Sjfv	int			mcnt = 0;
1891266423Sjfv	bool		match = FALSE;
1892266423Sjfv
1893270346Sjfv	IOCTL_DEBUGOUT("ixl_del_multi: begin");
1894266423Sjfv
1895266423Sjfv	/* Search for removed multicast addresses */
1896266423Sjfv	if_maddr_rlock(ifp);
1897266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
1898270346Sjfv		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
1899266423Sjfv			match = FALSE;
1900266423Sjfv			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1901266423Sjfv				if (ifma->ifma_addr->sa_family != AF_LINK)
1902266423Sjfv					continue;
1903266423Sjfv				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1904266423Sjfv				if (cmp_etheraddr(f->macaddr, mc_addr)) {
1905266423Sjfv					match = TRUE;
1906266423Sjfv					break;
1907266423Sjfv				}
1908266423Sjfv			}
1909266423Sjfv			if (match == FALSE) {
1910270346Sjfv				f->flags |= IXL_FILTER_DEL;
1911266423Sjfv				mcnt++;
1912266423Sjfv			}
1913266423Sjfv		}
1914266423Sjfv	}
1915266423Sjfv	if_maddr_runlock(ifp);
1916266423Sjfv
1917266423Sjfv	if (mcnt > 0)
1918270346Sjfv		ixl_del_hw_filters(vsi, mcnt);
1919266423Sjfv}
1920266423Sjfv
1921266423Sjfv
1922266423Sjfv/*********************************************************************
1923266423Sjfv *  Timer routine
1924266423Sjfv *
1925266423Sjfv *  This routine checks for link status,updates statistics,
1926266423Sjfv *  This routine checks for link status, updates statistics,
1927266423Sjfv *
1928299551Serj *  Only runs when the driver is configured UP and RUNNING.
1929299551Serj *
1930266423Sjfv **********************************************************************/
1931266423Sjfv
1932266423Sjfvstatic void
1933270346Sjfvixl_local_timer(void *arg)
1934266423Sjfv{
1935270346Sjfv	struct ixl_pf		*pf = arg;
1936266423Sjfv	struct i40e_hw		*hw = &pf->hw;
1937270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
1938270346Sjfv	struct ixl_queue	*que = vsi->queues;
1939266423Sjfv	device_t		dev = pf->dev;
1940266423Sjfv	int			hung = 0;
1941266423Sjfv	u32			mask;
1942266423Sjfv
1943266423Sjfv	mtx_assert(&pf->pf_mtx, MA_OWNED);
1944266423Sjfv
1945266423Sjfv	/* Fire off the adminq task */
1946266423Sjfv	taskqueue_enqueue(pf->tq, &pf->adminq);
1947266423Sjfv
1948266423Sjfv	/* Update stats */
1949270346Sjfv	ixl_update_stats_counters(pf);
1950266423Sjfv
1951266423Sjfv	/*
1952269198Sjfv	** Check status of the queues
1953266423Sjfv	*/
1954266423Sjfv	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
1955266423Sjfv		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
1956266423Sjfv
1957266423Sjfv	for (int i = 0; i < vsi->num_queues; i++,que++) {
1958266423Sjfv		/* Any queues with outstanding work get a sw irq */
1959266423Sjfv		if (que->busy)
1960266423Sjfv			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
1961266423Sjfv		/*
1962266423Sjfv		** Each time txeof runs without cleaning, while there
1963266423Sjfv		** are still uncleaned descriptors, it increments busy.
1964266423Sjfv		** If busy reaches IXL_MAX_TX_BUSY we declare the queue hung.
1965266423Sjfv		*/
1966270346Sjfv		if (que->busy == IXL_QUEUE_HUNG) {
1967269198Sjfv			++hung;
1968269198Sjfv			/* Mark the queue as inactive */
1969269198Sjfv			vsi->active_queues &= ~((u64)1 << que->me);
1970269198Sjfv			continue;
1971269198Sjfv		} else {
1972269198Sjfv			/* Check if we've come back from hung */
1973269198Sjfv			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
1974269198Sjfv				vsi->active_queues |= ((u64)1 << que->me);
1975269198Sjfv		}
1976270346Sjfv		if (que->busy >= IXL_MAX_TX_BUSY) {
1977277084Sjfv#ifdef IXL_DEBUG
1978266423Sjfv			device_printf(dev,"Warning queue %d "
1979269198Sjfv			    "appears to be hung!\n", i);
1980277084Sjfv#endif
1981270346Sjfv			que->busy = IXL_QUEUE_HUNG;
1982266423Sjfv			++hung;
1983266423Sjfv		}
1984266423Sjfv	}
1985266423Sjfv	/* Only reinit if all queues show hung */
1986266423Sjfv	if (hung == vsi->num_queues)
1987266423Sjfv		goto hung;
1988266423Sjfv
1989270346Sjfv	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
1990266423Sjfv	return;
1991266423Sjfv
1992266423Sjfvhung:
1993266423Sjfv	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
1994270346Sjfv	ixl_init_locked(pf);
1995266423Sjfv}
1996266423Sjfv
1997266423Sjfv/*
1998266423Sjfv** Note: this routine updates the OS on the link state;
1999266423Sjfv**	the real check of the hardware only happens with
2000266423Sjfv**	a link interrupt.
2001266423Sjfv*/
2002266423Sjfvstatic void
2003270346Sjfvixl_update_link_status(struct ixl_pf *pf)
2004266423Sjfv{
2005270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2006266423Sjfv	struct i40e_hw		*hw = &pf->hw;
2007266423Sjfv	struct ifnet		*ifp = vsi->ifp;
2008266423Sjfv	device_t		dev = pf->dev;
2009266423Sjfv
2010299547Serj	if (pf->link_up) {
2011266423Sjfv		if (vsi->link_active == FALSE) {
2012279033Sjfv			pf->fc = hw->fc.current_mode;
2013266423Sjfv			if (bootverbose) {
2014266423Sjfv				device_printf(dev,"Link is up %d Gbps %s,"
2015266423Sjfv				    " Flow Control: %s\n",
2016279858Sjfv				    ((pf->link_speed ==
2017279858Sjfv				    I40E_LINK_SPEED_40GB)? 40:10),
2018279033Sjfv				    "Full Duplex", ixl_fc_string[pf->fc]);
2019266423Sjfv			}
2020266423Sjfv			vsi->link_active = TRUE;
2021277084Sjfv			/*
2022277084Sjfv			** Warn the user if the link speed on an NPAR-enabled
2023277084Sjfv			** partition is less than 10Gbps
2024277084Sjfv			*/
2025277084Sjfv			if (hw->func_caps.npar_enable &&
2026279858Sjfv			   (hw->phy.link_info.link_speed ==
2027279858Sjfv			   I40E_LINK_SPEED_1GB ||
2028279858Sjfv			   hw->phy.link_info.link_speed ==
2029279858Sjfv			   I40E_LINK_SPEED_100MB))
2030279858Sjfv				device_printf(dev, "The partition detected a link"
2031279858Sjfv				    " speed that is less than 10Gbps\n");
2032266423Sjfv			if_link_state_change(ifp, LINK_STATE_UP);
2033266423Sjfv		}
2034266423Sjfv	} else { /* Link down */
2035266423Sjfv		if (vsi->link_active == TRUE) {
2036266423Sjfv			if (bootverbose)
2037299547Serj				device_printf(dev, "Link is Down\n");
2038266423Sjfv			if_link_state_change(ifp, LINK_STATE_DOWN);
2039266423Sjfv			vsi->link_active = FALSE;
2040266423Sjfv		}
2041266423Sjfv	}
2042266423Sjfv
2043266423Sjfv	return;
2044266423Sjfv}
2045266423Sjfv
2046299547Serjstatic void
2047299547Serjixl_stop(struct ixl_pf *pf)
2048299547Serj{
2049299547Serj	IXL_PF_LOCK(pf);
2050299547Serj	ixl_stop_locked(pf);
2051299547Serj	IXL_PF_UNLOCK(pf);
2052299547Serj
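	/* Interrupt resources are released here and re-assigned on the next ixl_init() */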
2053299547Serj	ixl_free_interrupt_resources(pf);
2054299547Serj}
2055299547Serj
2056266423Sjfv/*********************************************************************
2057266423Sjfv *
2058266423Sjfv *  This routine disables all traffic on the adapter by disabling
2059266423Sjfv *  interrupts and the queue rings, and marks the interface down.
2060266423Sjfv *
2061266423Sjfv **********************************************************************/
2062266423Sjfv
2063266423Sjfvstatic void
2064299547Serjixl_stop_locked(struct ixl_pf *pf)
2065266423Sjfv{
2066270346Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
2067266423Sjfv	struct ifnet	*ifp = vsi->ifp;
2068266423Sjfv
2069299547Serj	INIT_DEBUGOUT("ixl_stop: begin\n");
2070266423Sjfv
2071299547Serj	IXL_PF_LOCK_ASSERT(pf);
2072299547Serj
2073299547Serj	/* Stop the local timer */
2074299547Serj	callout_stop(&pf->timer);
2075299547Serj
2076279858Sjfv	if (pf->num_vfs == 0)
2077279858Sjfv		ixl_disable_intr(vsi);
2078279858Sjfv	else
2079279858Sjfv		ixl_disable_rings_intr(vsi);
2080270346Sjfv	ixl_disable_rings(vsi);
2081266423Sjfv
2082266423Sjfv	/* Tell the stack that the interface is no longer active */
2083266423Sjfv	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2084266423Sjfv}
2085266423Sjfv
2086266423Sjfv
2087266423Sjfv/*********************************************************************
2088266423Sjfv *
2089266423Sjfv *  Setup the Legacy or MSI interrupt handler
2090266423Sjfv *
2091266423Sjfv **********************************************************************/
2092266423Sjfvstatic int
2093270346Sjfvixl_assign_vsi_legacy(struct ixl_pf *pf)
2094266423Sjfv{
2095266423Sjfv	device_t        dev = pf->dev;
2096270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
2097270346Sjfv	struct		ixl_queue *que = vsi->queues;
2098266423Sjfv	int 		error, rid = 0;
2099266423Sjfv
2100266423Sjfv	if (pf->msix == 1)
2101266423Sjfv		rid = 1;
2102266423Sjfv	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2103266423Sjfv	    &rid, RF_SHAREABLE | RF_ACTIVE);
2104266423Sjfv	if (pf->res == NULL) {
2105299548Serj		device_printf(dev, "Unable to allocate"
2106266423Sjfv		    " bus resource: vsi legacy/msi interrupt\n");
2107266423Sjfv		return (ENXIO);
2108266423Sjfv	}
2109266423Sjfv
2110266423Sjfv	/* Set the handler function */
2111266423Sjfv	error = bus_setup_intr(dev, pf->res,
2112266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2113270346Sjfv	    ixl_intr, pf, &pf->tag);
2114266423Sjfv	if (error) {
2115266423Sjfv		pf->res = NULL;
2116299552Serj		device_printf(dev, "Failed to register legacy/msi handler\n");
2117266423Sjfv		return (error);
2118266423Sjfv	}
2119266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
2120270346Sjfv	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2121270346Sjfv	TASK_INIT(&que->task, 0, ixl_handle_que, que);
2122270346Sjfv	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2123266423Sjfv	    taskqueue_thread_enqueue, &que->tq);
2124266423Sjfv	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2125266423Sjfv	    device_get_nameunit(dev));
2126270346Sjfv	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2127279858Sjfv
2128279858Sjfv#ifdef PCI_IOV
2129279858Sjfv	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2130279858Sjfv#endif
2131279858Sjfv
2132270346Sjfv	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2133266423Sjfv	    taskqueue_thread_enqueue, &pf->tq);
2134266423Sjfv	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2135266423Sjfv	    device_get_nameunit(dev));
2136266423Sjfv
2137266423Sjfv	return (0);
2138266423Sjfv}
2139266423Sjfv
2140299546Serjstatic void
2141299546Serjixl_init_taskqueues(struct ixl_pf *pf)
2142299546Serj{
2143299546Serj	struct ixl_vsi *vsi = &pf->vsi;
2144299546Serj	struct ixl_queue *que = vsi->queues;
2145299546Serj	device_t dev = pf->dev;
2146266423Sjfv
2147299546Serj	/* Tasklet for Admin Queue */
2148299546Serj	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
2149299546Serj#ifdef PCI_IOV
2150299546Serj	/* VFLR Tasklet */
2151299546Serj	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
2152299546Serj#endif
2153299546Serj
2154299546Serj	/* Create and start PF taskqueue */
2155299546Serj	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
2156299546Serj	    taskqueue_thread_enqueue, &pf->tq);
2157299546Serj	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
2158299546Serj	    device_get_nameunit(dev));
2159299546Serj
2160299546Serj	/* Create queue tasks and start queue taskqueues */
2161299546Serj	for (int i = 0; i < vsi->num_queues; i++, que++) {
2162299546Serj		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
2163299546Serj		TASK_INIT(&que->task, 0, ixl_handle_que, que);
2164299546Serj		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
2165299546Serj		    taskqueue_thread_enqueue, &que->tq);
2166299546Serj#ifdef RSS
		cpuset_t	cpu_mask;
		int		cpu_id = rss_getcpu(i % rss_getnumbuckets());

2167299546Serj		CPU_SETOF(cpu_id, &cpu_mask);
2168299546Serj		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2169299546Serj		    &cpu_mask, "%s (bucket %d)",
2170299546Serj		    device_get_nameunit(dev), cpu_id);
2171299546Serj#else
2172299546Serj		taskqueue_start_threads(&que->tq, 1, PI_NET,
2173299546Serj		    "%s (que %d)", device_get_nameunit(dev), que->me);
2174299546Serj#endif
2175299546Serj	}
2176299546Serj
2177299546Serj}
2178299546Serj
2179299546Serjstatic void
2180299546Serjixl_free_taskqueues(struct ixl_pf *pf)
2181299546Serj{
2182299546Serj	struct ixl_vsi		*vsi = &pf->vsi;
2183299546Serj	struct ixl_queue	*que = vsi->queues;
2184299546Serj
2185299546Serj	if (pf->tq)
2186299546Serj		taskqueue_free(pf->tq);
2187299546Serj	for (int i = 0; i < vsi->num_queues; i++, que++) {
2188299546Serj		if (que->tq)
2189299546Serj			taskqueue_free(que->tq);
2190299546Serj	}
2191299546Serj}
2192299546Serj
2193266423Sjfv/*********************************************************************
2194266423Sjfv *
2195266423Sjfv *  Setup MSIX Interrupt resources and handlers for the VSI
2196266423Sjfv *
2197266423Sjfv **********************************************************************/
2198266423Sjfvstatic int
2199270346Sjfvixl_assign_vsi_msix(struct ixl_pf *pf)
2200266423Sjfv{
2201266423Sjfv	device_t	dev = pf->dev;
2202270346Sjfv	struct 		ixl_vsi *vsi = &pf->vsi;
2203270346Sjfv	struct 		ixl_queue *que = vsi->queues;
2204266423Sjfv	struct		tx_ring	 *txr;
2205266423Sjfv	int 		error, rid, vector = 0;
2206299545Serj#ifdef	RSS
2207299545Serj	cpuset_t cpu_mask;
2208299545Serj#endif
2209266423Sjfv
2210299546Serj	/* Admin Queue interrupt vector is 0 */
2211266423Sjfv	rid = vector + 1;
2212266423Sjfv	pf->res = bus_alloc_resource_any(dev,
2213266423Sjfv    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2214266423Sjfv	if (!pf->res) {
2215299546Serj		device_printf(dev, "Unable to allocate"
2216299546Serj		    " bus resource: Adminq interrupt [rid=%d]\n", rid);
2217266423Sjfv		return (ENXIO);
2218266423Sjfv	}
2219266423Sjfv	/* Set the adminq vector and handler */
2220266423Sjfv	error = bus_setup_intr(dev, pf->res,
2221266423Sjfv	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2222270346Sjfv	    ixl_msix_adminq, pf, &pf->tag);
2223266423Sjfv	if (error) {
2224266423Sjfv		pf->res = NULL;
2225266423Sjfv		device_printf(dev, "Failed to register Admin queue handler\n");
2226266423Sjfv		return (error);
2227266423Sjfv	}
2228266423Sjfv	bus_describe_intr(dev, pf->res, pf->tag, "aq");
2229266423Sjfv	pf->admvec = vector;
2230266423Sjfv	++vector;
2231266423Sjfv
2232266423Sjfv	/* Now set up the stations */
2233266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
2234277084Sjfv		int cpu_id = i;
2235266423Sjfv		rid = vector + 1;
2236266423Sjfv		txr = &que->txr;
2237266423Sjfv		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2238266423Sjfv		    RF_SHAREABLE | RF_ACTIVE);
2239266423Sjfv		if (que->res == NULL) {
2240299546Serj			device_printf(dev, "Unable to allocate"
2241299546Serj		    	    " bus resource: que interrupt [rid=%d]\n", rid);
2242266423Sjfv			return (ENXIO);
2243266423Sjfv		}
2244266423Sjfv		/* Set the handler function */
2245266423Sjfv		error = bus_setup_intr(dev, que->res,
2246266423Sjfv		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2247270346Sjfv		    ixl_msix_que, que, &que->tag);
2248266423Sjfv		if (error) {
2249266423Sjfv			que->res = NULL;
2250266423Sjfv			device_printf(dev, "Failed to register queue handler\n");
2251266423Sjfv			return (error);
2252266423Sjfv		}
2253299546Serj		bus_describe_intr(dev, que->res, que->tag, "que%d", i);
2254266423Sjfv		/* Bind the vector to a CPU */
2255277084Sjfv#ifdef RSS
2256277084Sjfv		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2257277084Sjfv#endif
2258277084Sjfv		bus_bind_intr(dev, que->res, cpu_id);
2259266423Sjfv		que->msix = vector;
2260266423Sjfv	}
2261266423Sjfv
2262266423Sjfv	return (0);
2263266423Sjfv}
2264266423Sjfv
2265266423Sjfv
2266266423Sjfv/*
2267266423Sjfv * Allocate MSI/X vectors
2268266423Sjfv */
2269266423Sjfvstatic int
2270270346Sjfvixl_init_msix(struct ixl_pf *pf)
2271266423Sjfv{
2272266423Sjfv	device_t dev = pf->dev;
2273266423Sjfv	int rid, want, vectors, queues, available;
2274266423Sjfv
2275266423Sjfv	/* Override by tuneable */
2276270346Sjfv	if (ixl_enable_msix == 0)
2277299552Serj		goto no_msix;
2278266423Sjfv
2279269198Sjfv	/*
2280269198Sjfv	** When used in a virtualized environment,
2281269198Sjfv	** the PCI BUSMASTER capability may not be set,
2282269198Sjfv	** so explicitly set it here and rewrite
2283269198Sjfv	** the ENABLE bit in the MSIX control register
2284269198Sjfv	** so that the host can successfully
2285269198Sjfv	** initialize us.
2286269198Sjfv	*/
2287269198Sjfv	{
2288269198Sjfv		u16 pci_cmd_word;
2289269198Sjfv		int msix_ctrl;
2290269198Sjfv		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2291269198Sjfv		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
2292269198Sjfv		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
2293269198Sjfv		pci_find_cap(dev, PCIY_MSIX, &rid);
2294269198Sjfv		rid += PCIR_MSIX_CTRL;
2295269198Sjfv		msix_ctrl = pci_read_config(dev, rid, 2);
2296269198Sjfv		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
2297269198Sjfv		pci_write_config(dev, rid, msix_ctrl, 2);
2298269198Sjfv	}
2299269198Sjfv
2300266423Sjfv	/* First try MSI/X */
2301270346Sjfv	rid = PCIR_BAR(IXL_BAR);
2302266423Sjfv	pf->msix_mem = bus_alloc_resource_any(dev,
2303266423Sjfv	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2304266423Sjfv       	if (!pf->msix_mem) {
2305266423Sjfv		/* May not be enabled */
2306266423Sjfv		device_printf(pf->dev,
2307299549Serj		    "Unable to map MSIX table\n");
2308299552Serj		goto no_msix;
2309266423Sjfv	}
2310266423Sjfv
2311266423Sjfv	available = pci_msix_count(dev);
2312266423Sjfv	if (available == 0) { /* system has msix disabled */
2313266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2314266423Sjfv		    rid, pf->msix_mem);
2315266423Sjfv		pf->msix_mem = NULL;
2316299552Serj		goto no_msix;
2317266423Sjfv	}
2318266423Sjfv
2319266423Sjfv	/* Figure out a reasonable auto config value */
2320266423Sjfv	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
2321266423Sjfv
2322299552Serj	/* Override with tunable value if tunable is less than autoconfig count */
2323270346Sjfv	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
2324270346Sjfv		queues = ixl_max_queues;
2325299546Serj	else if ((ixl_max_queues != 0) && (ixl_max_queues > queues))
2326299546Serj		device_printf(dev, "ixl_max_queues > # of cpus, using "
2327299546Serj		    "autoconfig amount...\n");
2328299546Serj	/* Or limit maximum auto-configured queues to 8 */
2329299546Serj	else if ((ixl_max_queues == 0) && (queues > 8))
2330299546Serj		queues = 8;
2331266423Sjfv
2332277084Sjfv#ifdef  RSS
2333277084Sjfv	/* If we're doing RSS, clamp at the number of RSS buckets */
2334277084Sjfv	if (queues > rss_getnumbuckets())
2335277084Sjfv		queues = rss_getnumbuckets();
2336277084Sjfv#endif
2337277084Sjfv
2338266423Sjfv	/*
2339266423Sjfv	** Want one vector (RX/TX pair) per queue
2340266423Sjfv	** plus an additional for the admin queue.
2341266423Sjfv	*/
2342266423Sjfv	want = queues + 1;
2343266423Sjfv	if (want <= available)	/* Have enough */
2344266423Sjfv		vectors = want;
2345266423Sjfv	else {
2346266423Sjfv               	device_printf(pf->dev,
2347266423Sjfv		    "MSIX Configuration Problem, "
2348266423Sjfv		    "%d vectors available but %d wanted!\n",
2349266423Sjfv		    available, want);
2350266423Sjfv		return (0); /* Will go to Legacy setup */
2351266423Sjfv	}
2352266423Sjfv
2353266423Sjfv	if (pci_alloc_msix(dev, &vectors) == 0) {
2354266423Sjfv               	device_printf(pf->dev,
2355266423Sjfv		    "Using MSIX interrupts with %d vectors\n", vectors);
2356266423Sjfv		pf->msix = vectors;
2357266423Sjfv		pf->vsi.num_queues = queues;
2358277084Sjfv#ifdef RSS
2359277084Sjfv		/*
2360277084Sjfv		 * If we're doing RSS, the number of queues needs to
2361277084Sjfv		 * match the number of RSS buckets that are configured.
2362277084Sjfv		 *
2363277084Sjfv		 * + If there's more queues than RSS buckets, we'll end
2364277084Sjfv		 *   up with queues that get no traffic.
2365277084Sjfv		 *
2366277084Sjfv		 * + If there's more RSS buckets than queues, we'll end
2367277084Sjfv		 *   up having multiple RSS buckets map to the same queue,
2368277084Sjfv		 *   so there'll be some contention.
2369277084Sjfv		 */
2370277084Sjfv		if (queues != rss_getnumbuckets()) {
2371277084Sjfv			device_printf(dev,
2372277084Sjfv			    "%s: queues (%d) != RSS buckets (%d)"
2373277084Sjfv			    "; performance will be impacted.\n",
2374277084Sjfv			    __func__, queues, rss_getnumbuckets());
2375277084Sjfv		}
2376277084Sjfv#endif
2377266423Sjfv		return (vectors);
2378266423Sjfv	}
2379299552Serjno_msix:
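	/* MSI-X unavailable or disabled: fall back to a single MSI vector, or legacy INTx if MSI allocation fails */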
2380299552Serj	vectors = pci_msi_count(dev);
2381266423Sjfv	pf->vsi.num_queues = 1;
2382270346Sjfv	ixl_max_queues = 1;
2383270346Sjfv	ixl_enable_msix = 0;
2384299552Serj	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2385299547Serj		device_printf(pf->dev, "Using an MSI interrupt\n");
2386266423Sjfv	else {
2387299552Serj		vectors = 0;
2388299547Serj		device_printf(pf->dev, "Using a Legacy interrupt\n");
2389266423Sjfv	}
2390266423Sjfv	return (vectors);
2391266423Sjfv}
2392266423Sjfv
2393266423Sjfv/*
2394299547Serj * Plumb MSIX vectors
2395266423Sjfv */
2396266423Sjfvstatic void
2397270346Sjfvixl_configure_msix(struct ixl_pf *pf)
2398266423Sjfv{
2399266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2400270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
2401266423Sjfv	u32		reg;
2402266423Sjfv	u16		vector = 1;
2403266423Sjfv
2404266423Sjfv	/* First set up the adminq - vector 0 */
2405266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2406266423Sjfv	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2407266423Sjfv
2408266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2409266423Sjfv	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2410299549Serj	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2411266423Sjfv	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2412266423Sjfv	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2413266423Sjfv	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2414266423Sjfv	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2415266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2416266423Sjfv
2417299547Serj	/*
2418299547Serj	 * 0x7FF is the end of the queue list.
2419299547Serj	 * This means we won't use MSI-X vector 0 for a queue interrupt
2420299547Serj	 * in MSIX mode.
2421299547Serj	 */
2422266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2423299547Serj	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
2424299547Serj	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
2425266423Sjfv
2426266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2427266423Sjfv	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2428266423Sjfv	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2429266423Sjfv
2430266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2431266423Sjfv
2432266423Sjfv	/* Next configure the queues */
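	/*
	** Each RX queue cause chains to its TX queue, and each TX queue
	** chains to the next RX queue; the last TX queue ends the list
	** with IXL_QUEUE_EOL.
	*/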
2433266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2434299545Serj		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2435266423Sjfv		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2436266423Sjfv
2437266423Sjfv		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2438270346Sjfv		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2439266423Sjfv		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2440266423Sjfv		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2441266423Sjfv		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2442266423Sjfv		wr32(hw, I40E_QINT_RQCTL(i), reg);
2443266423Sjfv
2444266423Sjfv		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2445270346Sjfv		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2446266423Sjfv		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2447299545Serj		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2448266423Sjfv		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2449299545Serj		if (i == (vsi->num_queues - 1))
2450299545Serj			reg |= (IXL_QUEUE_EOL
2451299545Serj			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2452266423Sjfv		wr32(hw, I40E_QINT_TQCTL(i), reg);
2453266423Sjfv	}
2454266423Sjfv}
2455266423Sjfv
2456266423Sjfv/*
2457266423Sjfv * Configure for MSI single vector operation
2458266423Sjfv */
2459266423Sjfvstatic void
2460270346Sjfvixl_configure_legacy(struct ixl_pf *pf)
2461266423Sjfv{
2462266423Sjfv	struct i40e_hw	*hw = &pf->hw;
2463266423Sjfv	u32		reg;
2464266423Sjfv
2465266423Sjfv	wr32(hw, I40E_PFINT_ITR0(0), 0);
2466266423Sjfv	wr32(hw, I40E_PFINT_ITR0(1), 0);
2467266423Sjfv
2468266423Sjfv	/* Setup "other" causes */
2469266423Sjfv	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2470266423Sjfv	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2471266423Sjfv	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2472266423Sjfv	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2473266423Sjfv	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2474266423Sjfv	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2475266423Sjfv	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2476266423Sjfv	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2477266423Sjfv	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2478266423Sjfv	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2479266423Sjfv	    ;
2480266423Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2481266423Sjfv
2482266423Sjfv	/* SW_ITR_IDX = 0, but don't change INTENA */
2483266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0,
2484266423Sjfv	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2485266423Sjfv	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2486266423Sjfv	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2487266423Sjfv	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2488266423Sjfv
2489266423Sjfv	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2490266423Sjfv	wr32(hw, I40E_PFINT_LNKLST0, 0);
2491266423Sjfv
2492266423Sjfv	/* Associate the queue pair to the vector and enable the q int */
2493266423Sjfv	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2494270346Sjfv	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2495266423Sjfv	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2496266423Sjfv	wr32(hw, I40E_QINT_RQCTL(0), reg);
2497266423Sjfv
2498266423Sjfv	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2499270346Sjfv	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2500270346Sjfv	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2501266423Sjfv	wr32(hw, I40E_QINT_TQCTL(0), reg);
2502266423Sjfv
2503266423Sjfv}
2504266423Sjfv
2505266423Sjfv
2506266423Sjfv/*
2507266423Sjfv * Set the Initial ITR state
2508266423Sjfv */
2509266423Sjfvstatic void
2510270346Sjfvixl_configure_itr(struct ixl_pf *pf)
2511266423Sjfv{
2512266423Sjfv	struct i40e_hw		*hw = &pf->hw;
2513270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2514270346Sjfv	struct ixl_queue	*que = vsi->queues;
2515266423Sjfv
2516270346Sjfv	vsi->rx_itr_setting = ixl_rx_itr;
2517270346Sjfv	if (ixl_dynamic_rx_itr)
2518270346Sjfv		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2519270346Sjfv	vsi->tx_itr_setting = ixl_tx_itr;
2520270346Sjfv	if (ixl_dynamic_tx_itr)
2521270346Sjfv		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2522266423Sjfv
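	/*
	** Program the per-queue ITR registers; the IXL_ITR_DYNAMIC flag
	** lets the interval be adapted at runtime (see
	** ixl_set_queue_rx_itr()/ixl_set_queue_tx_itr()).
	*/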
2523266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2524266423Sjfv		struct tx_ring	*txr = &que->txr;
2525266423Sjfv		struct rx_ring 	*rxr = &que->rxr;
2526266423Sjfv
2527270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2528266423Sjfv		    vsi->rx_itr_setting);
2529266423Sjfv		rxr->itr = vsi->rx_itr_setting;
2530270346Sjfv		rxr->latency = IXL_AVE_LATENCY;
2531270346Sjfv		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2532266423Sjfv		    vsi->tx_itr_setting);
2533266423Sjfv		txr->itr = vsi->tx_itr_setting;
2534270346Sjfv		txr->latency = IXL_AVE_LATENCY;
2535266423Sjfv	}
2536266423Sjfv}
2537266423Sjfv
2538266423Sjfv
2539266423Sjfvstatic int
2540270346Sjfvixl_allocate_pci_resources(struct ixl_pf *pf)
2541266423Sjfv{
2542266423Sjfv	int             rid;
2543266423Sjfv	device_t        dev = pf->dev;
2544266423Sjfv
2545266423Sjfv	rid = PCIR_BAR(0);
2546266423Sjfv	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2547266423Sjfv	    &rid, RF_ACTIVE);
2548266423Sjfv
2549266423Sjfv	if (!(pf->pci_mem)) {
2550299552Serj		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
2551266423Sjfv		return (ENXIO);
2552266423Sjfv	}
2553266423Sjfv
2554266423Sjfv	pf->osdep.mem_bus_space_tag =
2555266423Sjfv		rman_get_bustag(pf->pci_mem);
2556266423Sjfv	pf->osdep.mem_bus_space_handle =
2557266423Sjfv		rman_get_bushandle(pf->pci_mem);
2558270346Sjfv	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2559272285Srstone	pf->osdep.flush_reg = I40E_GLGEN_STAT;
2560266423Sjfv	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2561266423Sjfv
2562266423Sjfv	pf->hw.back = &pf->osdep;
2563266423Sjfv
2564266423Sjfv	/*
2565266423Sjfv	** Now set up MSI or MSI-X; this should
2566266423Sjfv	** return the number of supported
2567266423Sjfv	** vectors (which will be 1 for MSI).
2568266423Sjfv	*/
2569270346Sjfv	pf->msix = ixl_init_msix(pf);
2570266423Sjfv	return (0);
2571266423Sjfv}
2572266423Sjfv
2573266423Sjfvstatic void
2574299547Serjixl_free_interrupt_resources(struct ixl_pf *pf)
2575266423Sjfv{
2576270346Sjfv	struct ixl_vsi		*vsi = &pf->vsi;
2577270346Sjfv	struct ixl_queue	*que = vsi->queues;
2578266423Sjfv	device_t		dev = pf->dev;
2579299547Serj	int rid;
2580266423Sjfv
2581266423Sjfv	/* We may get here before stations are setup */
2582270346Sjfv	if ((!ixl_enable_msix) || (que == NULL))
2583266423Sjfv		goto early;
2584266423Sjfv
2585266423Sjfv	/*
2586266423Sjfv	**  Release all msix VSI resources:
2587266423Sjfv	*/
2588266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2589266423Sjfv		rid = que->msix + 1;
2590266423Sjfv		if (que->tag != NULL) {
2591266423Sjfv			bus_teardown_intr(dev, que->res, que->tag);
2592266423Sjfv			que->tag = NULL;
2593266423Sjfv		}
2594299547Serj		if (que->res != NULL) {
2595266423Sjfv			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2596299547Serj			que->res = NULL;
2597299547Serj		}
2598266423Sjfv	}
2599266423Sjfv
2600266423Sjfvearly:
2601266423Sjfv	/* Clean the AdminQ interrupt last */
2602266423Sjfv	if (pf->admvec) /* we are doing MSIX */
2603266423Sjfv		rid = pf->admvec + 1;
2604266423Sjfv	else
2605266423Sjfv		(pf->msix != 0) ? (rid = 1):(rid = 0);
2606266423Sjfv
2607266423Sjfv	if (pf->tag != NULL) {
2608266423Sjfv		bus_teardown_intr(dev, pf->res, pf->tag);
2609266423Sjfv		pf->tag = NULL;
2610266423Sjfv	}
2611299547Serj	if (pf->res != NULL) {
2612266423Sjfv		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2613299547Serj		pf->res = NULL;
2614299547Serj	}
2615299547Serj}
2616266423Sjfv
2617299547Serjstatic void
2618299547Serjixl_free_pci_resources(struct ixl_pf *pf)
2619299547Serj{
2620299547Serj	device_t		dev = pf->dev;
2621299547Serj	int			memrid;
2622299547Serj
2623299547Serj	ixl_free_interrupt_resources(pf);
2624299547Serj
2625266423Sjfv	if (pf->msix)
2626266423Sjfv		pci_release_msi(dev);
2627266423Sjfv
2628299547Serj	memrid = PCIR_BAR(IXL_BAR);
2629299547Serj
2630266423Sjfv	if (pf->msix_mem != NULL)
2631266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2632266423Sjfv		    memrid, pf->msix_mem);
2633266423Sjfv
2634266423Sjfv	if (pf->pci_mem != NULL)
2635266423Sjfv		bus_release_resource(dev, SYS_RES_MEMORY,
2636266423Sjfv		    PCIR_BAR(0), pf->pci_mem);
2637266423Sjfv
2638266423Sjfv	return;
2639266423Sjfv}
2640266423Sjfv
2641274205Sjfvstatic void
2642274205Sjfvixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
2643274205Sjfv{
2644274205Sjfv	/* Add supported media types to the ifmedia list */
2645274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2646274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2647266423Sjfv
2648274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2649274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2650279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
2651279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2652279858Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
2653279858Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2654274205Sjfv
2655284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
2656279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
2657274205Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2658274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2659279033Sjfv
2660274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2661274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2662274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2663274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2664274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2665274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2666274205Sjfv
2667279033Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
2668279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2669279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
2670279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
2671279033Sjfv	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2672274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2673274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2674274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2675274205Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2676274205Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2677284049Sjfv
2678284049Sjfv#ifndef IFM_ETH_XTYPE
2679284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2680284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2681284049Sjfv
2682284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2683284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
2684284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
2685284049Sjfv	    phy_type & (1 << I40E_PHY_TYPE_SFI))
2686284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2687284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2688284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2689284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2690284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2691284049Sjfv
2692284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2693284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2694284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2695284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2696284049Sjfv#else
2697284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
2698284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
2699284049Sjfv
2700284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
2701284049Sjfv	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
2702284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
2703284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
2704284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
2705284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
2706284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
2707284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
2708284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
2709284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
2710284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
2711284049Sjfv
2712284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
2713284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
2714284049Sjfv
2715284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
2716284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
2717284049Sjfv	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
2718284049Sjfv		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
2719284049Sjfv#endif
2720274205Sjfv}
2721274205Sjfv
2722266423Sjfv/*********************************************************************
2723266423Sjfv *
2724266423Sjfv *  Setup networking device structure and register an interface.
2725266423Sjfv *
2726266423Sjfv **********************************************************************/
2727266423Sjfvstatic int
2728270346Sjfvixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2729266423Sjfv{
2730266423Sjfv	struct ifnet		*ifp;
2731266423Sjfv	struct i40e_hw		*hw = vsi->hw;
2732270346Sjfv	struct ixl_queue	*que = vsi->queues;
2733279033Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
2734266423Sjfv	enum i40e_status_code aq_error = 0;
2735266423Sjfv
2736270346Sjfv	INIT_DEBUGOUT("ixl_setup_interface: begin");
2737266423Sjfv
2738266423Sjfv	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2739266423Sjfv	if (ifp == NULL) {
2740266423Sjfv		device_printf(dev, "cannot allocate ifnet structure\n");
2741266423Sjfv		return (-1);
2742266423Sjfv	}
2743266423Sjfv	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2744266423Sjfv	ifp->if_mtu = ETHERMTU;
2745299546Serj	ifp->if_baudrate = IF_Gbps(40);
2746270346Sjfv	ifp->if_init = ixl_init;
2747266423Sjfv	ifp->if_softc = vsi;
2748266423Sjfv	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2749270346Sjfv	ifp->if_ioctl = ixl_ioctl;
2750266423Sjfv
2751274205Sjfv#if __FreeBSD_version >= 1100036
2752272227Sglebius	if_setgetcounterfn(ifp, ixl_get_counter);
2753272227Sglebius#endif
2754272227Sglebius
2755270346Sjfv	ifp->if_transmit = ixl_mq_start;
2756266423Sjfv
2757270346Sjfv	ifp->if_qflush = ixl_qflush;
2758266423Sjfv
2759266423Sjfv	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2760266423Sjfv
2761266423Sjfv	vsi->max_frame_size =
2762266423Sjfv	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2763266423Sjfv	    + ETHER_VLAN_ENCAP_LEN;
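	/*
	 * For example, with the default MTU of 1500 this works out to
	 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
	 */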
2764266423Sjfv
2765266423Sjfv	/*
2766266423Sjfv	 * Tell the upper layer(s) we support long frames.
2767266423Sjfv	 */
2768270856Sglebius	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2769266423Sjfv
2770266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM;
2771266423Sjfv	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2772266423Sjfv	ifp->if_capabilities |= IFCAP_TSO;
2773266423Sjfv	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2774266423Sjfv	ifp->if_capabilities |= IFCAP_LRO;
2775266423Sjfv
2776266423Sjfv	/* VLAN capabilities */
2777266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2778266423Sjfv			     |  IFCAP_VLAN_HWTSO
2779266423Sjfv			     |  IFCAP_VLAN_MTU
2780266423Sjfv			     |  IFCAP_VLAN_HWCSUM;
2781266423Sjfv	ifp->if_capenable = ifp->if_capabilities;
2782266423Sjfv
2783266423Sjfv	/*
2784266423Sjfv	** Don't turn this on by default: if vlans are
2785266423Sjfv	** created on another pseudo device (e.g. lagg),
2786266423Sjfv	** vlan events are not passed through and that breaks
2787266423Sjfv	** operation, but with HW FILTER off it works. If
2788270346Sjfv	** using vlans directly on the ixl driver you can
2789266423Sjfv	** enable this and get full hardware tag filtering.
2790266423Sjfv	*/
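	/*
	 * (If desired, the capability can be toggled at runtime with
	 * ifconfig, e.g. "ifconfig ixl0 vlanhwfilter" or
	 * "ifconfig ixl0 -vlanhwfilter", assuming the interface is ixl0.)
	 */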
2791266423Sjfv	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2792266423Sjfv
2793266423Sjfv	/*
2794266423Sjfv	 * Specify the media types supported by this adapter and register
2795266423Sjfv	 * callbacks to update media and link information
2796266423Sjfv	 */
2797270346Sjfv	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2798270346Sjfv		     ixl_media_status);
2799266423Sjfv
2800279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
2801279033Sjfv	    FALSE, TRUE, &abilities, NULL);
2802279033Sjfv	/* May need a delay to detect fiber correctly */
2803274205Sjfv	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
2804274205Sjfv		i40e_msec_delay(200);
2805277084Sjfv		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
2806279033Sjfv		    TRUE, &abilities, NULL);
2807279033Sjfv	}
2808279033Sjfv	if (aq_error) {
2809274205Sjfv		if (aq_error == I40E_ERR_UNKNOWN_PHY)
2810274205Sjfv			device_printf(dev, "Unknown PHY type detected!\n");
2811274205Sjfv		else
2812279033Sjfv			device_printf(dev,
2813279033Sjfv			    "Error getting supported media types, err %d,"
2814279033Sjfv			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
2815279033Sjfv		return (0);
2816279033Sjfv	}
2817266423Sjfv
2818279033Sjfv	ixl_add_ifmedia(vsi, abilities.phy_type);
2819279033Sjfv
2820266423Sjfv	/* Use autoselect media by default */
2821266423Sjfv	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2822266423Sjfv	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2823266423Sjfv
2824274205Sjfv	ether_ifattach(ifp, hw->mac.addr);
2825274205Sjfv
2826266423Sjfv	return (0);
2827266423Sjfv}
2828266423Sjfv
2829279858Sjfv/*
2830299547Serj** Run when the Admin Queue gets a link state change interrupt.
2831279858Sjfv*/
2832279858Sjfvstatic void
2833279858Sjfvixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
2834266423Sjfv{
2835279858Sjfv	struct i40e_hw	*hw = &pf->hw;
2836299547Serj	device_t dev = pf->dev;
2837279858Sjfv	struct i40e_aqc_get_link_status *status =
2838279858Sjfv	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
2839266423Sjfv
2840299547Serj
2841299547Serj	/* Request link status from adapter */
2842279858Sjfv	hw->phy.get_link_info = TRUE;
2843299547Serj	i40e_get_link_status(hw, &pf->link_up);
2844299547Serj
2845299547Serj	/* Print out message if an unqualified module is found */
2846279858Sjfv	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2847279858Sjfv	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
2848279858Sjfv	    (!(status->link_info & I40E_AQ_LINK_UP)))
2849299547Serj		device_printf(dev, "Link failed because "
2850299547Serj		    "an unqualified module was detected!\n");
2851279858Sjfv
2852299547Serj	/* Update OS link info */
2853299547Serj	ixl_update_link_status(pf);
2854266423Sjfv}
2855266423Sjfv
2856266423Sjfv/*********************************************************************
2857266423Sjfv *
2858279033Sjfv *  Get Firmware Switch configuration
2859279033Sjfv *	- this will need to be more robust when more complex
2860279033Sjfv *	  switch configurations are enabled.
2861266423Sjfv *
2862266423Sjfv **********************************************************************/
2863266423Sjfvstatic int
2864279033Sjfvixl_switch_config(struct ixl_pf *pf)
2865266423Sjfv{
2866279033Sjfv	struct i40e_hw	*hw = &pf->hw;
2867279033Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
2868266423Sjfv	device_t 	dev = vsi->dev;
2869266423Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
2870266423Sjfv	u8	aq_buf[I40E_AQ_LARGE_BUF];
2871279858Sjfv	int	ret;
2872266423Sjfv	u16	next = 0;
2873266423Sjfv
2874279033Sjfv	memset(&aq_buf, 0, sizeof(aq_buf));
2875266423Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2876266423Sjfv	ret = i40e_aq_get_switch_config(hw, sw_config,
2877266423Sjfv	    sizeof(aq_buf), &next, NULL);
2878266423Sjfv	if (ret) {
2879279858Sjfv		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
2880279858Sjfv		    ret);
2881266423Sjfv		return (ret);
2882266423Sjfv	}
2883270346Sjfv#ifdef IXL_DEBUG
2884279858Sjfv	device_printf(dev,
2885279858Sjfv	    "Switch config: header reported: %d in structure, %d total\n",
2886266423Sjfv    	    sw_config->header.num_reported, sw_config->header.num_total);
2887279858Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
2888279858Sjfv		device_printf(dev,
2889279858Sjfv		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
2890279858Sjfv		    sw_config->element[i].element_type,
2891279858Sjfv		    sw_config->element[i].seid,
2892279858Sjfv		    sw_config->element[i].uplink_seid,
2893279858Sjfv		    sw_config->element[i].downlink_seid);
2894279858Sjfv	}
2895266423Sjfv#endif
2896279033Sjfv	/* Simplified due to a single VSI at the moment */
2897279858Sjfv	vsi->uplink_seid = sw_config->element[0].uplink_seid;
2898279858Sjfv	vsi->downlink_seid = sw_config->element[0].downlink_seid;
2899266423Sjfv	vsi->seid = sw_config->element[0].seid;
2900279033Sjfv	return (ret);
2901279033Sjfv}
2902266423Sjfv
2903279033Sjfv/*********************************************************************
2904279033Sjfv *
2905279033Sjfv *  Initialize the VSI:  this sets up the HMC contexts (things
2906279033Sjfv *  			 like the number of descriptors and buffer size)
2907279033Sjfv *			 and also initializes the rings.
2908279033Sjfv *
2909279033Sjfv **********************************************************************/
2910279033Sjfvstatic int
2911279033Sjfvixl_initialize_vsi(struct ixl_vsi *vsi)
2912279033Sjfv{
2913279858Sjfv	struct ixl_pf		*pf = vsi->back;
2914279033Sjfv	struct ixl_queue	*que = vsi->queues;
2915279033Sjfv	device_t		dev = vsi->dev;
2916279033Sjfv	struct i40e_hw		*hw = vsi->hw;
2917279033Sjfv	struct i40e_vsi_context	ctxt;
2918279033Sjfv	int			err = 0;
2919279033Sjfv
2920266423Sjfv	memset(&ctxt, 0, sizeof(ctxt));
2921266423Sjfv	ctxt.seid = vsi->seid;
2922279858Sjfv	if (pf->veb_seid != 0)
2923279858Sjfv		ctxt.uplink_seid = pf->veb_seid;
2924266423Sjfv	ctxt.pf_num = hw->pf_id;
2925279033Sjfv	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2926279033Sjfv	if (err) {
2927299548Serj		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d\n", err);
2928279033Sjfv		return (err);
2929266423Sjfv	}
2930270346Sjfv#ifdef IXL_DEBUG
2931299548Serj	device_printf(dev, "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2932266423Sjfv	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2933266423Sjfv	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2934266423Sjfv	    ctxt.uplink_seid, ctxt.vsi_number,
2935266423Sjfv	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2936266423Sjfv	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2937266423Sjfv	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2938266423Sjfv#endif
2939266423Sjfv	/*
2940266423Sjfv	** Set the queue and traffic class bits
2941266423Sjfv	**  - when multiple traffic classes are supported
2942266423Sjfv	**    this will need to be more robust.
2943266423Sjfv	*/
2944266423Sjfv	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2945266423Sjfv	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2946299545Serj	ctxt.info.queue_mapping[0] = 0;
2947299552Serj	/* This VSI is assigned 64 queues (we may not use all of them) */
2948299548Serj	ctxt.info.tc_mapping[0] = 0x0c00;
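	/*
	 * Note: per the AQ VSI TC queue-mapping layout (queue offset in the
	 * low bits, log2 of the queue count in the field above it), 0x0c00
	 * encodes offset 0 with 2^6 = 64 queues for TC0, matching the
	 * comment above.
	 */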
2949266423Sjfv
2950266423Sjfv	/* Set VLAN receive stripping mode */
2951266423Sjfv	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2952266423Sjfv	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2953266423Sjfv	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2954299548Serj		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2955266423Sjfv	else
2956299548Serj		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2957266423Sjfv
2958266423Sjfv	/* Keep copy of VSI info in VSI for statistic counters */
2959266423Sjfv	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2960266423Sjfv
2961266423Sjfv	/* Reset VSI statistics */
2962270346Sjfv	ixl_vsi_reset_stats(vsi);
2963266423Sjfv	vsi->hw_filters_add = 0;
2964266423Sjfv	vsi->hw_filters_del = 0;
2965266423Sjfv
2966279858Sjfv	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
2967279858Sjfv
2968279033Sjfv	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2969279033Sjfv	if (err) {
2970299548Serj		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d, aq_error %d\n",
2971299548Serj		   err, hw->aq.asq_last_status);
2972279033Sjfv		return (err);
2973279033Sjfv	}
2974266423Sjfv
2975266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
2976266423Sjfv		struct tx_ring		*txr = &que->txr;
2977266423Sjfv		struct rx_ring 		*rxr = &que->rxr;
2978266423Sjfv		struct i40e_hmc_obj_txq tctx;
2979266423Sjfv		struct i40e_hmc_obj_rxq rctx;
2980266423Sjfv		u32			txctl;
2981266423Sjfv		u16			size;
2982266423Sjfv
2983266423Sjfv		/* Setup the HMC TX Context  */
2984266423Sjfv		size = que->num_desc * sizeof(struct i40e_tx_desc);
2985266423Sjfv		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2986266423Sjfv		tctx.new_context = 1;
2987279858Sjfv		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
2988266423Sjfv		tctx.qlen = que->num_desc;
2989266423Sjfv		tctx.fc_ena = 0;
2990269198Sjfv		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2991269198Sjfv		/* Enable HEAD writeback */
2992269198Sjfv		tctx.head_wb_ena = 1;
2993269198Sjfv		tctx.head_wb_addr = txr->dma.pa +
2994269198Sjfv		    (que->num_desc * sizeof(struct i40e_tx_desc));
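		/*
		 * The writeback slot is the u32 reserved just past the
		 * descriptor ring (see the tsize calculation in
		 * ixl_setup_stations()); the HW writes the TX head index
		 * there so the driver can track completions without a
		 * register read.
		 */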
2995266423Sjfv		tctx.rdylist_act = 0;
2996266423Sjfv		err = i40e_clear_lan_tx_queue_context(hw, i);
2997266423Sjfv		if (err) {
2998266423Sjfv			device_printf(dev, "Unable to clear TX context\n");
2999266423Sjfv			break;
3000266423Sjfv		}
3001266423Sjfv		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
3002266423Sjfv		if (err) {
3003266423Sjfv			device_printf(dev, "Unable to set TX context\n");
3004266423Sjfv			break;
3005266423Sjfv		}
3006266423Sjfv		/* Associate the ring with this PF */
3007266423Sjfv		txctl = I40E_QTX_CTL_PF_QUEUE;
3008266423Sjfv		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3009266423Sjfv		    I40E_QTX_CTL_PF_INDX_MASK);
3010266423Sjfv		wr32(hw, I40E_QTX_CTL(i), txctl);
3011270346Sjfv		ixl_flush(hw);
3012266423Sjfv
3013266423Sjfv		/* Do ring (re)init */
3014270346Sjfv		ixl_init_tx_ring(que);
3015266423Sjfv
3016266423Sjfv		/* Next setup the HMC RX Context  */
3017279858Sjfv		if (vsi->max_frame_size <= MCLBYTES)
3018266423Sjfv			rxr->mbuf_sz = MCLBYTES;
3019266423Sjfv		else
3020266423Sjfv			rxr->mbuf_sz = MJUMPAGESIZE;
3021266423Sjfv
3022266423Sjfv		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
3023266423Sjfv
3024266423Sjfv		/* Set up an RX context for the HMC */
3025266423Sjfv		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
3026266423Sjfv		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
3027266423Sjfv		/* ignore header split for now */
3028266423Sjfv		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
3029266423Sjfv		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
3030266423Sjfv		    vsi->max_frame_size : max_rxmax;
3031266423Sjfv		rctx.dtype = 0;
3032266423Sjfv		rctx.dsize = 1;	/* do 32byte descriptors */
3033266423Sjfv		rctx.hsplit_0 = 0;  /* no HDR split initially */
3034279858Sjfv		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
3035266423Sjfv		rctx.qlen = que->num_desc;
3036266423Sjfv		rctx.tphrdesc_ena = 1;
3037266423Sjfv		rctx.tphwdesc_ena = 1;
3038266423Sjfv		rctx.tphdata_ena = 0;
3039266423Sjfv		rctx.tphhead_ena = 0;
3040266423Sjfv		rctx.lrxqthresh = 2;
3041266423Sjfv		rctx.crcstrip = 1;
3042266423Sjfv		rctx.l2tsel = 1;
3043266423Sjfv		rctx.showiv = 1;
3044266423Sjfv		rctx.fc_ena = 0;
3045266423Sjfv		rctx.prefena = 1;
3046266423Sjfv
3047266423Sjfv		err = i40e_clear_lan_rx_queue_context(hw, i);
3048266423Sjfv		if (err) {
3049266423Sjfv			device_printf(dev,
3050266423Sjfv			    "Unable to clear RX context %d\n", i);
3051266423Sjfv			break;
3052266423Sjfv		}
3053266423Sjfv		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
3054266423Sjfv		if (err) {
3055266423Sjfv			device_printf(dev, "Unable to set RX context %d\n", i);
3056266423Sjfv			break;
3057266423Sjfv		}
3058270346Sjfv		err = ixl_init_rx_ring(que);
3059266423Sjfv		if (err) {
3060266423Sjfv			device_printf(dev, "Fail in init_rx_ring %d\n", i);
3061266423Sjfv			break;
3062266423Sjfv		}
3063299545Serj		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
3064279860Sjfv#ifdef DEV_NETMAP
3065279860Sjfv		/* preserve queue */
3066279860Sjfv		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
3067279860Sjfv			struct netmap_adapter *na = NA(vsi->ifp);
3068279860Sjfv			struct netmap_kring *kring = &na->rx_rings[i];
3069279860Sjfv			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
3070279860Sjfv			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
3071279860Sjfv		} else
3072279860Sjfv#endif /* DEV_NETMAP */
3073266423Sjfv		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
3074266423Sjfv	}
3075266423Sjfv	return (err);
3076266423Sjfv}
3077266423Sjfv
3078266423Sjfv
3079266423Sjfv/*********************************************************************
3080266423Sjfv *
3081266423Sjfv *  Free all VSI structs.
3082266423Sjfv *
3083266423Sjfv **********************************************************************/
3084266423Sjfvvoid
3085270346Sjfvixl_free_vsi(struct ixl_vsi *vsi)
3086266423Sjfv{
3087270346Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
3088270346Sjfv	struct ixl_queue	*que = vsi->queues;
3089266423Sjfv
3090266423Sjfv	/* Free station queues */
3091299549Serj	if (!vsi->queues)
3092299549Serj		goto free_filters;
3093299549Serj
3094266423Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++) {
3095266423Sjfv		struct tx_ring *txr = &que->txr;
3096266423Sjfv		struct rx_ring *rxr = &que->rxr;
3097266423Sjfv
3098266423Sjfv		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
3099266423Sjfv			continue;
3100270346Sjfv		IXL_TX_LOCK(txr);
3101270346Sjfv		ixl_free_que_tx(que);
3102266423Sjfv		if (txr->base)
3103271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
3104270346Sjfv		IXL_TX_UNLOCK(txr);
3105270346Sjfv		IXL_TX_LOCK_DESTROY(txr);
3106266423Sjfv
3107266423Sjfv		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
3108266423Sjfv			continue;
3109270346Sjfv		IXL_RX_LOCK(rxr);
3110270346Sjfv		ixl_free_que_rx(que);
3111266423Sjfv		if (rxr->base)
3112271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
3113270346Sjfv		IXL_RX_UNLOCK(rxr);
3114270346Sjfv		IXL_RX_LOCK_DESTROY(rxr);
3115266423Sjfv
3116266423Sjfv	}
3117266423Sjfv	free(vsi->queues, M_DEVBUF);
3118266423Sjfv
3119299549Serjfree_filters:
3120266423Sjfv	/* Free VSI filter list */
3121279858Sjfv	ixl_free_mac_filters(vsi);
3122279858Sjfv}
3123279858Sjfv
3124279858Sjfvstatic void
3125279858Sjfvixl_free_mac_filters(struct ixl_vsi *vsi)
3126279858Sjfv{
3127279858Sjfv	struct ixl_mac_filter *f;
3128279858Sjfv
3129266423Sjfv	while (!SLIST_EMPTY(&vsi->ftl)) {
3130266423Sjfv		f = SLIST_FIRST(&vsi->ftl);
3131266423Sjfv		SLIST_REMOVE_HEAD(&vsi->ftl, next);
3132266423Sjfv		free(f, M_DEVBUF);
3133266423Sjfv	}
3134266423Sjfv}
3135266423Sjfv
3136266423Sjfv
3137266423Sjfv/*********************************************************************
3138266423Sjfv *
3139266423Sjfv *  Allocate memory for the VSI (virtual station interface) and its
3140266423Sjfv *  associated queues, rings and the descriptors associated with each,
3141266423Sjfv *  called only once at attach.
3142266423Sjfv *
3143266423Sjfv **********************************************************************/
3144266423Sjfvstatic int
3145270346Sjfvixl_setup_stations(struct ixl_pf *pf)
3146266423Sjfv{
3147266423Sjfv	device_t		dev = pf->dev;
3148270346Sjfv	struct ixl_vsi		*vsi;
3149270346Sjfv	struct ixl_queue	*que;
3150266423Sjfv	struct tx_ring		*txr;
3151266423Sjfv	struct rx_ring		*rxr;
3152266423Sjfv	int 			rsize, tsize;
3153266423Sjfv	int			error = I40E_SUCCESS;
3154266423Sjfv
3155266423Sjfv	vsi = &pf->vsi;
3156266423Sjfv	vsi->back = (void *)pf;
3157266423Sjfv	vsi->hw = &pf->hw;
3158266423Sjfv	vsi->id = 0;
3159266423Sjfv	vsi->num_vlans = 0;
3160279858Sjfv	vsi->back = pf;
3161266423Sjfv
3162266423Sjfv	/* Get memory for the station queues */
3163266423Sjfv        if (!(vsi->queues =
3164270346Sjfv            (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
3165266423Sjfv            vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
3166266423Sjfv                device_printf(dev, "Unable to allocate queue memory\n");
3167266423Sjfv                error = ENOMEM;
3168266423Sjfv                goto early;
3169266423Sjfv        }
3170266423Sjfv
3171266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3172266423Sjfv		que = &vsi->queues[i];
3173270346Sjfv		que->num_desc = ixl_ringsz;
3174266423Sjfv		que->me = i;
3175266423Sjfv		que->vsi = vsi;
3176269198Sjfv		/* mark the queue as active */
3177269198Sjfv		vsi->active_queues |= (u64)1 << que->me;
3178266423Sjfv		txr = &que->txr;
3179266423Sjfv		txr->que = que;
3180269198Sjfv		txr->tail = I40E_QTX_TAIL(que->me);
3181266423Sjfv
3182266423Sjfv		/* Initialize the TX lock */
3183266423Sjfv		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
3184266423Sjfv		    device_get_nameunit(dev), que->me);
3185266423Sjfv		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
3186266423Sjfv		/* Create the TX descriptor ring */
3187269198Sjfv		tsize = roundup2((que->num_desc *
3188269198Sjfv		    sizeof(struct i40e_tx_desc)) +
3189269198Sjfv		    sizeof(u32), DBA_ALIGN);
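		/* The extra sizeof(u32) reserves room for the TX head
		 * writeback slot programmed in ixl_initialize_vsi(). */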
3190271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
3191271834Sbz		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
3192266423Sjfv			device_printf(dev,
3193266423Sjfv			    "Unable to allocate TX Descriptor memory\n");
3194266423Sjfv			error = ENOMEM;
3195266423Sjfv			goto fail;
3196266423Sjfv		}
3197266423Sjfv		txr->base = (struct i40e_tx_desc *)txr->dma.va;
3198266423Sjfv		bzero((void *)txr->base, tsize);
3199266423Sjfv       		/* Now allocate transmit soft structs for the ring */
3200270346Sjfv       		if (ixl_allocate_tx_data(que)) {
3201266423Sjfv			device_printf(dev,
3202266423Sjfv			    "Critical Failure setting up TX structures\n");
3203266423Sjfv			error = ENOMEM;
3204266423Sjfv			goto fail;
3205266423Sjfv       		}
3206266423Sjfv		/* Allocate a buf ring */
3207266423Sjfv		txr->br = buf_ring_alloc(4096, M_DEVBUF,
3208299547Serj		    M_NOWAIT, &txr->mtx);
3209266423Sjfv		if (txr->br == NULL) {
3210266423Sjfv			device_printf(dev,
3211266423Sjfv			    "Critical Failure setting up TX buf ring\n");
3212266423Sjfv			error = ENOMEM;
3213266423Sjfv			goto fail;
3214266423Sjfv       		}
3215266423Sjfv
3216266423Sjfv		/*
3217266423Sjfv		 * Next the RX queues...
3218266423Sjfv		 */
3219266423Sjfv		rsize = roundup2(que->num_desc *
3220266423Sjfv		    sizeof(union i40e_rx_desc), DBA_ALIGN);
3221266423Sjfv		rxr = &que->rxr;
3222266423Sjfv		rxr->que = que;
3223269198Sjfv		rxr->tail = I40E_QRX_TAIL(que->me);
3224266423Sjfv
3225266423Sjfv		/* Initialize the RX side lock */
3226266423Sjfv		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
3227266423Sjfv		    device_get_nameunit(dev), que->me);
3228266423Sjfv		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
3229266423Sjfv
3230271834Sbz		if (i40e_allocate_dma_mem(&pf->hw,
3231271834Sbz		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
3232266423Sjfv			device_printf(dev,
3233266423Sjfv			    "Unable to allocate RX Descriptor memory\n");
3234266423Sjfv			error = ENOMEM;
3235266423Sjfv			goto fail;
3236266423Sjfv		}
3237266423Sjfv		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
3238266423Sjfv		bzero((void *)rxr->base, rsize);
3239266423Sjfv
3240266423Sjfv        	/* Allocate receive soft structs for the ring*/
3241270346Sjfv		if (ixl_allocate_rx_data(que)) {
3242266423Sjfv			device_printf(dev,
3243266423Sjfv			    "Critical Failure setting up receive structs\n");
3244266423Sjfv			error = ENOMEM;
3245266423Sjfv			goto fail;
3246266423Sjfv		}
3247266423Sjfv	}
3248266423Sjfv
3249266423Sjfv	return (0);
3250266423Sjfv
3251266423Sjfvfail:
3252266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
3253266423Sjfv		que = &vsi->queues[i];
3254266423Sjfv		rxr = &que->rxr;
3255266423Sjfv		txr = &que->txr;
3256266423Sjfv		if (rxr->base)
3257271834Sbz			i40e_free_dma_mem(&pf->hw, &rxr->dma);
3258266423Sjfv		if (txr->base)
3259271834Sbz			i40e_free_dma_mem(&pf->hw, &txr->dma);
3260266423Sjfv	}
3261266423Sjfv
3262266423Sjfvearly:
3263266423Sjfv	return (error);
3264266423Sjfv}
3265266423Sjfv
3266266423Sjfv/*
3267266423Sjfv** Provide an update to the queue RX
3268266423Sjfv** interrupt moderation value.
3269266423Sjfv*/
3270266423Sjfvstatic void
3271270346Sjfvixl_set_queue_rx_itr(struct ixl_queue *que)
3272266423Sjfv{
3273270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3274266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3275266423Sjfv	struct rx_ring	*rxr = &que->rxr;
3276266423Sjfv	u16		rx_itr;
3277266423Sjfv	u16		rx_latency = 0;
3278266423Sjfv	int		rx_bytes;
3279266423Sjfv
3280266423Sjfv
3281266423Sjfv	/* Idle, do nothing */
3282266423Sjfv	if (rxr->bytes == 0)
3283266423Sjfv		return;
3284266423Sjfv
3285270346Sjfv	if (ixl_dynamic_rx_itr) {
3286266423Sjfv		rx_bytes = rxr->bytes/rxr->itr;
3287266423Sjfv		rx_itr = rxr->itr;
3288266423Sjfv
3289266423Sjfv		/* Adjust latency range */
3290266423Sjfv		switch (rxr->latency) {
3291270346Sjfv		case IXL_LOW_LATENCY:
3292266423Sjfv			if (rx_bytes > 10) {
3293270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3294270346Sjfv				rx_itr = IXL_ITR_20K;
3295266423Sjfv			}
3296266423Sjfv			break;
3297270346Sjfv		case IXL_AVE_LATENCY:
3298266423Sjfv			if (rx_bytes > 20) {
3299270346Sjfv				rx_latency = IXL_BULK_LATENCY;
3300270346Sjfv				rx_itr = IXL_ITR_8K;
3301266423Sjfv			} else if (rx_bytes <= 10) {
3302270346Sjfv				rx_latency = IXL_LOW_LATENCY;
3303270346Sjfv				rx_itr = IXL_ITR_100K;
3304266423Sjfv			}
3305266423Sjfv			break;
3306270346Sjfv		case IXL_BULK_LATENCY:
3307266423Sjfv			if (rx_bytes <= 20) {
3308270346Sjfv				rx_latency = IXL_AVE_LATENCY;
3309270346Sjfv				rx_itr = IXL_ITR_20K;
3310266423Sjfv			}
3311266423Sjfv			break;
3312266423Sjfv       		 }
3313266423Sjfv
3314266423Sjfv		rxr->latency = rx_latency;
3315266423Sjfv
3316266423Sjfv		if (rx_itr != rxr->itr) {
3317266423Sjfv			/* do an exponential smoothing */
3318266423Sjfv			rx_itr = (10 * rx_itr * rxr->itr) /
3319266423Sjfv			    ((9 * rx_itr) + rxr->itr);
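			/*
			 * Worked example: with a current itr of 100 and a
			 * computed target of 20 this yields
			 * (10*20*100) / ((9*20)+100) = 20000/280 ~= 71, so
			 * the setting moves toward the target gradually
			 * rather than jumping straight to it.
			 */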
3320270346Sjfv			rxr->itr = rx_itr & IXL_MAX_ITR;
3321270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3322266423Sjfv			    que->me), rxr->itr);
3323266423Sjfv		}
3324266423Sjfv	} else { /* We may have toggled to non-dynamic */
3325270346Sjfv		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
3326270346Sjfv			vsi->rx_itr_setting = ixl_rx_itr;
3327266423Sjfv		/* Update the hardware if needed */
3328266423Sjfv		if (rxr->itr != vsi->rx_itr_setting) {
3329266423Sjfv			rxr->itr = vsi->rx_itr_setting;
3330270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
3331266423Sjfv			    que->me), rxr->itr);
3332266423Sjfv		}
3333266423Sjfv	}
3334266423Sjfv	rxr->bytes = 0;
3335266423Sjfv	rxr->packets = 0;
3336266423Sjfv	return;
3337266423Sjfv}
3338266423Sjfv
3339266423Sjfv
3340266423Sjfv/*
3341266423Sjfv** Provide an update to the queue TX
3342266423Sjfv** interrupt moderation value.
3343266423Sjfv*/
3344266423Sjfvstatic void
3345270346Sjfvixl_set_queue_tx_itr(struct ixl_queue *que)
3346266423Sjfv{
3347270346Sjfv	struct ixl_vsi	*vsi = que->vsi;
3348266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3349266423Sjfv	struct tx_ring	*txr = &que->txr;
3350266423Sjfv	u16		tx_itr;
3351266423Sjfv	u16		tx_latency = 0;
3352266423Sjfv	int		tx_bytes;
3353266423Sjfv
3354266423Sjfv
3355266423Sjfv	/* Idle, do nothing */
3356266423Sjfv	if (txr->bytes == 0)
3357266423Sjfv		return;
3358266423Sjfv
3359270346Sjfv	if (ixl_dynamic_tx_itr) {
3360266423Sjfv		tx_bytes = txr->bytes/txr->itr;
3361266423Sjfv		tx_itr = txr->itr;
3362266423Sjfv
3363266423Sjfv		switch (txr->latency) {
3364270346Sjfv		case IXL_LOW_LATENCY:
3365266423Sjfv			if (tx_bytes > 10) {
3366270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3367270346Sjfv				tx_itr = IXL_ITR_20K;
3368266423Sjfv			}
3369266423Sjfv			break;
3370270346Sjfv		case IXL_AVE_LATENCY:
3371266423Sjfv			if (tx_bytes > 20) {
3372270346Sjfv				tx_latency = IXL_BULK_LATENCY;
3373270346Sjfv				tx_itr = IXL_ITR_8K;
3374266423Sjfv			} else if (tx_bytes <= 10) {
3375270346Sjfv				tx_latency = IXL_LOW_LATENCY;
3376270346Sjfv				tx_itr = IXL_ITR_100K;
3377266423Sjfv			}
3378266423Sjfv			break;
3379270346Sjfv		case IXL_BULK_LATENCY:
3380266423Sjfv			if (tx_bytes <= 20) {
3381270346Sjfv				tx_latency = IXL_AVE_LATENCY;
3382270346Sjfv				tx_itr = IXL_ITR_20K;
3383266423Sjfv			}
3384266423Sjfv			break;
3385266423Sjfv		}
3386266423Sjfv
3387266423Sjfv		txr->latency = tx_latency;
3388266423Sjfv
3389266423Sjfv		if (tx_itr != txr->itr) {
3390266423Sjfv       	         /* do an exponential smoothing */
3391266423Sjfv			tx_itr = (10 * tx_itr * txr->itr) /
3392266423Sjfv			    ((9 * tx_itr) + txr->itr);
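			/* Same smoothing step as in ixl_set_queue_rx_itr()
			 * above; see the worked example there. */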
3393270346Sjfv			txr->itr = tx_itr & IXL_MAX_ITR;
3394270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3395266423Sjfv			    que->me), txr->itr);
3396266423Sjfv		}
3397266423Sjfv
3398266423Sjfv	} else { /* We may have toggled to non-dynamic */
3399270346Sjfv		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
3400270346Sjfv			vsi->tx_itr_setting = ixl_tx_itr;
3401266423Sjfv		/* Update the hardware if needed */
3402266423Sjfv		if (txr->itr != vsi->tx_itr_setting) {
3403266423Sjfv			txr->itr = vsi->tx_itr_setting;
3404270346Sjfv			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
3405266423Sjfv			    que->me), txr->itr);
3406266423Sjfv		}
3407266423Sjfv	}
3408266423Sjfv	txr->bytes = 0;
3409266423Sjfv	txr->packets = 0;
3410266423Sjfv	return;
3411266423Sjfv}
3412266423Sjfv
3413279858Sjfv#define QUEUE_NAME_LEN 32
3414266423Sjfv
3415266423Sjfvstatic void
3416279858Sjfvixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
3417279858Sjfv    struct sysctl_ctx_list *ctx, const char *sysctl_name)
3418279858Sjfv{
3419279858Sjfv	struct sysctl_oid *tree;
3420279858Sjfv	struct sysctl_oid_list *child;
3421279858Sjfv	struct sysctl_oid_list *vsi_list;
3422279858Sjfv
3423279858Sjfv	tree = device_get_sysctl_tree(pf->dev);
3424279858Sjfv	child = SYSCTL_CHILDREN(tree);
3425279858Sjfv	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
3426279858Sjfv				   CTLFLAG_RD, NULL, "VSI Number");
3427279858Sjfv	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
3428279858Sjfv
3429279858Sjfv	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
3430279858Sjfv}
3431279858Sjfv
3432279858Sjfvstatic void
3433270346Sjfvixl_add_hw_stats(struct ixl_pf *pf)
3434266423Sjfv{
3435266423Sjfv	device_t dev = pf->dev;
3436270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
3437270346Sjfv	struct ixl_queue *queues = vsi->queues;
3438269198Sjfv	struct i40e_hw_port_stats *pf_stats = &pf->stats;
3439266423Sjfv
3440266423Sjfv	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3441266423Sjfv	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3442266423Sjfv	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3443279858Sjfv	struct sysctl_oid_list *vsi_list;
3444266423Sjfv
3445279858Sjfv	struct sysctl_oid *queue_node;
3446279858Sjfv	struct sysctl_oid_list *queue_list;
3447266423Sjfv
3448269198Sjfv	struct tx_ring *txr;
3449269198Sjfv	struct rx_ring *rxr;
3450279858Sjfv	char queue_namebuf[QUEUE_NAME_LEN];
3451266423Sjfv
3452266423Sjfv	/* Driver statistics */
3453266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3454266423Sjfv			CTLFLAG_RD, &pf->watchdog_events,
3455266423Sjfv			"Watchdog timeouts");
3456266423Sjfv	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
3457266423Sjfv			CTLFLAG_RD, &pf->admin_irq,
3458266423Sjfv			"Admin Queue IRQ Handled");
3459266423Sjfv
3460279858Sjfv	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
3461279858Sjfv	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
3462266423Sjfv
3463266423Sjfv	/* Queue statistics */
3464266423Sjfv	for (int q = 0; q < vsi->num_queues; q++) {
3465269198Sjfv		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3466279858Sjfv		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
3467279858Sjfv		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
3468266423Sjfv		queue_list = SYSCTL_CHILDREN(queue_node);
3469266423Sjfv
3470269198Sjfv		txr = &(queues[q].txr);
3471269198Sjfv		rxr = &(queues[q].rxr);
3472269198Sjfv
3473269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3474266423Sjfv				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3475266423Sjfv				"m_defrag() failed");
3476269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
3477266423Sjfv				CTLFLAG_RD, &(queues[q].dropped_pkts),
3478266423Sjfv				"Driver dropped packets");
3479266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3480266423Sjfv				CTLFLAG_RD, &(queues[q].irqs),
3481266423Sjfv				"irqs on this queue");
3482269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3483266423Sjfv				CTLFLAG_RD, &(queues[q].tso),
3484266423Sjfv				"TSO");
3485269198Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
3486266423Sjfv				CTLFLAG_RD, &(queues[q].tx_dma_setup),
3487266423Sjfv				"Driver tx dma failure in xmit");
3488266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3489266423Sjfv				CTLFLAG_RD, &(txr->no_desc),
3490266423Sjfv				"Queue No Descriptor Available");
3491266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3492266423Sjfv				CTLFLAG_RD, &(txr->total_packets),
3493266423Sjfv				"Queue Packets Transmitted");
3494266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3495270346Sjfv				CTLFLAG_RD, &(txr->tx_bytes),
3496266423Sjfv				"Queue Bytes Transmitted");
3497266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3498266423Sjfv				CTLFLAG_RD, &(rxr->rx_packets),
3499266423Sjfv				"Queue Packets Received");
3500266423Sjfv		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3501266423Sjfv				CTLFLAG_RD, &(rxr->rx_bytes),
3502266423Sjfv				"Queue Bytes Received");
3503266423Sjfv	}
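	/*
	 * All of the above appear under the device sysctl tree, e.g.
	 * (assuming unit 0) "sysctl dev.ixl.0.pf.que0.tx_packets" for the
	 * first queue's transmit count, with the port-level "mac" node
	 * added below.
	 */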
3504266423Sjfv
3505266423Sjfv	/* MAC stats */
3506270346Sjfv	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3507266423Sjfv}
3508266423Sjfv
3509266423Sjfvstatic void
3510270346Sjfvixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3511266423Sjfv	struct sysctl_oid_list *child,
3512266423Sjfv	struct i40e_eth_stats *eth_stats)
3513266423Sjfv{
3514270346Sjfv	struct ixl_sysctl_info ctls[] =
3515266423Sjfv	{
3516266423Sjfv		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3517266423Sjfv		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3518266423Sjfv			"Unicast Packets Received"},
3519266423Sjfv		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3520266423Sjfv			"Multicast Packets Received"},
3521266423Sjfv		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3522266423Sjfv			"Broadcast Packets Received"},
3523269198Sjfv		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3524266423Sjfv		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3525266423Sjfv		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3526266423Sjfv		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3527266423Sjfv			"Multicast Packets Transmitted"},
3528266423Sjfv		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3529266423Sjfv			"Broadcast Packets Transmitted"},
3530266423Sjfv		// end
3531266423Sjfv		{0,0,0}
3532266423Sjfv	};
3533266423Sjfv
3534270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3535297753Spfg	while (entry->stat != NULL)
3536266423Sjfv	{
3537266423Sjfv		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3538266423Sjfv				CTLFLAG_RD, entry->stat,
3539266423Sjfv				entry->description);
3540266423Sjfv		entry++;
3541266423Sjfv	}
3542266423Sjfv}
3543266423Sjfv
3544266423Sjfvstatic void
3545270346Sjfvixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3546266423Sjfv	struct sysctl_oid_list *child,
3547266423Sjfv	struct i40e_hw_port_stats *stats)
3548266423Sjfv{
3549269198Sjfv	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3550266423Sjfv				    CTLFLAG_RD, NULL, "Mac Statistics");
3551266423Sjfv	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3552266423Sjfv
3553266423Sjfv	struct i40e_eth_stats *eth_stats = &stats->eth;
3554270346Sjfv	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3555266423Sjfv
3556270346Sjfv	struct ixl_sysctl_info ctls[] =
3557266423Sjfv	{
3558266423Sjfv		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3559266423Sjfv		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3560266423Sjfv		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3561266423Sjfv		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3562266423Sjfv		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3563266423Sjfv		/* Packet Reception Stats */
3564266423Sjfv		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3565266423Sjfv		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3566266423Sjfv		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3567266423Sjfv		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3568266423Sjfv		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3569266423Sjfv		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3570266423Sjfv		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3571266423Sjfv		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3572266423Sjfv		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3573266423Sjfv		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3574266423Sjfv		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3575266423Sjfv		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3576266423Sjfv		/* Packet Transmission Stats */
3577266423Sjfv		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3578266423Sjfv		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3579266423Sjfv		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3580266423Sjfv		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3581266423Sjfv		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3582266423Sjfv		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3583266423Sjfv		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3584266423Sjfv		/* Flow control */
3585266423Sjfv		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3586266423Sjfv		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3587266423Sjfv		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3588266423Sjfv		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3589266423Sjfv		/* End */
3590266423Sjfv		{0,0,0}
3591266423Sjfv	};
3592266423Sjfv
3593270346Sjfv	struct ixl_sysctl_info *entry = ctls;
3594297753Spfg	while (entry->stat != NULL)
3595266423Sjfv	{
3596266423Sjfv		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3597266423Sjfv				CTLFLAG_RD, entry->stat,
3598266423Sjfv				entry->description);
3599266423Sjfv		entry++;
3600266423Sjfv	}
3601266423Sjfv}
3602266423Sjfv
3603284049Sjfv
3604266423Sjfv/*
3605270346Sjfv** ixl_config_rss - setup RSS
3606266423Sjfv**  - note this is done for the single vsi
3607266423Sjfv*/
3608270346Sjfvstatic void ixl_config_rss(struct ixl_vsi *vsi)
3609266423Sjfv{
3610270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3611266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3612266423Sjfv	u32		lut = 0;
3613277084Sjfv	u64		set_hena = 0, hena;
3614277084Sjfv	int		i, j, que_id;
3615277084Sjfv#ifdef RSS
3616277084Sjfv	u32		rss_hash_config;
3617277084Sjfv	u32		rss_seed[IXL_KEYSZ];
3618277084Sjfv#else
3619277084Sjfv	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
3620277084Sjfv			    0x183cfd8c, 0xce880440, 0x580cbc3c,
3621277084Sjfv			    0x35897377, 0x328b25e1, 0x4fa98922,
3622277084Sjfv			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
3623277084Sjfv#endif
3624266423Sjfv
3625277084Sjfv#ifdef RSS
3626277084Sjfv        /* Fetch the configured RSS key */
3627277084Sjfv        rss_getkey((uint8_t *) &rss_seed);
3628277084Sjfv#endif
3629266423Sjfv
3630266423Sjfv	/* Fill out hash function seed */
3631277084Sjfv	for (i = 0; i < IXL_KEYSZ; i++)
3632277084Sjfv                wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
3633266423Sjfv
3634266423Sjfv	/* Enable PCTYPES for RSS: */
3635277084Sjfv#ifdef RSS
3636277084Sjfv	rss_hash_config = rss_gethashconfig();
3637277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
3638277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
3639277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
3640277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
3641277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
3642277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
3643277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
3644277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
3645279033Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
3646277151Sjfv		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
3647277084Sjfv	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
3648277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
3649277084Sjfv        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
3650277084Sjfv                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
3651277084Sjfv#else
3652266423Sjfv	set_hena =
3653266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3654266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3655266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3656266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3657266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3658266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3659266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3660266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3661266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3662266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3663266423Sjfv		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3664277084Sjfv#endif
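	/*
	 * The enabled hash types form a 64-bit bitmap: read the current
	 * value, OR in the selections above, and write it back across the
	 * two 32-bit PFQF_HENA registers.
	 */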
3665266423Sjfv	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3666266423Sjfv	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3667266423Sjfv	hena |= set_hena;
3668266423Sjfv	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3669266423Sjfv	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3670266423Sjfv
3671266423Sjfv	/* Populate the LUT with the max no. of queues in round-robin fashion */
3672266423Sjfv	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3673266423Sjfv		if (j == vsi->num_queues)
3674266423Sjfv			j = 0;
3675277084Sjfv#ifdef RSS
3676277084Sjfv		/*
3677277084Sjfv		 * Fetch the RSS bucket id for the given indirection entry.
3678277084Sjfv		 * Cap it at the number of configured buckets (which is
3679277084Sjfv		 * num_queues.)
3680277084Sjfv		 */
3681277084Sjfv		que_id = rss_get_indirection_to_bucket(i);
3682277262Sjfv		que_id = que_id % vsi->num_queues;
3683277084Sjfv#else
3684277084Sjfv		que_id = j;
3685277084Sjfv#endif
3686266423Sjfv		/* lut = 4-byte sliding window of 4 lut entries */
3687277084Sjfv		lut = (lut << 8) | (que_id &
3688266423Sjfv		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3689266423Sjfv		/* Every 4th index ((i & 3) == 3) lut holds 4 entries; write to the register */
3690266423Sjfv		if ((i & 3) == 3)
3691266423Sjfv			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3692266423Sjfv	}
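	/*
	 * Illustration of the window above with 4 queues: LUT entries 0..3
	 * become the four bytes of HLUT(0) (earliest entry in the high
	 * byte), entries 4..7 fill HLUT(1), and so on through
	 * rss_table_size entries.
	 */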
3693270346Sjfv	ixl_flush(hw);
3694266423Sjfv}
3695266423Sjfv
3696266423Sjfv
3697266423Sjfv/*
3698266423Sjfv** This routine is run via a vlan config EVENT;
3699266423Sjfv** it enables us to use the HW Filter table since
3700266423Sjfv** we can get the vlan id. This just creates the
3701266423Sjfv** entry in the soft version of the VFTA; init will
3702266423Sjfv** repopulate the real table.
3703266423Sjfv*/
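/*
 * For instance, creating a vlan on top of the interface (e.g.
 * "ifconfig ixl0.100 create" or "ifconfig vlan100 create vlan 100
 * vlandev ixl0") fires this event, which then adds a MAC+vlan filter.
 */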
3704266423Sjfvstatic void
3705270346Sjfvixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3706266423Sjfv{
3707270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3708266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3709270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3710266423Sjfv
3711266423Sjfv	if (ifp->if_softc !=  arg)   /* Not our event */
3712266423Sjfv		return;
3713266423Sjfv
3714266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3715266423Sjfv		return;
3716266423Sjfv
3717270346Sjfv	IXL_PF_LOCK(pf);
3718266423Sjfv	++vsi->num_vlans;
3719270346Sjfv	ixl_add_filter(vsi, hw->mac.addr, vtag);
3720270346Sjfv	IXL_PF_UNLOCK(pf);
3721266423Sjfv}
3722266423Sjfv
3723266423Sjfv/*
3724266423Sjfv** This routine is run via a vlan
3725266423Sjfv** unconfig EVENT; it removes our entry
3726266423Sjfv** from the soft VFTA.
3727266423Sjfv*/
3728266423Sjfvstatic void
3729270346Sjfvixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3730266423Sjfv{
3731270346Sjfv	struct ixl_vsi	*vsi = ifp->if_softc;
3732266423Sjfv	struct i40e_hw	*hw = vsi->hw;
3733270346Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3734266423Sjfv
3735266423Sjfv	if (ifp->if_softc !=  arg)
3736266423Sjfv		return;
3737266423Sjfv
3738266423Sjfv	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3739266423Sjfv		return;
3740266423Sjfv
3741270346Sjfv	IXL_PF_LOCK(pf);
3742266423Sjfv	--vsi->num_vlans;
3743270346Sjfv	ixl_del_filter(vsi, hw->mac.addr, vtag);
3744270346Sjfv	IXL_PF_UNLOCK(pf);
3745266423Sjfv}
3746266423Sjfv
3747266423Sjfv/*
3748266423Sjfv** This routine updates vlan filters; called by init,
3749266423Sjfv** it scans the filter table and then updates the hw
3750266423Sjfv** after a soft reset.
3751266423Sjfv*/
3752266423Sjfvstatic void
3753270346Sjfvixl_setup_vlan_filters(struct ixl_vsi *vsi)
3754266423Sjfv{
3755270346Sjfv	struct ixl_mac_filter	*f;
3756266423Sjfv	int			cnt = 0, flags;
3757266423Sjfv
3758266423Sjfv	if (vsi->num_vlans == 0)
3759266423Sjfv		return;
3760266423Sjfv	/*
3761266423Sjfv	** Scan the filter list for vlan entries,
3762266423Sjfv	** mark them for addition and then call
3763266423Sjfv	** for the AQ update.
3764266423Sjfv	*/
3765266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3766270346Sjfv		if (f->flags & IXL_FILTER_VLAN) {
3767266423Sjfv			f->flags |=
3768270346Sjfv			    (IXL_FILTER_ADD |
3769270346Sjfv			    IXL_FILTER_USED);
3770266423Sjfv			cnt++;
3771266423Sjfv		}
3772266423Sjfv	}
3773266423Sjfv	if (cnt == 0) {
3774266423Sjfv		printf("setup vlan: no filters found!\n");
3775266423Sjfv		return;
3776266423Sjfv	}
3777270346Sjfv	flags = IXL_FILTER_VLAN;
3778270346Sjfv	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3779270346Sjfv	ixl_add_hw_filters(vsi, flags, cnt);
3780266423Sjfv	return;
3781266423Sjfv}
3782266423Sjfv
3783266423Sjfv/*
3784266423Sjfv** Initialize filter list and add filters that the hardware
3785266423Sjfv** needs to know about.
3786299552Serj**
3787299552Serj** Requires VSI's filter list & seid to be set before calling.
3788266423Sjfv*/
3789266423Sjfvstatic void
3790270346Sjfvixl_init_filters(struct ixl_vsi *vsi)
3791266423Sjfv{
3792269198Sjfv	/* Add broadcast address */
3793279858Sjfv	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
3794299552Serj
3795299552Serj	/*
3796299552Serj	 * Prevent Tx flow control frames from being sent out by
3797299552Serj	 * non-firmware transmitters.
3798299552Serj	 */
3799299552Serj	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
3800266423Sjfv}
3801266423Sjfv
3802266423Sjfv/*
3803266423Sjfv** This routine adds multicast filters
3804266423Sjfv*/
3805266423Sjfvstatic void
3806270346Sjfvixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3807266423Sjfv{
3808270346Sjfv	struct ixl_mac_filter *f;
3809266423Sjfv
3810266423Sjfv	/* Does one already exist */
3811270346Sjfv	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3812266423Sjfv	if (f != NULL)
3813266423Sjfv		return;
3814266423Sjfv
3815270346Sjfv	f = ixl_get_filter(vsi);
3816266423Sjfv	if (f == NULL) {
3817266423Sjfv		printf("WARNING: no filter available!!\n");
3818266423Sjfv		return;
3819266423Sjfv	}
3820266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3821270346Sjfv	f->vlan = IXL_VLAN_ANY;
3822270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3823270346Sjfv	    | IXL_FILTER_MC);
3824266423Sjfv
3825266423Sjfv	return;
3826266423Sjfv}
3827266423Sjfv
3828279858Sjfvstatic void
3829279858Sjfvixl_reconfigure_filters(struct ixl_vsi *vsi)
3830279858Sjfv{
3831279858Sjfv
3832279858Sjfv	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
3833279858Sjfv}
3834279858Sjfv
3835266423Sjfv/*
3836266423Sjfv** This routine adds macvlan filters
3837266423Sjfv*/
3838266423Sjfvstatic void
3839270346Sjfvixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3840266423Sjfv{
3841270346Sjfv	struct ixl_mac_filter	*f, *tmp;
3842279858Sjfv	struct ixl_pf		*pf;
3843279858Sjfv	device_t		dev;
3844266423Sjfv
3845270346Sjfv	DEBUGOUT("ixl_add_filter: begin");
3846266423Sjfv
3847279858Sjfv	pf = vsi->back;
3848279858Sjfv	dev = pf->dev;
3849279858Sjfv
3850266423Sjfv	/* Does one already exist */
3851270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3852266423Sjfv	if (f != NULL)
3853266423Sjfv		return;
3854266423Sjfv	/*
3855266423Sjfv	** If this is the first vlan being registered, we
3856266423Sjfv	** need to remove the ANY filter that indicates we are
3857266423Sjfv	** not in a vlan, and replace that with a vlan-0 filter.
3858266423Sjfv	*/
3859270346Sjfv	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3860270346Sjfv		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3861266423Sjfv		if (tmp != NULL) {
3862270346Sjfv			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3863270346Sjfv			ixl_add_filter(vsi, macaddr, 0);
3864266423Sjfv		}
3865266423Sjfv	}
3866266423Sjfv
3867270346Sjfv	f = ixl_get_filter(vsi);
3868266423Sjfv	if (f == NULL) {
3869266423Sjfv		device_printf(dev, "WARNING: no filter available!!\n");
3870266423Sjfv		return;
3871266423Sjfv	}
3872266423Sjfv	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3873266423Sjfv	f->vlan = vlan;
3874270346Sjfv	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3875270346Sjfv	if (f->vlan != IXL_VLAN_ANY)
3876270346Sjfv		f->flags |= IXL_FILTER_VLAN;
3877279858Sjfv	else
3878279858Sjfv		vsi->num_macs++;
3879266423Sjfv
3880270346Sjfv	ixl_add_hw_filters(vsi, f->flags, 1);
3881266423Sjfv	return;
3882266423Sjfv}
3883266423Sjfv
3884266423Sjfvstatic void
3885270346Sjfvixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3886266423Sjfv{
3887270346Sjfv	struct ixl_mac_filter *f;
3888266423Sjfv
3889270346Sjfv	f = ixl_find_filter(vsi, macaddr, vlan);
3890266423Sjfv	if (f == NULL)
3891266423Sjfv		return;
3892266423Sjfv
3893270346Sjfv	f->flags |= IXL_FILTER_DEL;
3894270346Sjfv	ixl_del_hw_filters(vsi, 1);
3895279858Sjfv	vsi->num_macs--;
3896266423Sjfv
3897266423Sjfv	/* Check if this is the last vlan removal */
3898270346Sjfv	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3899266423Sjfv		/* Switch back to a non-vlan filter */
3900270346Sjfv		ixl_del_filter(vsi, macaddr, 0);
3901270346Sjfv		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3902266423Sjfv	}
3903266423Sjfv	return;
3904266423Sjfv}
3905266423Sjfv
3906266423Sjfv/*
3907266423Sjfv** Find the filter with both matching mac addr and vlan id
3908266423Sjfv*/
3909270346Sjfvstatic struct ixl_mac_filter *
3910270346Sjfvixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3911266423Sjfv{
3912270346Sjfv	struct ixl_mac_filter	*f;
3913266423Sjfv	bool			match = FALSE;
3914266423Sjfv
3915266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3916266423Sjfv		if (!cmp_etheraddr(f->macaddr, macaddr))
3917266423Sjfv			continue;
3918266423Sjfv		if (f->vlan == vlan) {
3919266423Sjfv			match = TRUE;
3920266423Sjfv			break;
3921266423Sjfv		}
3922266423Sjfv	}
3923266423Sjfv
3924266423Sjfv	if (!match)
3925266423Sjfv		f = NULL;
3926266423Sjfv	return (f);
3927266423Sjfv}
3928266423Sjfv
3929266423Sjfv/*
3930266423Sjfv** This routine takes additions to the vsi filter
3931266423Sjfv** table and creates an Admin Queue call to create
3932266423Sjfv** the filters in the hardware.
3933266423Sjfv*/
3934266423Sjfvstatic void
3935270346Sjfvixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3936266423Sjfv{
3937266423Sjfv	struct i40e_aqc_add_macvlan_element_data *a, *b;
3938270346Sjfv	struct ixl_mac_filter	*f;
3939279858Sjfv	struct ixl_pf		*pf;
3940279858Sjfv	struct i40e_hw		*hw;
3941279858Sjfv	device_t		dev;
3942279858Sjfv	int			err, j = 0;
3943266423Sjfv
3944279858Sjfv	pf = vsi->back;
3945279858Sjfv	dev = pf->dev;
3946279858Sjfv	hw = &pf->hw;
3947279858Sjfv	IXL_PF_LOCK_ASSERT(pf);
3948279858Sjfv
3949266423Sjfv	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3950266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
3951266423Sjfv	if (a == NULL) {
3952277084Sjfv		device_printf(dev, "add_hw_filters failed to get memory\n");
3953266423Sjfv		return;
3954266423Sjfv	}
3955266423Sjfv
3956266423Sjfv	/*
3957266423Sjfv	** Scan the filter list, each time we find one
3958266423Sjfv	** we add it to the admin queue array and turn off
3959266423Sjfv	** the add bit.
3960266423Sjfv	*/
3961266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
3962266423Sjfv		if (f->flags == flags) {
3963266423Sjfv			b = &a[j]; // a pox on fvl long names :)
3964266423Sjfv			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3965279858Sjfv			if (f->vlan == IXL_VLAN_ANY) {
3966279858Sjfv				b->vlan_tag = 0;
3967279858Sjfv				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3968279858Sjfv			} else {
3969279858Sjfv				b->vlan_tag = f->vlan;
3970279858Sjfv				b->flags = 0;
3971279858Sjfv			}
3972279858Sjfv			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3973270346Sjfv			f->flags &= ~IXL_FILTER_ADD;
3974266423Sjfv			j++;
3975266423Sjfv		}
3976266423Sjfv		if (j == cnt)
3977266423Sjfv			break;
3978266423Sjfv	}
3979266423Sjfv	if (j > 0) {
3980266423Sjfv		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3981266423Sjfv		if (err)
3982279033Sjfv			device_printf(dev, "aq_add_macvlan err %d, "
3983279033Sjfv			    "aq_error %d\n", err, hw->aq.asq_last_status);
3984266423Sjfv		else
3985266423Sjfv			vsi->hw_filters_add += j;
3986266423Sjfv	}
3987266423Sjfv	free(a, M_DEVBUF);
3988266423Sjfv	return;
3989266423Sjfv}
3990266423Sjfv
3991266423Sjfv/*
3992266423Sjfv** This routine takes removals in the vsi filter
3993266423Sjfv** table and creates an Admin Queue call to delete
3994266423Sjfv** the filters in the hardware.
3995266423Sjfv*/
3996266423Sjfvstatic void
3997270346Sjfvixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3998266423Sjfv{
3999266423Sjfv	struct i40e_aqc_remove_macvlan_element_data *d, *e;
4000279858Sjfv	struct ixl_pf		*pf;
4001279858Sjfv	struct i40e_hw		*hw;
4002279858Sjfv	device_t		dev;
4003270346Sjfv	struct ixl_mac_filter	*f, *f_temp;
4004266423Sjfv	int			err, j = 0;
4005266423Sjfv
4006270346Sjfv	DEBUGOUT("ixl_del_hw_filters: begin\n");
4007266423Sjfv
4008279858Sjfv	pf = vsi->back;
4009279858Sjfv	hw = &pf->hw;
4010279858Sjfv	dev = pf->dev;
4011279858Sjfv
4012266423Sjfv	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
4013266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
4014266423Sjfv	if (d == NULL) {
4015266423Sjfv		printf("del hw filter failed to get memory\n");
4016266423Sjfv		return;
4017266423Sjfv	}
4018266423Sjfv
4019266423Sjfv	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
4020270346Sjfv		if (f->flags & IXL_FILTER_DEL) {
4021266423Sjfv			e = &d[j]; // a pox on fvl long names :)
4022266423Sjfv			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
4023270346Sjfv			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
4024266423Sjfv			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
4025266423Sjfv			/* delete entry from vsi list */
4026270346Sjfv			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
4027266423Sjfv			free(f, M_DEVBUF);
4028266423Sjfv			j++;
4029266423Sjfv		}
4030266423Sjfv		if (j == cnt)
4031266423Sjfv			break;
4032266423Sjfv	}
4033266423Sjfv	if (j > 0) {
4034266423Sjfv		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
4035266423Sjfv		/* NOTE: returns ENOENT every time but seems to work fine,
4036266423Sjfv		   so we'll ignore that specific error. */
4037277084Sjfv		// TODO: Does this still occur on current firmwares?
4038266423Sjfv		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
4039266423Sjfv			int sc = 0;
4040266423Sjfv			for (int i = 0; i < j; i++)
4041266423Sjfv				sc += (!d[i].error_code);
4042266423Sjfv			vsi->hw_filters_del += sc;
4043266423Sjfv			device_printf(dev,
4044266423Sjfv			    "Failed to remove %d/%d filters, aq error %d\n",
4045266423Sjfv			    j - sc, j, hw->aq.asq_last_status);
4046266423Sjfv		} else
4047266423Sjfv			vsi->hw_filters_del += j;
4048266423Sjfv	}
4049266423Sjfv	free(d, M_DEVBUF);
4050266423Sjfv
4051270346Sjfv	DEBUGOUT("ixl_del_hw_filters: end\n");
4052266423Sjfv	return;
4053266423Sjfv}
4054266423Sjfv
4055279858Sjfvstatic int
4056270346Sjfvixl_enable_rings(struct ixl_vsi *vsi)
4057266423Sjfv{
4058279858Sjfv	struct ixl_pf	*pf = vsi->back;
4059279858Sjfv	struct i40e_hw	*hw = &pf->hw;
4060279858Sjfv	int		index, error;
4061266423Sjfv	u32		reg;
4062266423Sjfv
4063279858Sjfv	error = 0;
4064266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
4065279858Sjfv		index = vsi->first_queue + i;
4066279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, TRUE);
4067266423Sjfv
4068279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
4069266423Sjfv		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
4070266423Sjfv		    I40E_QTX_ENA_QENA_STAT_MASK;
4071279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
4072266423Sjfv		/* Verify the enable took */
4073266423Sjfv		for (int j = 0; j < 10; j++) {
4074279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
4075266423Sjfv			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
4076266423Sjfv				break;
4077266423Sjfv			i40e_msec_delay(10);
4078266423Sjfv		}
4079279858Sjfv		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
4080279858Sjfv			device_printf(pf->dev, "TX queue %d disabled!\n",
4081279858Sjfv			    index);
4082279858Sjfv			error = ETIMEDOUT;
4083279858Sjfv		}
4084266423Sjfv
4085279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
4086266423Sjfv		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
4087266423Sjfv		    I40E_QRX_ENA_QENA_STAT_MASK;
4088279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
4089266423Sjfv		/* Verify the enable took */
4090266423Sjfv		for (int j = 0; j < 10; j++) {
4091279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
4092266423Sjfv			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
4093266423Sjfv				break;
4094266423Sjfv			i40e_msec_delay(10);
4095266423Sjfv		}
4096279858Sjfv		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
4097279858Sjfv			device_printf(pf->dev, "RX queue %d disabled!\n",
4098279858Sjfv			    index);
4099279858Sjfv			error = ETIMEDOUT;
4100279858Sjfv		}
4101266423Sjfv	}
4102279858Sjfv
4103279858Sjfv	return (error);
4104266423Sjfv}
4105266423Sjfv
4106279858Sjfvstatic int
4107270346Sjfvixl_disable_rings(struct ixl_vsi *vsi)
4108266423Sjfv{
4109279858Sjfv	struct ixl_pf	*pf = vsi->back;
4110279858Sjfv	struct i40e_hw	*hw = &pf->hw;
4111279858Sjfv	int		index, error;
4112266423Sjfv	u32		reg;
4113266423Sjfv
4114279858Sjfv	error = 0;
4115266423Sjfv	for (int i = 0; i < vsi->num_queues; i++) {
4116279858Sjfv		index = vsi->first_queue + i;
4117279858Sjfv
4118279858Sjfv		i40e_pre_tx_queue_cfg(hw, index, FALSE);
4119266423Sjfv		i40e_usec_delay(500);
4120266423Sjfv
4121279858Sjfv		reg = rd32(hw, I40E_QTX_ENA(index));
4122266423Sjfv		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4123279858Sjfv		wr32(hw, I40E_QTX_ENA(index), reg);
4124266423Sjfv		/* Verify the disable took */
4125266423Sjfv		for (int j = 0; j < 10; j++) {
4126279858Sjfv			reg = rd32(hw, I40E_QTX_ENA(index));
4127266423Sjfv			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4128266423Sjfv				break;
4129266423Sjfv			i40e_msec_delay(10);
4130266423Sjfv		}
4131279858Sjfv		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
4132279858Sjfv			device_printf(pf->dev, "TX queue %d still enabled!\n",
4133279858Sjfv			    index);
4134279858Sjfv			error = ETIMEDOUT;
4135279858Sjfv		}
4136266423Sjfv
4137279858Sjfv		reg = rd32(hw, I40E_QRX_ENA(index));
4138266423Sjfv		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4139279858Sjfv		wr32(hw, I40E_QRX_ENA(index), reg);
4140266423Sjfv		/* Verify the disable took */
4141266423Sjfv		for (int j = 0; j < 10; j++) {
4142279858Sjfv			reg = rd32(hw, I40E_QRX_ENA(index));
4143266423Sjfv			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4144266423Sjfv				break;
4145266423Sjfv			i40e_msec_delay(10);
4146266423Sjfv		}
4147279858Sjfv		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
4148279858Sjfv			device_printf(pf->dev, "RX queue %d still enabled!\n",
4149279858Sjfv			    index);
4150279858Sjfv			error = ETIMEDOUT;
4151279858Sjfv		}
4152266423Sjfv	}
4153279858Sjfv
4154279858Sjfv	return (error);
4155266423Sjfv}
4156266423Sjfv
4157269198Sjfv/**
4158270346Sjfv * ixl_handle_mdd_event
4159269198Sjfv *
4160269198Sjfv * Called from the interrupt handler to identify possibly malicious VFs
4161269198Sjfv * (MDD events caused by the PF itself are detected and reported as well)
4162269198Sjfv **/
4163270346Sjfvstatic void ixl_handle_mdd_event(struct ixl_pf *pf)
4164269198Sjfv{
4165269198Sjfv	struct i40e_hw *hw = &pf->hw;
4166269198Sjfv	device_t dev = pf->dev;
4167269198Sjfv	bool mdd_detected = false;
4168269198Sjfv	bool pf_mdd_detected = false;
4169269198Sjfv	u32 reg;
4170269198Sjfv
4171269198Sjfv	/* find what triggered the MDD event */
4172269198Sjfv	reg = rd32(hw, I40E_GL_MDET_TX);
4173269198Sjfv	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4174269198Sjfv		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
4175269198Sjfv				I40E_GL_MDET_TX_PF_NUM_SHIFT;
4176269198Sjfv		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
4177269198Sjfv				I40E_GL_MDET_TX_EVENT_SHIFT;
4178269198Sjfv		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
4179269198Sjfv				I40E_GL_MDET_TX_QUEUE_SHIFT;
4180269198Sjfv		device_printf(dev,
4181269198Sjfv			 "Malicious Driver Detection event 0x%02x"
4182269198Sjfv			 " on TX queue %d pf number 0x%02x\n",
4183269198Sjfv			 event, queue, pf_num);
4184269198Sjfv		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4185269198Sjfv		mdd_detected = true;
4186269198Sjfv	}
4187269198Sjfv	reg = rd32(hw, I40E_GL_MDET_RX);
4188269198Sjfv	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4189269198Sjfv		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
4190269198Sjfv				I40E_GL_MDET_RX_FUNCTION_SHIFT;
4191269198Sjfv		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
4192269198Sjfv				I40E_GL_MDET_RX_EVENT_SHIFT;
4193269198Sjfv		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
4194269198Sjfv				I40E_GL_MDET_RX_QUEUE_SHIFT;
4195269198Sjfv		device_printf(dev,
4196269198Sjfv			 "Malicious Driver Detection event 0x%02x"
4197269198Sjfv			 " on RX queue %d of function 0x%02x\n",
4198269198Sjfv			 event, queue, func);
4199269198Sjfv		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4200269198Sjfv		mdd_detected = true;
4201269198Sjfv	}
4202269198Sjfv
4203269198Sjfv	if (mdd_detected) {
4204269198Sjfv		reg = rd32(hw, I40E_PF_MDET_TX);
4205269198Sjfv		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
4206269198Sjfv			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
4207269198Sjfv			device_printf(dev,
4208269198Sjfv				 "MDD TX event is for this function 0x%08x\n",
4209269198Sjfv				 reg);
4210269198Sjfv			pf_mdd_detected = true;
4211269198Sjfv		}
4212269198Sjfv		reg = rd32(hw, I40E_PF_MDET_RX);
4213269198Sjfv		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
4214269198Sjfv			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
4215269198Sjfv			device_printf(dev,
4216269198Sjfv				 "MDD RX event is for this function 0x%08x\n",
4217269198Sjfv				 reg);
4218269198Sjfv			pf_mdd_detected = true;
4219269198Sjfv		}
4220269198Sjfv	}
4221269198Sjfv
4222269198Sjfv	/* re-enable mdd interrupt cause */
4223269198Sjfv	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4224269198Sjfv	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4225269198Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4226270346Sjfv	ixl_flush(hw);
4227269198Sjfv}
4228269198Sjfv
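/*
** ixl_enable_intr: in MSIX mode, enable the adminq vector and every
** queue interrupt on the VSI; otherwise enable the single legacy vector.
*/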
4229266423Sjfvstatic void
4230270346Sjfvixl_enable_intr(struct ixl_vsi *vsi)
4231266423Sjfv{
4232266423Sjfv	struct i40e_hw		*hw = vsi->hw;
4233270346Sjfv	struct ixl_queue	*que = vsi->queues;
4234266423Sjfv
4235270346Sjfv	if (ixl_enable_msix) {
4236270346Sjfv		ixl_enable_adminq(hw);
4237266423Sjfv		for (int i = 0; i < vsi->num_queues; i++, que++)
4238270346Sjfv			ixl_enable_queue(hw, que->me);
4239266423Sjfv	} else
4240270346Sjfv		ixl_enable_legacy(hw);
4241266423Sjfv}
4242266423Sjfv
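/*
** The next two helpers are the disable-side counterparts:
** ixl_disable_rings_intr masks each queue interrupt on the VSI, and
** ixl_disable_intr masks the adminq vector (MSIX) or the legacy vector.
*/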
4243266423Sjfvstatic void
4244279858Sjfvixl_disable_rings_intr(struct ixl_vsi *vsi)
4245266423Sjfv{
4246266423Sjfv	struct i40e_hw		*hw = vsi->hw;
4247270346Sjfv	struct ixl_queue	*que = vsi->queues;
4248266423Sjfv
4249279858Sjfv	for (int i = 0; i < vsi->num_queues; i++, que++)
4250279858Sjfv		ixl_disable_queue(hw, que->me);
4251279858Sjfv}
4252279858Sjfv
4253279858Sjfvstatic void
4254279858Sjfvixl_disable_intr(struct ixl_vsi *vsi)
4255279858Sjfv{
4256279858Sjfv	struct i40e_hw		*hw = vsi->hw;
4257279858Sjfv
4258279858Sjfv	if (ixl_enable_msix)
4259270346Sjfv		ixl_disable_adminq(hw);
4260279858Sjfv	else
4261270346Sjfv		ixl_disable_legacy(hw);
4262266423Sjfv}
4263266423Sjfv
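/*
** The small helpers below program the PFINT_DYN_CTL0/CTLN interrupt
** control registers: the enable variants set INTENA and CLEARPBA with
** the ITR index set to "none", while the disable variants write only
** the ITR-none index, leaving INTENA clear.
*/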
4264266423Sjfvstatic void
4265270346Sjfvixl_enable_adminq(struct i40e_hw *hw)
4266266423Sjfv{
4267266423Sjfv	u32		reg;
4268266423Sjfv
4269266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4270266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4271270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4272266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4273270346Sjfv	ixl_flush(hw);
4274266423Sjfv}
4275266423Sjfv
4276266423Sjfvstatic void
4277270346Sjfvixl_disable_adminq(struct i40e_hw *hw)
4278266423Sjfv{
4279266423Sjfv	u32		reg;
4280266423Sjfv
4281270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4282266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4283299547Serj	ixl_flush(hw);
4284266423Sjfv}
4285266423Sjfv
4286266423Sjfvstatic void
4287270346Sjfvixl_enable_queue(struct i40e_hw *hw, int id)
4288266423Sjfv{
4289266423Sjfv	u32		reg;
4290266423Sjfv
4291266423Sjfv	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
4292266423Sjfv	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
4293270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4294266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4295266423Sjfv}
4296266423Sjfv
4297266423Sjfvstatic void
4298270346Sjfvixl_disable_queue(struct i40e_hw *hw, int id)
4299266423Sjfv{
4300266423Sjfv	u32		reg;
4301266423Sjfv
4302270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4303266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
4304266423Sjfv}
4305266423Sjfv
4306266423Sjfvstatic void
4307270346Sjfvixl_enable_legacy(struct i40e_hw *hw)
4308266423Sjfv{
4309266423Sjfv	u32		reg;
4310266423Sjfv	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4311266423Sjfv	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4312270346Sjfv	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4313266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4314266423Sjfv}
4315266423Sjfv
4316266423Sjfvstatic void
4317270346Sjfvixl_disable_legacy(struct i40e_hw *hw)
4318266423Sjfv{
4319266423Sjfv	u32		reg;
4320266423Sjfv
4321270346Sjfv	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
4322266423Sjfv	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
4323266423Sjfv}
4324266423Sjfv
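/*
** ixl_update_stats_counters: read the port-level hardware counters
** (CRC/illegal byte errors, byte and packet counts, flow control and
** packet size histograms) into pf->stats, then refresh the PF VSI stats
** and the stats of any enabled VF VSIs.
*/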
4325266423Sjfvstatic void
4326270346Sjfvixl_update_stats_counters(struct ixl_pf *pf)
4327266423Sjfv{
4328266423Sjfv	struct i40e_hw	*hw = &pf->hw;
4329279858Sjfv	struct ixl_vsi	*vsi = &pf->vsi;
4330279858Sjfv	struct ixl_vf	*vf;
4331269198Sjfv
4332266423Sjfv	struct i40e_hw_port_stats *nsd = &pf->stats;
4333266423Sjfv	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
4334266423Sjfv
4335266423Sjfv	/* Update hw stats */
4336270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
4337266423Sjfv			   pf->stat_offsets_loaded,
4338266423Sjfv			   &osd->crc_errors, &nsd->crc_errors);
4339270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
4340266423Sjfv			   pf->stat_offsets_loaded,
4341266423Sjfv			   &osd->illegal_bytes, &nsd->illegal_bytes);
4342270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
4343266423Sjfv			   I40E_GLPRT_GORCL(hw->port),
4344266423Sjfv			   pf->stat_offsets_loaded,
4345266423Sjfv			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
4346270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
4347266423Sjfv			   I40E_GLPRT_GOTCL(hw->port),
4348266423Sjfv			   pf->stat_offsets_loaded,
4349266423Sjfv			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
4350270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
4351266423Sjfv			   pf->stat_offsets_loaded,
4352266423Sjfv			   &osd->eth.rx_discards,
4353266423Sjfv			   &nsd->eth.rx_discards);
4354270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
4355266423Sjfv			   I40E_GLPRT_UPRCL(hw->port),
4356266423Sjfv			   pf->stat_offsets_loaded,
4357266423Sjfv			   &osd->eth.rx_unicast,
4358266423Sjfv			   &nsd->eth.rx_unicast);
4359270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
4360266423Sjfv			   I40E_GLPRT_UPTCL(hw->port),
4361266423Sjfv			   pf->stat_offsets_loaded,
4362266423Sjfv			   &osd->eth.tx_unicast,
4363266423Sjfv			   &nsd->eth.tx_unicast);
4364270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
4365266423Sjfv			   I40E_GLPRT_MPRCL(hw->port),
4366266423Sjfv			   pf->stat_offsets_loaded,
4367266423Sjfv			   &osd->eth.rx_multicast,
4368266423Sjfv			   &nsd->eth.rx_multicast);
4369270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
4370266423Sjfv			   I40E_GLPRT_MPTCL(hw->port),
4371266423Sjfv			   pf->stat_offsets_loaded,
4372266423Sjfv			   &osd->eth.tx_multicast,
4373266423Sjfv			   &nsd->eth.tx_multicast);
4374270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
4375266423Sjfv			   I40E_GLPRT_BPRCL(hw->port),
4376266423Sjfv			   pf->stat_offsets_loaded,
4377266423Sjfv			   &osd->eth.rx_broadcast,
4378266423Sjfv			   &nsd->eth.rx_broadcast);
4379270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
4380266423Sjfv			   I40E_GLPRT_BPTCL(hw->port),
4381266423Sjfv			   pf->stat_offsets_loaded,
4382266423Sjfv			   &osd->eth.tx_broadcast,
4383266423Sjfv			   &nsd->eth.tx_broadcast);
4384266423Sjfv
4385270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
4386266423Sjfv			   pf->stat_offsets_loaded,
4387266423Sjfv			   &osd->tx_dropped_link_down,
4388266423Sjfv			   &nsd->tx_dropped_link_down);
4389270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
4390266423Sjfv			   pf->stat_offsets_loaded,
4391266423Sjfv			   &osd->mac_local_faults,
4392266423Sjfv			   &nsd->mac_local_faults);
4393270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
4394266423Sjfv			   pf->stat_offsets_loaded,
4395266423Sjfv			   &osd->mac_remote_faults,
4396266423Sjfv			   &nsd->mac_remote_faults);
4397270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
4398266423Sjfv			   pf->stat_offsets_loaded,
4399266423Sjfv			   &osd->rx_length_errors,
4400266423Sjfv			   &nsd->rx_length_errors);
4401266423Sjfv
4402269198Sjfv	/* Flow control (LFC) stats */
4403270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
4404266423Sjfv			   pf->stat_offsets_loaded,
4405266423Sjfv			   &osd->link_xon_rx, &nsd->link_xon_rx);
4406270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
4407266423Sjfv			   pf->stat_offsets_loaded,
4408266423Sjfv			   &osd->link_xon_tx, &nsd->link_xon_tx);
4409270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
4410266423Sjfv			   pf->stat_offsets_loaded,
4411266423Sjfv			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
4412270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
4413266423Sjfv			   pf->stat_offsets_loaded,
4414266423Sjfv			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
4415266423Sjfv
4416269198Sjfv	/* Packet size stats rx */
4417270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
4418266423Sjfv			   I40E_GLPRT_PRC64L(hw->port),
4419266423Sjfv			   pf->stat_offsets_loaded,
4420266423Sjfv			   &osd->rx_size_64, &nsd->rx_size_64);
4421270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
4422266423Sjfv			   I40E_GLPRT_PRC127L(hw->port),
4423266423Sjfv			   pf->stat_offsets_loaded,
4424266423Sjfv			   &osd->rx_size_127, &nsd->rx_size_127);
4425270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
4426266423Sjfv			   I40E_GLPRT_PRC255L(hw->port),
4427266423Sjfv			   pf->stat_offsets_loaded,
4428266423Sjfv			   &osd->rx_size_255, &nsd->rx_size_255);
4429270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
4430266423Sjfv			   I40E_GLPRT_PRC511L(hw->port),
4431266423Sjfv			   pf->stat_offsets_loaded,
4432266423Sjfv			   &osd->rx_size_511, &nsd->rx_size_511);
4433270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
4434266423Sjfv			   I40E_GLPRT_PRC1023L(hw->port),
4435266423Sjfv			   pf->stat_offsets_loaded,
4436266423Sjfv			   &osd->rx_size_1023, &nsd->rx_size_1023);
4437270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
4438266423Sjfv			   I40E_GLPRT_PRC1522L(hw->port),
4439266423Sjfv			   pf->stat_offsets_loaded,
4440266423Sjfv			   &osd->rx_size_1522, &nsd->rx_size_1522);
4441270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
4442266423Sjfv			   I40E_GLPRT_PRC9522L(hw->port),
4443266423Sjfv			   pf->stat_offsets_loaded,
4444266423Sjfv			   &osd->rx_size_big, &nsd->rx_size_big);
4445266423Sjfv
4446269198Sjfv	/* Packet size stats tx */
4447270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
4448266423Sjfv			   I40E_GLPRT_PTC64L(hw->port),
4449266423Sjfv			   pf->stat_offsets_loaded,
4450266423Sjfv			   &osd->tx_size_64, &nsd->tx_size_64);
4451270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
4452266423Sjfv			   I40E_GLPRT_PTC127L(hw->port),
4453266423Sjfv			   pf->stat_offsets_loaded,
4454266423Sjfv			   &osd->tx_size_127, &nsd->tx_size_127);
4455270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
4456266423Sjfv			   I40E_GLPRT_PTC255L(hw->port),
4457266423Sjfv			   pf->stat_offsets_loaded,
4458266423Sjfv			   &osd->tx_size_255, &nsd->tx_size_255);
4459270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
4460266423Sjfv			   I40E_GLPRT_PTC511L(hw->port),
4461266423Sjfv			   pf->stat_offsets_loaded,
4462266423Sjfv			   &osd->tx_size_511, &nsd->tx_size_511);
4463270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
4464266423Sjfv			   I40E_GLPRT_PTC1023L(hw->port),
4465266423Sjfv			   pf->stat_offsets_loaded,
4466266423Sjfv			   &osd->tx_size_1023, &nsd->tx_size_1023);
4467270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
4468266423Sjfv			   I40E_GLPRT_PTC1522L(hw->port),
4469266423Sjfv			   pf->stat_offsets_loaded,
4470266423Sjfv			   &osd->tx_size_1522, &nsd->tx_size_1522);
4471270346Sjfv	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
4472266423Sjfv			   I40E_GLPRT_PTC9522L(hw->port),
4473266423Sjfv			   pf->stat_offsets_loaded,
4474266423Sjfv			   &osd->tx_size_big, &nsd->tx_size_big);
4475266423Sjfv
4476270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
4477266423Sjfv			   pf->stat_offsets_loaded,
4478266423Sjfv			   &osd->rx_undersize, &nsd->rx_undersize);
4479270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
4480266423Sjfv			   pf->stat_offsets_loaded,
4481266423Sjfv			   &osd->rx_fragments, &nsd->rx_fragments);
4482270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
4483266423Sjfv			   pf->stat_offsets_loaded,
4484266423Sjfv			   &osd->rx_oversize, &nsd->rx_oversize);
4485270346Sjfv	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
4486266423Sjfv			   pf->stat_offsets_loaded,
4487266423Sjfv			   &osd->rx_jabber, &nsd->rx_jabber);
4488266423Sjfv	pf->stat_offsets_loaded = true;
4489269198Sjfv	/* End hw stats */
4490266423Sjfv
4491266423Sjfv	/* Update vsi stats */
4492279858Sjfv	ixl_update_vsi_stats(vsi);
4493266423Sjfv
4494279858Sjfv	for (int i = 0; i < pf->num_vfs; i++) {
4495279858Sjfv		vf = &pf->vfs[i];
4496279858Sjfv		if (vf->vf_flags & VF_FLAG_ENABLED)
4497279858Sjfv			ixl_update_eth_stats(&pf->vfs[i].vsi);
4498279858Sjfv	}
4499266423Sjfv}
4500266423Sjfv
4501266423Sjfv/*
4502266423Sjfv** Tasklet handler for MSIX Adminq interrupts
4503266423Sjfv**  - done outside the interrupt handler since it might sleep
4504266423Sjfv*/
4505266423Sjfvstatic void
4506270346Sjfvixl_do_adminq(void *context, int pending)
4507266423Sjfv{
4508270346Sjfv	struct ixl_pf			*pf = context;
4509266423Sjfv	struct i40e_hw			*hw = &pf->hw;
4510266423Sjfv	struct i40e_arq_event_info	event;
4511266423Sjfv	i40e_status			ret;
4512299547Serj	device_t			dev = pf->dev;
4513299549Serj	u32				reg, loop = 0;
4514266423Sjfv	u16				opcode, result;
4515266423Sjfv
4516299549Serj	// XXX: Handling EMPR reset recovery here is possibly an inappropriate overload of this task
4517299549Serj	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
4518299549Serj		int count = 0;
4519299549Serj		// ERJ: Typically finishes within 3-4 seconds
4520299549Serj		while (count++ < 100) {
4521299549Serj			reg = rd32(hw, I40E_GLGEN_RSTAT);
4522299549Serj			reg = reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
4523299549Serj			if (reg) {
4524299549Serj				i40e_msec_delay(100);
4525299549Serj			} else {
4526299549Serj				break;
4527299549Serj			}
4528299549Serj		}
4529299549Serj		device_printf(dev, "EMPR reset wait count: %d\n", count);
4530299549Serj
4531299549Serj		device_printf(dev, "Rebuilding HW structs...\n");
4532299549Serj		// XXX: I feel like this could cause a kernel panic some time in the future
4533299549Serj		ixl_stop(pf);
4534299549Serj		ixl_init(pf);
4535299549Serj
4536299549Serj		atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
4537299549Serj		return;
4538299549Serj	}
4539299549Serj
4540299549Serj	// Actually do Admin Queue handling
4541274205Sjfv	event.buf_len = IXL_AQ_BUF_SZ;
4542274205Sjfv	event.msg_buf = malloc(event.buf_len,
4543266423Sjfv	    M_DEVBUF, M_NOWAIT | M_ZERO);
4544266423Sjfv	if (!event.msg_buf) {
4545299547Serj		device_printf(dev, "%s: Unable to allocate memory for Admin"
4546299547Serj		    " Queue event!\n", __func__);
4547266423Sjfv		return;
4548266423Sjfv	}
4549266423Sjfv
4550279858Sjfv	IXL_PF_LOCK(pf);
4551266423Sjfv	/* clean and process any events */
4552266423Sjfv	do {
4553266423Sjfv		ret = i40e_clean_arq_element(hw, &event, &result);
4554266423Sjfv		if (ret)
4555266423Sjfv			break;
4556266423Sjfv		opcode = LE16_TO_CPU(event.desc.opcode);
4557299547Serj#ifdef IXL_DEBUG
4558299547Serj		device_printf(dev, "%s: Admin Queue event: %#06x\n", __func__, opcode);
4559299547Serj#endif
4560266423Sjfv		switch (opcode) {
4561266423Sjfv		case i40e_aqc_opc_get_link_status:
4562279858Sjfv			ixl_link_event(pf, &event);
4563266423Sjfv			break;
4564266423Sjfv		case i40e_aqc_opc_send_msg_to_pf:
4565279858Sjfv#ifdef PCI_IOV
4566279858Sjfv			ixl_handle_vf_msg(pf, &event);
4567279858Sjfv#endif
4568266423Sjfv			break;
4569266423Sjfv		case i40e_aqc_opc_event_lan_overflow:
4570266423Sjfv		default:
4571266423Sjfv			break;
4572266423Sjfv		}
4573266423Sjfv
4574270346Sjfv	} while (result && (loop++ < IXL_ADM_LIMIT));
4575266423Sjfv
4576266423Sjfv	free(event.msg_buf, M_DEVBUF);
4577266423Sjfv
4578279858Sjfv	/*
4579279858Sjfv	 * If there are still messages to process, reschedule ourselves.
4580279858Sjfv	 * Otherwise, re-enable our interrupt and go to sleep.
4581279858Sjfv	 */
4582279858Sjfv	if (result > 0)
4583279858Sjfv		taskqueue_enqueue(pf->tq, &pf->adminq);
4584266423Sjfv	else
4585299547Serj		ixl_enable_adminq(hw);
4586279858Sjfv
4587279858Sjfv	IXL_PF_UNLOCK(pf);
4588266423Sjfv}
4589266423Sjfv
4590266423Sjfv/**
4591266423Sjfv * Update VSI-specific ethernet statistics counters.
4592266423Sjfv **/
4593270346Sjfvvoid ixl_update_eth_stats(struct ixl_vsi *vsi)
4594266423Sjfv{
4595270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4596266423Sjfv	struct i40e_hw *hw = &pf->hw;
4597266423Sjfv	struct i40e_eth_stats *es;
4598266423Sjfv	struct i40e_eth_stats *oes;
4599272227Sglebius	struct i40e_hw_port_stats *nsd;
4600266423Sjfv	u16 stat_idx = vsi->info.stat_counter_idx;
4601266423Sjfv
4602266423Sjfv	es = &vsi->eth_stats;
4603266423Sjfv	oes = &vsi->eth_stats_offsets;
4604272227Sglebius	nsd = &pf->stats;
4605266423Sjfv
4606266423Sjfv	/* Gather up the stats that the hw collects */
4607270346Sjfv	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4608266423Sjfv			   vsi->stat_offsets_loaded,
4609266423Sjfv			   &oes->tx_errors, &es->tx_errors);
4610270346Sjfv	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4611266423Sjfv			   vsi->stat_offsets_loaded,
4612266423Sjfv			   &oes->rx_discards, &es->rx_discards);
4613266423Sjfv
4614270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4615266423Sjfv			   I40E_GLV_GORCL(stat_idx),
4616266423Sjfv			   vsi->stat_offsets_loaded,
4617266423Sjfv			   &oes->rx_bytes, &es->rx_bytes);
4618270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4619266423Sjfv			   I40E_GLV_UPRCL(stat_idx),
4620266423Sjfv			   vsi->stat_offsets_loaded,
4621266423Sjfv			   &oes->rx_unicast, &es->rx_unicast);
4622270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4623266423Sjfv			   I40E_GLV_MPRCL(stat_idx),
4624266423Sjfv			   vsi->stat_offsets_loaded,
4625266423Sjfv			   &oes->rx_multicast, &es->rx_multicast);
4626270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4627266423Sjfv			   I40E_GLV_BPRCL(stat_idx),
4628266423Sjfv			   vsi->stat_offsets_loaded,
4629266423Sjfv			   &oes->rx_broadcast, &es->rx_broadcast);
4630266423Sjfv
4631270346Sjfv	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4632266423Sjfv			   I40E_GLV_GOTCL(stat_idx),
4633266423Sjfv			   vsi->stat_offsets_loaded,
4634266423Sjfv			   &oes->tx_bytes, &es->tx_bytes);
4635270346Sjfv	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4636266423Sjfv			   I40E_GLV_UPTCL(stat_idx),
4637266423Sjfv			   vsi->stat_offsets_loaded,
4638266423Sjfv			   &oes->tx_unicast, &es->tx_unicast);
4639270346Sjfv	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4640266423Sjfv			   I40E_GLV_MPTCL(stat_idx),
4641266423Sjfv			   vsi->stat_offsets_loaded,
4642266423Sjfv			   &oes->tx_multicast, &es->tx_multicast);
4643270346Sjfv	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4644266423Sjfv			   I40E_GLV_BPTCL(stat_idx),
4645266423Sjfv			   vsi->stat_offsets_loaded,
4646266423Sjfv			   &oes->tx_broadcast, &es->tx_broadcast);
4647266423Sjfv	vsi->stat_offsets_loaded = true;
4648279858Sjfv}
4649269198Sjfv
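/*
** ixl_update_vsi_stats: refresh the VSI ethernet counters and fold them,
** together with the relevant port counters, into the ifnet statistics
** (packets, bytes, errors, drops).
*/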
4650279858Sjfvstatic void
4651279858Sjfvixl_update_vsi_stats(struct ixl_vsi *vsi)
4652279858Sjfv{
4653279858Sjfv	struct ixl_pf		*pf;
4654279858Sjfv	struct ifnet		*ifp;
4655279858Sjfv	struct i40e_eth_stats	*es;
4656279858Sjfv	u64			tx_discards;
4657279858Sjfv
4658279858Sjfv	struct i40e_hw_port_stats *nsd;
4659279858Sjfv
4660279858Sjfv	pf = vsi->back;
4661279858Sjfv	ifp = vsi->ifp;
4662279858Sjfv	es = &vsi->eth_stats;
4663279858Sjfv	nsd = &pf->stats;
4664279858Sjfv
4665279858Sjfv	ixl_update_eth_stats(vsi);
4666279858Sjfv
4667272227Sglebius	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
4668279858Sjfv	for (int i = 0; i < vsi->num_queues; i++)
4669272227Sglebius		tx_discards += vsi->queues[i].txr.br->br_drops;
4670272227Sglebius
4671269198Sjfv	/* Update ifnet stats */
4672272227Sglebius	IXL_SET_IPACKETS(vsi, es->rx_unicast +
4673269198Sjfv	                   es->rx_multicast +
4674272227Sglebius			   es->rx_broadcast);
4675272227Sglebius	IXL_SET_OPACKETS(vsi, es->tx_unicast +
4676269198Sjfv	                   es->tx_multicast +
4677272227Sglebius			   es->tx_broadcast);
4678272227Sglebius	IXL_SET_IBYTES(vsi, es->rx_bytes);
4679272227Sglebius	IXL_SET_OBYTES(vsi, es->tx_bytes);
4680272227Sglebius	IXL_SET_IMCASTS(vsi, es->rx_multicast);
4681272227Sglebius	IXL_SET_OMCASTS(vsi, es->tx_multicast);
4682269198Sjfv
4683279858Sjfv	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
4684279858Sjfv	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
4685279858Sjfv	    nsd->rx_jabber);
4686272227Sglebius	IXL_SET_OERRORS(vsi, es->tx_errors);
4687272227Sglebius	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
4688272227Sglebius	IXL_SET_OQDROPS(vsi, tx_discards);
4689272227Sglebius	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
4690272227Sglebius	IXL_SET_COLLISIONS(vsi, 0);
4691266423Sjfv}
4692266423Sjfv
4693266423Sjfv/**
4694266423Sjfv * Reset all of the stats for the given pf
4695266423Sjfv **/
4696270346Sjfvvoid ixl_pf_reset_stats(struct ixl_pf *pf)
4697266423Sjfv{
4698266423Sjfv	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4699266423Sjfv	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4700266423Sjfv	pf->stat_offsets_loaded = false;
4701266423Sjfv}
4702266423Sjfv
4703266423Sjfv/**
4704266423Sjfv * Resets all stats of the given vsi
4705266423Sjfv **/
4706270346Sjfvvoid ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4707266423Sjfv{
4708266423Sjfv	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4709266423Sjfv	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4710266423Sjfv	vsi->stat_offsets_loaded = false;
4711266423Sjfv}
4712266423Sjfv
4713266423Sjfv/**
4714266423Sjfv * Read and update a 48 bit stat from the hw
4715266423Sjfv *
4716266423Sjfv * Since the device stats are not reset at PFReset, they likely will not
4717266423Sjfv * be zeroed when the driver starts.  We'll save the first values read
4718266423Sjfv * and use them as offsets to be subtracted from the raw values in order
4719266423Sjfv * to report stats that count from zero.
4720266423Sjfv **/
4721266423Sjfvstatic void
4722270346Sjfvixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4723266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4724266423Sjfv{
4725266423Sjfv	u64 new_data;
4726266423Sjfv
4727270799Sbz#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
4728266423Sjfv	new_data = rd64(hw, loreg);
4729266423Sjfv#else
4730266423Sjfv	/*
4731269198Sjfv	 * Use two rd32's instead of one rd64; FreeBSD versions before
4732266423Sjfv	 * 10 don't support 8 byte bus reads/writes.
4733266423Sjfv	 */
4734266423Sjfv	new_data = rd32(hw, loreg);
4735266423Sjfv	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4736266423Sjfv#endif
4737266423Sjfv
4738266423Sjfv	if (!offset_loaded)
4739266423Sjfv		*offset = new_data;
4740266423Sjfv	if (new_data >= *offset)
4741266423Sjfv		*stat = new_data - *offset;
4742266423Sjfv	else
4743266423Sjfv		*stat = (new_data + ((u64)1 << 48)) - *offset;
4744266423Sjfv	*stat &= 0xFFFFFFFFFFFFULL;
4745266423Sjfv}
4746266423Sjfv
4747266423Sjfv/**
4748266423Sjfv * Read and update a 32 bit stat from the hw
4749266423Sjfv **/
4750266423Sjfvstatic void
4751270346Sjfvixl_stat_update32(struct i40e_hw *hw, u32 reg,
4752266423Sjfv	bool offset_loaded, u64 *offset, u64 *stat)
4753266423Sjfv{
4754266423Sjfv	u32 new_data;
4755266423Sjfv
4756266423Sjfv	new_data = rd32(hw, reg);
4757266423Sjfv	if (!offset_loaded)
4758266423Sjfv		*offset = new_data;
4759266423Sjfv	if (new_data >= *offset)
4760266423Sjfv		*stat = (u32)(new_data - *offset);
4761266423Sjfv	else
4762266423Sjfv		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4763266423Sjfv}
4764266423Sjfv
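/*
** ixl_add_device_sysctls: register the per-device sysctl nodes (flow
** control, advertised/current speed, firmware version, ITR settings and,
** when built with IXL_DEBUG_SYSCTL, various debug reports) under the
** device's sysctl tree.
**
** Example usage from userland, assuming the first ixl instance (unit 0):
**	sysctl dev.ixl.0.current_speed
**	sysctl dev.ixl.0.fw_version
*/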
4765299549Serjstatic void
4766299549Serjixl_add_device_sysctls(struct ixl_pf *pf)
4767299549Serj{
4768299549Serj	device_t dev = pf->dev;
4769299549Serj
4770299549Serj	/* Set up sysctls */
4771299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4772299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4773299549Serj	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
4774299551Serj	    pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
4775299549Serj
4776299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4777299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4778299549Serj	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
4779299551Serj	    pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
4780299549Serj
4781299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4782299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4783299549Serj	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
4784299549Serj	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
4785299549Serj
4786299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4787299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4788299549Serj	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
4789299549Serj	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
4790299549Serj
4791299549Serj	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4792299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4793299549Serj	    OID_AUTO, "rx_itr", CTLFLAG_RW,
4794299549Serj	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
4795299549Serj
4796299549Serj	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4797299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4798299549Serj	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
4799299549Serj	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
4800299549Serj
4801299549Serj	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4802299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4803299549Serj	    OID_AUTO, "tx_itr", CTLFLAG_RW,
4804299549Serj	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
4805299549Serj
4806299549Serj	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4807299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4808299549Serj	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
4809299549Serj	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
4810299549Serj
4811299549Serj#ifdef IXL_DEBUG_SYSCTL
4812299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4813299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4814299549Serj	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
4815299549Serj	    ixl_debug_info, "I", "Debug Information");
4816299549Serj
4817299551Serj	/* Shared-code debug message level */
4818299549Serj	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
4819299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4820299549Serj	    OID_AUTO, "debug_mask", CTLFLAG_RW,
4821299549Serj	    &pf->hw.debug_mask, 0, "Debug Message Level");
4822299549Serj
4823299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4824299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4825299549Serj	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
4826299551Serj	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
4827299549Serj
4828299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4829299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4830299549Serj	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
4831299549Serj	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
4832299549Serj
4833299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4834299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4835299549Serj	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
4836299549Serj	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
4837299549Serj
4838299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4839299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4840299549Serj	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
4841299549Serj	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
4842299549Serj
4843299549Serj	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
4844299549Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4845299549Serj	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
4846299549Serj	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
4847299551Serj
4848299551Serj#ifdef PCI_IOV
4849299551Serj	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
4850299551Serj	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
4851299551Serj	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
4852299551Serj	    0, "PF/VF Virtual Channel debug level");
4853299549Serj#endif
4854299551Serj#endif
4855299549Serj}
4856299549Serj
4857266423Sjfv/*
4858266423Sjfv** Set flow control using sysctl:
4859266423Sjfv** 	0 - off
4860266423Sjfv**	1 - rx pause
4861266423Sjfv**	2 - tx pause
4862266423Sjfv**	3 - full
4863266423Sjfv*/
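/*
** Example (assuming unit 0): `sysctl dev.ixl.0.fc=3` requests full
** RX/TX pause; reading the node returns the currently requested mode.
*/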
4864266423Sjfvstatic int
4865270346Sjfvixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4866266423Sjfv{
4867266423Sjfv	/*
4868266423Sjfv	 * TODO: ensure TX CRC insertion by hardware is enabled
4869266423Sjfv	 * when TX flow control is enabled.
4870299547Serj	 * (Not applicable to 40G ports.)
4871266423Sjfv	 */
4872270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4873266423Sjfv	struct i40e_hw *hw = &pf->hw;
4874266423Sjfv	device_t dev = pf->dev;
4875279033Sjfv	int error = 0;
4876266423Sjfv	enum i40e_status_code aq_error = 0;
4877266423Sjfv	u8 fc_aq_err = 0;
4878266423Sjfv
4879279033Sjfv	/* Get request */
4880279033Sjfv	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
4881266423Sjfv	if ((error) || (req->newptr == NULL))
4882269198Sjfv		return (error);
4883279033Sjfv	if (pf->fc < 0 || pf->fc > 3) {
4884266423Sjfv		device_printf(dev,
4885266423Sjfv		    "Invalid fc mode; valid modes are 0 through 3\n");
4886266423Sjfv		return (EINVAL);
4887266423Sjfv	}
4888266423Sjfv
4889269198Sjfv	/*
4890269198Sjfv	** Changing flow control mode currently does not work on
4891269198Sjfv	** 40GBASE-CR4 PHYs
4892269198Sjfv	*/
4893269198Sjfv	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4894269198Sjfv	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4895269198Sjfv		device_printf(dev, "Changing flow control mode unsupported"
4896269198Sjfv		    " on 40GBase-CR4 media.\n");
4897269198Sjfv		return (ENODEV);
4898269198Sjfv	}
4899269198Sjfv
4900266423Sjfv	/* Set fc ability for port */
4901279033Sjfv	hw->fc.requested_mode = pf->fc;
4902269198Sjfv	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4903269198Sjfv	if (aq_error) {
4904269198Sjfv		device_printf(dev,
4905269198Sjfv		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4906269198Sjfv		    __func__, aq_error, fc_aq_err);
4907299547Serj		return (EIO);
4908269198Sjfv	}
4909266423Sjfv
4910299547Serj	/* Get new link state */
4911299547Serj	i40e_msec_delay(250);
4912299547Serj	hw->phy.get_link_info = TRUE;
4913299547Serj	i40e_get_link_status(hw, &pf->link_up);
4914299547Serj
4915269198Sjfv	return (0);
4916269198Sjfv}
4917266423Sjfv
4918270346Sjfvstatic int
4919270346Sjfvixl_current_speed(SYSCTL_HANDLER_ARGS)
4920270346Sjfv{
4921270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4922270346Sjfv	struct i40e_hw *hw = &pf->hw;
4923270346Sjfv	int error = 0, index = 0;
4924270346Sjfv
4925270346Sjfv	char *speeds[] = {
4926270346Sjfv		"Unknown",
4927270346Sjfv		"100M",
4928270346Sjfv		"1G",
4929270346Sjfv		"10G",
4930270346Sjfv		"40G",
4931270346Sjfv		"20G"
4932270346Sjfv	};
4933270346Sjfv
4934270346Sjfv	ixl_update_link_status(pf);
4935270346Sjfv
4936270346Sjfv	switch (hw->phy.link_info.link_speed) {
4937270346Sjfv	case I40E_LINK_SPEED_100MB:
4938270346Sjfv		index = 1;
4939270346Sjfv		break;
4940270346Sjfv	case I40E_LINK_SPEED_1GB:
4941270346Sjfv		index = 2;
4942270346Sjfv		break;
4943270346Sjfv	case I40E_LINK_SPEED_10GB:
4944270346Sjfv		index = 3;
4945270346Sjfv		break;
4946270346Sjfv	case I40E_LINK_SPEED_40GB:
4947270346Sjfv		index = 4;
4948270346Sjfv		break;
4949270346Sjfv	case I40E_LINK_SPEED_20GB:
4950270346Sjfv		index = 5;
4951270346Sjfv		break;
4952270346Sjfv	case I40E_LINK_SPEED_UNKNOWN:
4953270346Sjfv	default:
4954270346Sjfv		index = 0;
4955270346Sjfv		break;
4956270346Sjfv	}
4957270346Sjfv
4958270346Sjfv	error = sysctl_handle_string(oidp, speeds[index],
4959270346Sjfv	    strlen(speeds[index]), req);
4960270346Sjfv	return (error);
4961270346Sjfv}
4962270346Sjfv
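/*
** ixl_set_advertised_speeds: fetch the current PHY abilities, build a
** new PHY config that keeps the existing abilities and EEE settings but
** advertises only the requested speed flags, then apply it via the
** admin queue and reinitialize the interface.
*/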
4963274205Sjfvstatic int
4964274205Sjfvixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
4965274205Sjfv{
4966274205Sjfv	struct i40e_hw *hw = &pf->hw;
4967274205Sjfv	device_t dev = pf->dev;
4968274205Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
4969274205Sjfv	struct i40e_aq_set_phy_config config;
4970274205Sjfv	enum i40e_status_code aq_error = 0;
4971274205Sjfv
4972274205Sjfv	/* Get current capability information */
4973279033Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
4974279033Sjfv	    FALSE, FALSE, &abilities, NULL);
4975274205Sjfv	if (aq_error) {
4976279033Sjfv		device_printf(dev,
4977279033Sjfv		    "%s: Error getting phy capabilities %d,"
4978274205Sjfv		    " aq error: %d\n", __func__, aq_error,
4979274205Sjfv		    hw->aq.asq_last_status);
4980274205Sjfv		return (EAGAIN);
4981274205Sjfv	}
4982274205Sjfv
4983274205Sjfv	/* Prepare new config */
4984274205Sjfv	bzero(&config, sizeof(config));
4985274205Sjfv	config.phy_type = abilities.phy_type;
4986274205Sjfv	config.abilities = abilities.abilities
4987274205Sjfv	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4988274205Sjfv	config.eee_capability = abilities.eee_capability;
4989274205Sjfv	config.eeer = abilities.eeer_val;
4990274205Sjfv	config.low_power_ctrl = abilities.d3_lpan;
4991274205Sjfv	/* Translate into aq cmd link_speed */
4992299552Serj	if (speeds & 0x10)
4993299552Serj		config.link_speed |= I40E_LINK_SPEED_40GB;
4994279858Sjfv	if (speeds & 0x8)
4995279858Sjfv		config.link_speed |= I40E_LINK_SPEED_20GB;
4996274205Sjfv	if (speeds & 0x4)
4997274205Sjfv		config.link_speed |= I40E_LINK_SPEED_10GB;
4998274205Sjfv	if (speeds & 0x2)
4999274205Sjfv		config.link_speed |= I40E_LINK_SPEED_1GB;
5000274205Sjfv	if (speeds & 0x1)
5001274205Sjfv		config.link_speed |= I40E_LINK_SPEED_100MB;
5002274205Sjfv
5003274205Sjfv	/* Do aq command & restart link */
5004274205Sjfv	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
5005274205Sjfv	if (aq_error) {
5006279033Sjfv		device_printf(dev,
5007279033Sjfv		    "%s: Error setting new phy config %d,"
5008274205Sjfv		    " aq error: %d\n", __func__, aq_error,
5009274205Sjfv		    hw->aq.asq_last_status);
5010274205Sjfv		return (EAGAIN);
5011274205Sjfv	}
5012274205Sjfv
5013277084Sjfv	/*
5014277084Sjfv	** This seems a bit heavy-handed, but some devices
5015277084Sjfv	** need a full reinit for the new config to take effect
5016277084Sjfv	*/
5017277084Sjfv	IXL_PF_LOCK(pf);
5018299547Serj	ixl_stop_locked(pf);
5019277084Sjfv	ixl_init_locked(pf);
5020277084Sjfv	IXL_PF_UNLOCK(pf);
5021277084Sjfv
5022274205Sjfv	return (0);
5023274205Sjfv}
5024274205Sjfv
5025269198Sjfv/*
5026269198Sjfv** Control link advertise speed:
5027270346Sjfv**	Flags:
5028299552Serj**	 0x1 - advertise 100 Mb
5029299552Serj**	 0x2 - advertise 1G
5030299552Serj**	 0x4 - advertise 10G
5031299552Serj**	 0x8 - advertise 20G
5032299552Serj**	0x10 - advertise 40G
5033269198Sjfv**
5034299552Serj**	Set to 0 to disable link
5035269198Sjfv*/
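/*
** Example (assuming unit 0): `sysctl dev.ixl.0.advertise_speed=0x6`
** advertises 1G and 10G only.
*/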
5036269198Sjfvstatic int
5037270346Sjfvixl_set_advertise(SYSCTL_HANDLER_ARGS)
5038269198Sjfv{
5039270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5040269198Sjfv	struct i40e_hw *hw = &pf->hw;
5041269198Sjfv	device_t dev = pf->dev;
5042270346Sjfv	int requested_ls = 0;
5043269198Sjfv	int error = 0;
5044266423Sjfv
5045269198Sjfv	/* Read in new mode */
5046270346Sjfv	requested_ls = pf->advertised_speed;
5047269198Sjfv	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
5048269198Sjfv	if ((error) || (req->newptr == NULL))
5049269198Sjfv		return (error);
5050279858Sjfv	/* Check for sane value */
5051299552Serj	if (requested_ls > 0x10) {
5052279858Sjfv		device_printf(dev, "Invalid advertised speed; "
5053299552Serj		    "valid modes are 0x1 through 0x10\n");
5054269198Sjfv		return (EINVAL);
5055266423Sjfv	}
5056279858Sjfv	/* Then check for validity based on adapter type */
5057279858Sjfv	switch (hw->device_id) {
5058279858Sjfv	case I40E_DEV_ID_10G_BASE_T:
5059299545Serj	case I40E_DEV_ID_10G_BASE_T4:
5060299552Serj		/* BaseT */
5061299552Serj		if (requested_ls & ~(0x7)) {
5062279858Sjfv			device_printf(dev,
5063299552Serj			    "Only 100M/1G/10G speeds supported on this device.\n");
5064279858Sjfv			return (EINVAL);
5065279858Sjfv		}
5066279858Sjfv		break;
5067279858Sjfv	case I40E_DEV_ID_20G_KR2:
5068299545Serj	case I40E_DEV_ID_20G_KR2_A:
5069299552Serj		/* 20G */
5070299552Serj		if (requested_ls & ~(0xE)) {
5071279858Sjfv			device_printf(dev,
5072299552Serj			    "Only 1G/10G/20G speeds supported on this device.\n");
5073279858Sjfv			return (EINVAL);
5074279858Sjfv		}
5075279858Sjfv		break;
5076299552Serj	case I40E_DEV_ID_KX_B:
5077299552Serj	case I40E_DEV_ID_QSFP_A:
5078299552Serj	case I40E_DEV_ID_QSFP_B:
5079299552Serj		/* 40G */
5080299552Serj		if (requested_ls & ~(0x10)) {
5081299552Serj			device_printf(dev,
5082299552Serj			    "Only 40G speeds supported on this device.\n");
5083299552Serj			return (EINVAL);
5084299552Serj		}
5085299552Serj		break;
5086279858Sjfv	default:
5087299552Serj		/* 10G (1G) */
5088299552Serj		if (requested_ls & ~(0x6)) {
5089279858Sjfv			device_printf(dev,
5090279858Sjfv			    "Only 1/10Gbs speeds are supported on this device.\n");
5091279858Sjfv			return (EINVAL);
5092279858Sjfv		}
5093279858Sjfv		break;
5094279858Sjfv	}
5095269198Sjfv
5096269198Sjfv	/* Exit if no change */
5097270346Sjfv	if (pf->advertised_speed == requested_ls)
5098269198Sjfv		return (0);
5099269198Sjfv
5100274205Sjfv	error = ixl_set_advertised_speeds(pf, requested_ls);
5101274205Sjfv	if (error)
5102274205Sjfv		return (error);
5103270346Sjfv
5104270346Sjfv	pf->advertised_speed = requested_ls;
5105270346Sjfv	ixl_update_link_status(pf);
5106269198Sjfv	return (0);
5107266423Sjfv}
5108266423Sjfv
5109266423Sjfv/*
5110266423Sjfv** Get the width and transaction speed of
5111266423Sjfv** the bus this adapter is plugged into.
5112266423Sjfv*/
5113266423Sjfvstatic u16
5114270346Sjfvixl_get_bus_info(struct i40e_hw *hw, device_t dev)
5115266423Sjfv{
5116266423Sjfv        u16                     link;
5117266423Sjfv        u32                     offset;
5118266423Sjfv
5119266423Sjfv        /* Get the PCI Express Capabilities offset */
5120266423Sjfv        pci_find_cap(dev, PCIY_EXPRESS, &offset);
5121266423Sjfv
5122266423Sjfv        /* ...and read the Link Status Register */
5123266423Sjfv        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
5124266423Sjfv
5125266423Sjfv        switch (link & I40E_PCI_LINK_WIDTH) {
5126266423Sjfv        case I40E_PCI_LINK_WIDTH_1:
5127266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x1;
5128266423Sjfv                break;
5129266423Sjfv        case I40E_PCI_LINK_WIDTH_2:
5130266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x2;
5131266423Sjfv                break;
5132266423Sjfv        case I40E_PCI_LINK_WIDTH_4:
5133266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x4;
5134266423Sjfv                break;
5135266423Sjfv        case I40E_PCI_LINK_WIDTH_8:
5136266423Sjfv                hw->bus.width = i40e_bus_width_pcie_x8;
5137266423Sjfv                break;
5138266423Sjfv        default:
5139266423Sjfv                hw->bus.width = i40e_bus_width_unknown;
5140266423Sjfv                break;
5141266423Sjfv        }
5142266423Sjfv
5143266423Sjfv        switch (link & I40E_PCI_LINK_SPEED) {
5144266423Sjfv        case I40E_PCI_LINK_SPEED_2500:
5145266423Sjfv                hw->bus.speed = i40e_bus_speed_2500;
5146266423Sjfv                break;
5147266423Sjfv        case I40E_PCI_LINK_SPEED_5000:
5148266423Sjfv                hw->bus.speed = i40e_bus_speed_5000;
5149266423Sjfv                break;
5150266423Sjfv        case I40E_PCI_LINK_SPEED_8000:
5151266423Sjfv                hw->bus.speed = i40e_bus_speed_8000;
5152266423Sjfv                break;
5153266423Sjfv        default:
5154266423Sjfv                hw->bus.speed = i40e_bus_speed_unknown;
5155266423Sjfv                break;
5156266423Sjfv        }
5157266423Sjfv
5158266423Sjfv
5159266423Sjfv        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
5160266423Sjfv            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
5161266423Sjfv            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
5162266423Sjfv            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
5163266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
5164266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
5165266423Sjfv            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
5166266423Sjfv            ("Unknown"));
5167266423Sjfv
5168266423Sjfv        if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
5169266423Sjfv            (hw->bus.speed < i40e_bus_speed_8000)) {
5170266423Sjfv                device_printf(dev, "PCI-Express bandwidth available"
5171279858Sjfv                    " for this device\n     may be insufficient for"
5172279858Sjfv                    " optimal performance.\n");
5173266423Sjfv                device_printf(dev, "For expected performance a x8 "
5174266423Sjfv                    "PCIE Gen3 slot is required.\n");
5175266423Sjfv        }
5176266423Sjfv
5177266423Sjfv        return (link);
5178266423Sjfv}
5179266423Sjfv
5180274205Sjfvstatic int
5181274205Sjfvixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
5182274205Sjfv{
5183274205Sjfv	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
5184274205Sjfv	struct i40e_hw	*hw = &pf->hw;
5185299552Serj	struct sbuf	*sbuf;
5186274205Sjfv
5187299552Serj	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5188299552Serj	ixl_nvm_version_str(hw, sbuf);
5189299552Serj	sbuf_finish(sbuf);
5190299552Serj	sbuf_delete(sbuf);
5191299552Serj
5192299552Serj	return 0;
5193274205Sjfv}
5194274205Sjfv
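/*
** ixl_handle_nvmupd_cmd: service an NVM update request passed in through
** a driver-specific ioctl (struct ifdrv). Validates the ifdrv buffer,
** waits out a pending EMPR reset, then hands the i40e_nvm_access command
** to the shared code under the PF lock. -EPERM from the shared code is
** remapped to -EACCES so the kernel does not retry the ioctl.
*/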
5195299547Serjstatic int
5196299547Serjixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
5197299547Serj{
5198299547Serj	struct i40e_hw *hw = &pf->hw;
5199299547Serj	struct i40e_nvm_access *nvma;
5200299547Serj	device_t dev = pf->dev;
5201299547Serj	enum i40e_status_code status = 0;
5202299547Serj	int perrno;
5203274205Sjfv
5204299547Serj	DEBUGFUNC("ixl_handle_nvmupd_cmd");
5205299547Serj
5206299547Serj	if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
5207299547Serj	    ifd->ifd_data == NULL) {
5208299547Serj		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", __func__);
5209299547Serj		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", __func__,
5210299547Serj		    ifd->ifd_len, sizeof(struct i40e_nvm_access));
5211299547Serj		device_printf(dev, "%s: data pointer: %p\n", __func__, ifd->ifd_data);
5212299547Serj		return (EINVAL);
5213299547Serj	}
5214299547Serj
5215299547Serj	nvma = (struct i40e_nvm_access *)ifd->ifd_data;
5216299547Serj
5217299549Serj	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
5218299549Serj		int count = 0;
5219299549Serj		while (count++ < 100) {
5220299549Serj			i40e_msec_delay(100);
5221299549Serj			if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
5222299549Serj				break;
5223299549Serj		}
5224299549Serj		// device_printf(dev, "ioctl EMPR reset wait count %d\n", count);
5225299549Serj	}
5226299549Serj
5227299549Serj	if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
5228299549Serj		IXL_PF_LOCK(pf);
5229299549Serj		status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
5230299549Serj		IXL_PF_UNLOCK(pf);
5231299549Serj	} else {
5232299549Serj		perrno = -EBUSY;
5233299549Serj	}
5234299549Serj
5235299548Serj	if (status)
5236299548Serj		device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
5237299548Serj		    status, perrno);
5238299547Serj
5239299549Serj	/*
5240299549Serj	 * -EPERM is actually ERESTART, which the kernel interprets as a request
5241299549Serj	 * to run this ioctl again. So use -EACCES for -EPERM instead.
5242299549Serj	 */
5243299548Serj	if (perrno == -EPERM)
5244299548Serj		return (-EACCES);
5245299548Serj	else
5246299548Serj		return (perrno);
5247299547Serj}
5248299547Serj
5249277084Sjfv#ifdef IXL_DEBUG_SYSCTL
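/*
** The sysctl handlers below are compiled in only with IXL_DEBUG_SYSCTL
** and report link status, PHY abilities, the software MAC/VLAN filter
** list, hardware resource allocation and the switch configuration as
** human-readable strings.
*/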
5250266423Sjfvstatic int
5251270346Sjfvixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
5252266423Sjfv{
5253270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5254266423Sjfv	struct i40e_hw *hw = &pf->hw;
5255266423Sjfv	struct i40e_link_status link_status;
5256266423Sjfv	char buf[512];
5257266423Sjfv
5258266423Sjfv	enum i40e_status_code aq_error = 0;
5259266423Sjfv
5260266423Sjfv	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
5261266423Sjfv	if (aq_error) {
5262266423Sjfv		printf("i40e_aq_get_link_info() error %d\n", aq_error);
5263266423Sjfv		return (EPERM);
5264266423Sjfv	}
5265266423Sjfv
5266266423Sjfv	sprintf(buf, "\n"
5267266423Sjfv	    "PHY Type : %#04x\n"
5268266423Sjfv	    "Speed    : %#04x\n"
5269266423Sjfv	    "Link info: %#04x\n"
5270266423Sjfv	    "AN info  : %#04x\n"
5271299551Serj	    "Ext info : %#04x\n"
5272299551Serj	    "Max Frame: %d\n"
5273299552Serj	    "Pacing   : %#04x\n"
5274299552Serj	    "CRC En?  : %d",
5275266423Sjfv	    link_status.phy_type, link_status.link_speed,
5276266423Sjfv	    link_status.link_info, link_status.an_info,
5277299551Serj	    link_status.ext_info, link_status.max_frame_size,
5278299552Serj	    link_status.pacing, link_status.crc_enable);
5279266423Sjfv
5280266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5281266423Sjfv}
5282266423Sjfv
5283266423Sjfvstatic int
5284270346Sjfvixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
5285266423Sjfv{
5286279858Sjfv	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
5287279858Sjfv	struct i40e_hw		*hw = &pf->hw;
5288279858Sjfv	char			buf[512];
5289279858Sjfv	enum i40e_status_code	aq_error = 0;
5290266423Sjfv
5291279858Sjfv	struct i40e_aq_get_phy_abilities_resp abilities;
5292266423Sjfv
5293279858Sjfv	aq_error = i40e_aq_get_phy_capabilities(hw,
5294279858Sjfv	    TRUE, FALSE, &abilities, NULL);
5295266423Sjfv	if (aq_error) {
5296266423Sjfv		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
5297266423Sjfv		return (EPERM);
5298266423Sjfv	}
5299266423Sjfv
5300266423Sjfv	sprintf(buf, "\n"
5301266423Sjfv	    "PHY Type : %#010x\n"
5302266423Sjfv	    "Speed    : %#04x\n"
5303266423Sjfv	    "Abilities: %#04x\n"
5304266423Sjfv	    "EEE cap  : %#06x\n"
5305266423Sjfv	    "EEER reg : %#010x\n"
5306266423Sjfv	    "D3 Lpan  : %#04x",
5307279858Sjfv	    abilities.phy_type, abilities.link_speed,
5308279858Sjfv	    abilities.abilities, abilities.eee_capability,
5309279858Sjfv	    abilities.eeer_val, abilities.d3_lpan);
5310266423Sjfv
5311266423Sjfv	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
5312266423Sjfv}
5313266423Sjfv
5314266423Sjfvstatic int
5315270346Sjfvixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5316266423Sjfv{
5317270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5318270346Sjfv	struct ixl_vsi *vsi = &pf->vsi;
5319270346Sjfv	struct ixl_mac_filter *f;
5320266423Sjfv	char *buf, *buf_i;
5321266423Sjfv
5322266423Sjfv	int error = 0;
5323266423Sjfv	int ftl_len = 0;
5324266423Sjfv	int ftl_counter = 0;
5325266423Sjfv	int buf_len = 0;
5326266423Sjfv	int entry_len = 42;
5327266423Sjfv
5328266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
5329266423Sjfv		ftl_len++;
5330266423Sjfv	}
5331266423Sjfv
5332266423Sjfv	if (ftl_len < 1) {
5333266423Sjfv		sysctl_handle_string(oidp, "(none)", 6, req);
5334266423Sjfv		return (0);
5335266423Sjfv	}
5336266423Sjfv
5337266423Sjfv	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5338266423Sjfv	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);
5339266423Sjfv
5340266423Sjfv	sprintf(buf_i++, "\n");
5341266423Sjfv	SLIST_FOREACH(f, &vsi->ftl, next) {
5342266423Sjfv		sprintf(buf_i,
5343266423Sjfv		    MAC_FORMAT ", vlan %4d, flags %#06x",
5344266423Sjfv		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5345266423Sjfv		buf_i += entry_len;
5346266423Sjfv		/* don't print '\n' for last entry */
5347266423Sjfv		if (++ftl_counter != ftl_len) {
5348266423Sjfv			sprintf(buf_i, "\n");
5349266423Sjfv			buf_i++;
5350266423Sjfv		}
5351266423Sjfv	}
5352266423Sjfv
5353266423Sjfv	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5354266423Sjfv	if (error)
5355266423Sjfv		printf("sysctl error: %d\n", error);
5356266423Sjfv	free(buf, M_DEVBUF);
5357266423Sjfv	return error;
5358266423Sjfv}
5359269198Sjfv
5360270346Sjfv#define IXL_SW_RES_SIZE 0x14
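/* qsort() comparator: order switch resource entries by resource_type for display. */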
5361269198Sjfvstatic int
5362277084Sjfvixl_res_alloc_cmp(const void *a, const void *b)
5363277084Sjfv{
5364277084Sjfv	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5365284049Sjfv	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5366284049Sjfv	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5367277084Sjfv
5368277084Sjfv	return ((int)one->resource_type - (int)two->resource_type);
5369277084Sjfv}
5370277084Sjfv
5371299549Serj/*
5372299549Serj * Longest string length: 25
5373299549Serj */
5374299549Serjstatic char *
5375299549Serjixl_switch_res_type_string(u8 type)
5376299549Serj{
5377299549Serj	static char * ixl_switch_res_type_strings[0x14] = {
5378299549Serj		"VEB",
5379299549Serj		"VSI",
5380299549Serj		"Perfect Match MAC address",
5381299549Serj		"S-tag",
5382299549Serj		"(Reserved)",
5383299549Serj		"Multicast hash entry",
5384299549Serj		"Unicast hash entry",
5385299549Serj		"VLAN",
5386299549Serj		"VSI List entry",
5387299549Serj		"(Reserved)",
5388299549Serj		"VLAN Statistic Pool",
5389299549Serj		"Mirror Rule",
5390299549Serj		"Queue Set",
5391299549Serj		"Inner VLAN Forward filter",
5392299549Serj		"(Reserved)",
5393299549Serj		"Inner MAC",
5394299549Serj		"IP",
5395299549Serj		"GRE/VN1 Key",
5396299549Serj		"VN2 Key",
5397299549Serj		"Tunneling Port"
5398299549Serj	};
5399299549Serj
5400299549Serj	if (type < 0x14)
5401299549Serj		return ixl_switch_res_type_strings[type];
5402299549Serj	else
5403299549Serj		return "(Reserved)";
5404299549Serj}
5405299549Serj
5406277084Sjfvstatic int
5407274205Sjfvixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
5408269198Sjfv{
5409270346Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5410269198Sjfv	struct i40e_hw *hw = &pf->hw;
5411269198Sjfv	device_t dev = pf->dev;
5412269198Sjfv	struct sbuf *buf;
5413269198Sjfv	int error = 0;
5414269198Sjfv
5415269198Sjfv	u8 num_entries;
5416270346Sjfv	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
5417269198Sjfv
5418299546Serj	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5419269198Sjfv	if (!buf) {
5420269198Sjfv		device_printf(dev, "Could not allocate sbuf for output.\n");
5421269198Sjfv		return (ENOMEM);
5422269198Sjfv	}
5423269198Sjfv
5424277084Sjfv	bzero(resp, sizeof(resp));
5425269198Sjfv	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
5426269198Sjfv				resp,
5427270346Sjfv				IXL_SW_RES_SIZE,
5428269198Sjfv				NULL);
5429269198Sjfv	if (error) {
5430279858Sjfv		device_printf(dev,
5431279858Sjfv		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
5432269198Sjfv		    __func__, error, hw->aq.asq_last_status);
5433269198Sjfv		sbuf_delete(buf);
5434269198Sjfv		return error;
5435269198Sjfv	}
5436269198Sjfv
5437277084Sjfv	/* Sort entries by type for display */
5438277084Sjfv	qsort(resp, num_entries,
5439277084Sjfv	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
5440277084Sjfv	    &ixl_res_alloc_cmp);
5441277084Sjfv
5442269198Sjfv	sbuf_cat(buf, "\n");
5443277084Sjfv	sbuf_printf(buf, "# of entries: %d\n", num_entries);
5444269198Sjfv	sbuf_printf(buf,
5445299549Serj#if 0
5446269198Sjfv	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
5447269198Sjfv	    "     | (this)     | (all) | (this) | (all)       \n");
5448299549Serj#endif
5449299549Serj	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
5450299549Serj	    "                          | (this)     | (all) | (this) | (all)       \n");
5451269198Sjfv	for (int i = 0; i < num_entries; i++) {
5452269198Sjfv		sbuf_printf(buf,
5453299549Serj#if 0
5454269198Sjfv		    "%#4x | %10d   %5d   %6d   %12d",
5455269198Sjfv		    resp[i].resource_type,
5456299549Serj#endif
5457299549Serj		    "%25s | %10d   %5d   %6d   %12d",
5458299549Serj		    ixl_switch_res_type_string(resp[i].resource_type),
5459269198Sjfv		    resp[i].guaranteed,
5460269198Sjfv		    resp[i].total,
5461269198Sjfv		    resp[i].used,
5462269198Sjfv		    resp[i].total_unalloced);
5463269198Sjfv		if (i < num_entries - 1)
5464269198Sjfv			sbuf_cat(buf, "\n");
5465269198Sjfv	}
5466269198Sjfv
5467269198Sjfv	error = sbuf_finish(buf);
5468299546Serj	if (error)
5469299545Serj		device_printf(dev, "Error finishing sbuf: %d\n", error);
5470299545Serj
5471290708Ssmh	sbuf_delete(buf);
5472299545Serj	return (error);
5473274205Sjfv}
5474269198Sjfv
5475274205Sjfv/*
5476274205Sjfv** Caller must init and delete sbuf; this function will clear and
5477274205Sjfv** finish it for the caller.
5478299549Serj**
5479299549Serj** XXX: Cannot use the SEID for this, since there is no longer a
5480299549Serj** fixed mapping between SEID and element type.
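**
** A minimal usage sketch (mirroring ixl_sysctl_switch_config() below;
** "s" and "elem" are placeholder names):
**   s = sbuf_new_auto(); ... ixl_switch_element_string(s, &elem); ...
**   sbuf_delete(s);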
5481274205Sjfv*/
5482274205Sjfvstatic char *
5483299549Serjixl_switch_element_string(struct sbuf *s,
5484299549Serj    struct i40e_aqc_switch_config_element_resp *element)
5485274205Sjfv{
5486274205Sjfv	sbuf_clear(s);
5487274205Sjfv
5488299549Serj	switch (element->element_type) {
5489299549Serj	case I40E_AQ_SW_ELEM_TYPE_MAC:
5490299549Serj		sbuf_printf(s, "MAC %3d", element->element_info);
5491299549Serj		break;
5492299549Serj	case I40E_AQ_SW_ELEM_TYPE_PF:
5493299549Serj		sbuf_printf(s, "PF  %3d", element->element_info);
5494299549Serj		break;
5495299549Serj	case I40E_AQ_SW_ELEM_TYPE_VF:
5496299549Serj		sbuf_printf(s, "VF  %3d", element->element_info);
5497299549Serj		break;
5498299549Serj	case I40E_AQ_SW_ELEM_TYPE_EMP:
5499274205Sjfv		sbuf_cat(s, "EMP");
5500299549Serj		break;
5501299549Serj	case I40E_AQ_SW_ELEM_TYPE_BMC:
5502299549Serj		sbuf_cat(s, "BMC");
5503299549Serj		break;
5504299549Serj	case I40E_AQ_SW_ELEM_TYPE_PV:
5505299549Serj		sbuf_cat(s, "PV");
5506299549Serj		break;
5507299549Serj	case I40E_AQ_SW_ELEM_TYPE_VEB:
5508299549Serj		sbuf_cat(s, "VEB");
5509299549Serj		break;
5510299549Serj	case I40E_AQ_SW_ELEM_TYPE_PA:
5511299549Serj		sbuf_cat(s, "PA");
5512299549Serj		break;
5513299549Serj	case I40E_AQ_SW_ELEM_TYPE_VSI:
5514299549Serj		sbuf_printf(s, "VSI %3d", element->element_info);
5515299549Serj		break;
5516299549Serj	default:
5517299549Serj		sbuf_cat(s, "?");
5518299549Serj		break;
5519299549Serj	}
5520274205Sjfv
5521274205Sjfv	sbuf_finish(s);
5522274205Sjfv	return sbuf_data(s);
5523269198Sjfv}
5524269198Sjfv
5525274205Sjfvstatic int
5526274205Sjfvixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
5527274205Sjfv{
5528274205Sjfv	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5529274205Sjfv	struct i40e_hw *hw = &pf->hw;
5530274205Sjfv	device_t dev = pf->dev;
5531274205Sjfv	struct sbuf *buf;
5532274205Sjfv	struct sbuf *nmbuf;
5533274205Sjfv	int error = 0;
5534299549Serj	u16 next = 0;
5535274205Sjfv	u8 aq_buf[I40E_AQ_LARGE_BUF];
5536274205Sjfv
5537274205Sjfv	struct i40e_aqc_get_switch_config_resp *sw_config;
5538274205Sjfv	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
5539274205Sjfv
5540299546Serj	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5541274205Sjfv	if (!buf) {
5542274205Sjfv		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
5543274205Sjfv		return (ENOMEM);
5544274205Sjfv	}
5545274205Sjfv
5546274205Sjfv	error = i40e_aq_get_switch_config(hw, sw_config,
5547274205Sjfv	    sizeof(aq_buf), &next, NULL);
5548274205Sjfv	if (error) {
5549279858Sjfv		device_printf(dev,
5550279858Sjfv		    "%s: aq_get_switch_config() error %d, aq error %d\n",
5551274205Sjfv		    __func__, error, hw->aq.asq_last_status);
5552274205Sjfv		sbuf_delete(buf);
5553274205Sjfv		return (error);
5554274205Sjfv	}
5555299549Serj	if (next)
5556299549Serj		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
5557299549Serj		    __func__, next);
5558274205Sjfv
5559274205Sjfv	nmbuf = sbuf_new_auto();
5560274205Sjfv	if (!nmbuf) {
5561274205Sjfv		device_printf(dev, "Could not allocate sbuf for name output.\n");
5562299546Serj		sbuf_delete(buf);
5563274205Sjfv		return (ENOMEM);
5564274205Sjfv	}
5565274205Sjfv
5566274205Sjfv	sbuf_cat(buf, "\n");
5567274205Sjfv	// Assuming <= 255 elements in switch
5568299549Serj	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
5569299549Serj	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
5570274205Sjfv	/* Exclude:
5571274205Sjfv	** Revision -- all elements are revision 1 for now
5572274205Sjfv	*/
5573274205Sjfv	sbuf_printf(buf,
5574274205Sjfv	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
5575274205Sjfv	    "                |          |          | (uplink)\n");
5576274205Sjfv	for (int i = 0; i < sw_config->header.num_reported; i++) {
5577274205Sjfv		// "%4d (%8s) | %8s   %8s   %#8x",
5578274205Sjfv		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
5579274205Sjfv		sbuf_cat(buf, " ");
5580279858Sjfv		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
5581299549Serj		    &sw_config->element[i]));
5582274205Sjfv		sbuf_cat(buf, " | ");
5583299549Serj		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
5584274205Sjfv		sbuf_cat(buf, "   ");
5585299549Serj		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
5586274205Sjfv		sbuf_cat(buf, "   ");
5587274205Sjfv		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
5588274205Sjfv		if (i < sw_config->header.num_reported - 1)
5589274205Sjfv			sbuf_cat(buf, "\n");
5590274205Sjfv	}
5591274205Sjfv	sbuf_delete(nmbuf);
5592274205Sjfv
5593274205Sjfv	error = sbuf_finish(buf);
5594299546Serj	if (error)
5595299545Serj		device_printf(dev, "Error finishing sbuf: %d\n", error);
5596299545Serj
5597274205Sjfv	sbuf_delete(buf);
5598274205Sjfv
5599274205Sjfv	return (error);
5600274205Sjfv}
5601299552Serj
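/*
 * Sysctl handler: writing 1 to this node dumps a snapshot of queue, VSI,
 * and port counters to the console via ixl_print_debug_info().
 */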
5602299552Serjstatic int
5603299552Serjixl_debug_info(SYSCTL_HANDLER_ARGS)
5604299552Serj{
5605299552Serj	struct ixl_pf	*pf;
5606299552Serj	int		error, input = 0;
5607299552Serj
5608299552Serj	error = sysctl_handle_int(oidp, &input, 0, req);
5609299552Serj
5610299552Serj	if (error || !req->newptr)
5611299552Serj		return (error);
5612299552Serj
5613299552Serj	if (input == 1) {
5614299552Serj		pf = (struct ixl_pf *)arg1;
5615299552Serj		ixl_print_debug_info(pf);
5616299552Serj	}
5617299552Serj
5618299552Serj	return (error);
5619299552Serj}
5620299552Serj
5621299552Serjstatic void
5622299552Serjixl_print_debug_info(struct ixl_pf *pf)
5623299552Serj{
5624299552Serj	struct i40e_hw		*hw = &pf->hw;
5625299552Serj	struct ixl_vsi		*vsi = &pf->vsi;
5626299552Serj	struct ixl_queue	*que = vsi->queues;
5627299552Serj	struct rx_ring		*rxr = &que->rxr;
5628299552Serj	struct tx_ring		*txr = &que->txr;
5629299552Serj	u32			reg;
5630299552Serj
5632299552Serj	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
5633299552Serj	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
5634299552Serj	printf("RX next check = %x\n", rxr->next_check);
5635299552Serj	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
5636299552Serj	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
5637299552Serj	printf("TX desc avail = %x\n", txr->avail);
5638299552Serj
5639299552Serj	reg = rd32(hw, I40E_GLV_GORCL(0xc));
5640299552Serj	printf("RX Bytes = %x\n", reg);
5641299552Serj	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
5642299552Serj	printf("Port RX Bytes = %x\n", reg);
5643299552Serj	reg = rd32(hw, I40E_GLV_RDPC(0xc));
5644299552Serj	printf("RX discard = %x\n", reg);
5645299552Serj	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
5646299552Serj	printf("Port RX discard = %x\n", reg);
5647299552Serj
5648299552Serj	reg = rd32(hw, I40E_GLV_TEPC(0xc));
5649299552Serj	printf("TX errors = %x\n", reg);
5650299552Serj	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
5651299552Serj	printf("TX Bytes = %x\n", reg);
5652299552Serj
5653299552Serj	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
5654299552Serj	printf("RX undersize = %x\n", reg);
5655299552Serj	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
5656299552Serj	printf("RX fragments = %x\n", reg);
5657299552Serj	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
5658299552Serj	printf("RX oversize = %x\n", reg);
5659299552Serj	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
5660299552Serj	printf("RX length error = %x\n", reg);
5661299552Serj	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
5662299552Serj	printf("mac remote fault = %x\n", reg);
5663299552Serj	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
5664299552Serj	printf("mac local fault = %x\n", reg);
5665299552Serj}
5666299552Serj
5667279858Sjfv#endif /* IXL_DEBUG_SYSCTL */
5668274205Sjfv
5669279858Sjfv#ifdef PCI_IOV
5670269198Sjfvstatic int
5671279858Sjfvixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5672269198Sjfv{
5673279858Sjfv	struct i40e_hw *hw;
5674279858Sjfv	struct ixl_vsi *vsi;
5675279858Sjfv	struct i40e_vsi_context vsi_ctx;
5676279858Sjfv	int i;
5677279858Sjfv	uint16_t first_queue;
5678279858Sjfv	enum i40e_status_code code;
5679269198Sjfv
5680279858Sjfv	hw = &pf->hw;
5681279858Sjfv	vsi = &pf->vsi;
5682269198Sjfv
5683279858Sjfv	vsi_ctx.pf_num = hw->pf_id;
5684279858Sjfv	vsi_ctx.uplink_seid = pf->veb_seid;
5685279858Sjfv	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
5686279858Sjfv	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5687279858Sjfv	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
5688279858Sjfv
5689279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
5690279858Sjfv
5691279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5692279858Sjfv	vsi_ctx.info.switch_id = htole16(0);
5693279858Sjfv
5694279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
5695279858Sjfv	vsi_ctx.info.sec_flags = 0;
5696279858Sjfv	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
5697279858Sjfv		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
5698279858Sjfv
5699299551Serj	/* TODO: If a port VLAN is set, then this needs to be changed */
5700279858Sjfv	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
5701279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5702279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5703279858Sjfv
5704279858Sjfv	vsi_ctx.info.valid_sections |=
5705279858Sjfv	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5706279858Sjfv	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5707279858Sjfv	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
5708279858Sjfv	for (i = 0; i < IXLV_MAX_QUEUES; i++)
5709279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
5710279858Sjfv	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
5711279858Sjfv		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
5712279858Sjfv
5713279858Sjfv	vsi_ctx.info.tc_mapping[0] = htole16(
5714279858Sjfv	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5715279858Sjfv	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5716279858Sjfv
5717279858Sjfv	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
5718279858Sjfv	if (code != I40E_SUCCESS)
5719279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5720279858Sjfv	vf->vsi.seid = vsi_ctx.seid;
5721279858Sjfv	vf->vsi.vsi_num = vsi_ctx.vsi_number;
5722279858Sjfv	vf->vsi.first_queue = first_queue;
5723279858Sjfv	vf->vsi.num_queues = IXLV_MAX_QUEUES;
5724279858Sjfv
5725279858Sjfv	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
5726279858Sjfv	if (code != I40E_SUCCESS)
5727279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5728279858Sjfv
5729279858Sjfv	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
5730279858Sjfv	if (code != I40E_SUCCESS) {
5731279858Sjfv		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
5732279858Sjfv		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5733279858Sjfv		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
5734269198Sjfv	}
5735269198Sjfv
5736279858Sjfv	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
5737279858Sjfv	return (0);
5738279858Sjfv}
5739279858Sjfv
5740279858Sjfvstatic int
5741279858Sjfvixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
5742279858Sjfv{
5743279858Sjfv	struct i40e_hw *hw;
5744279858Sjfv	int error;
5745279858Sjfv
5746279858Sjfv	hw = &pf->hw;
5747279858Sjfv
5748279858Sjfv	error = ixl_vf_alloc_vsi(pf, vf);
5749279858Sjfv	if (error != 0)
5750269198Sjfv		return (error);
5751279858Sjfv
5752279858Sjfv	vf->vsi.hw_filters_add = 0;
5753279858Sjfv	vf->vsi.hw_filters_del = 0;
5754279858Sjfv	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
5755279858Sjfv	ixl_reconfigure_filters(&vf->vsi);
5756279858Sjfv
5757279858Sjfv	return (0);
5758279858Sjfv}
5759279858Sjfv
5760279858Sjfvstatic void
5761279858Sjfvixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
5762279858Sjfv    uint32_t val)
5763279858Sjfv{
5764279858Sjfv	uint32_t qtable;
5765279858Sjfv	int index, shift;
5766279858Sjfv
5767279858Sjfv	/*
5768279858Sjfv	 * Two queues are mapped in a single register, so we have to do some
5769279858Sjfv	 * gymnastics to convert the queue number into a register index and
5770279858Sjfv	 * shift.
5771279858Sjfv	 */
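	/*
	 * For example, queue 5 lands in QTABLE register index 2 (5 / 2) with
	 * shift = 1 * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT (the odd-queue slot).
	 */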
5772279858Sjfv	index = qnum / 2;
5773279858Sjfv	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
5774279858Sjfv
5775279858Sjfv	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
5776279858Sjfv	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
5777279858Sjfv	qtable |= val << shift;
5778279858Sjfv	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
5779279858Sjfv}
5780279858Sjfv
5781279858Sjfvstatic void
5782279858Sjfvixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
5783279858Sjfv{
5784279858Sjfv	struct i40e_hw *hw;
5785279858Sjfv	uint32_t qtable;
5786279858Sjfv	int i;
5787279858Sjfv
5788279858Sjfv	hw = &pf->hw;
5789279858Sjfv
5790279858Sjfv	/*
5791279858Sjfv	 * Contiguous mappings aren't actually supported by the hardware,
5792279858Sjfv	 * so we have to use non-contiguous mappings.
5793279858Sjfv	 */
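	/*
	 * Two tables are programmed below: VPLAN_QTABLE translates the VF's
	 * relative queue index i into the absolute PF queue number
	 * (first_queue + i), and VSILAN_QTABLE maps the VSI's queue slots
	 * onto those same absolute queues, with the remaining slots marked
	 * unused.
	 */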
5794279858Sjfv	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
5795279858Sjfv	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
5796279858Sjfv
5797279858Sjfv	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
5798279858Sjfv	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
5799279858Sjfv
5800279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++) {
5801279858Sjfv		qtable = (vf->vsi.first_queue + i) <<
5802279858Sjfv		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
5803279858Sjfv
5804279858Sjfv		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
5805279858Sjfv	}
5806279858Sjfv
5807279858Sjfv	/* Map queues allocated to VF to its VSI. */
5808279858Sjfv	for (i = 0; i < vf->vsi.num_queues; i++)
5809279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
5810279858Sjfv
5811279858Sjfv	/* Set rest of VSI queues as unused. */
5812279858Sjfv	for (; i < IXL_MAX_VSI_QUEUES; i++)
5813279858Sjfv		ixl_vf_map_vsi_queue(hw, vf, i,
5814279858Sjfv		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
5815279858Sjfv
5816279858Sjfv	ixl_flush(hw);
5817279858Sjfv}
5818279858Sjfv
5819279858Sjfvstatic void
5820279858Sjfvixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
5821279858Sjfv{
5822279858Sjfv	struct i40e_hw *hw;
5823279858Sjfv
5824279858Sjfv	hw = &pf->hw;
5825279858Sjfv
5826279858Sjfv	if (vsi->seid == 0)
5827279858Sjfv		return;
5828279858Sjfv
5829279858Sjfv	i40e_aq_delete_element(hw, vsi->seid, NULL);
5830279858Sjfv}
5831279858Sjfv
5832279858Sjfvstatic void
5833279858Sjfvixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
5834279858Sjfv{
5835279858Sjfv
5836279858Sjfv	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
5837279858Sjfv	ixl_flush(hw);
5838279858Sjfv}
5839279858Sjfv
5840279858Sjfvstatic void
5841279858Sjfvixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
5842279858Sjfv{
5843279858Sjfv
5844279858Sjfv	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
5845279858Sjfv	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
5846279858Sjfv	ixl_flush(hw);
5847279858Sjfv}
5848279858Sjfv
5849279858Sjfvstatic void
5850279858Sjfvixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
5851279858Sjfv{
5852279858Sjfv	struct i40e_hw *hw;
5853279858Sjfv	uint32_t vfint_reg, vpint_reg;
5854279858Sjfv	int i;
5855279858Sjfv
5856279858Sjfv	hw = &pf->hw;
5857279858Sjfv
5858279858Sjfv	ixl_vf_vsi_release(pf, &vf->vsi);
5859279858Sjfv
5860279858Sjfv	/* Index 0 has a special register. */
5861279858Sjfv	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
5862279858Sjfv
5863279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5864279858Sjfv		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
5865279858Sjfv		ixl_vf_disable_queue_intr(hw, vfint_reg);
5866279858Sjfv	}
5867279858Sjfv
5868279858Sjfv	/* Index 0 has a special register. */
5869279858Sjfv	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
5870279858Sjfv
5871279858Sjfv	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
5872279858Sjfv		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
5873279858Sjfv		ixl_vf_unregister_intr(hw, vpint_reg);
5874279858Sjfv	}
5875279858Sjfv
5876279858Sjfv	vf->vsi.num_queues = 0;
5877279858Sjfv}
5878279858Sjfv
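/*
 * Wait for pending PCIe transactions from the given VF to complete by
 * polling the VF's device status through the PF's configuration space
 * indirect access registers (CIAA/CIAD).  Returns ETIMEDOUT if the
 * transactions never drain.
 */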
5879279858Sjfvstatic int
5880279858Sjfvixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
5881279858Sjfv{
5882279858Sjfv	struct i40e_hw *hw;
5883279858Sjfv	int i;
5884279858Sjfv	uint16_t global_vf_num;
5885279858Sjfv	uint32_t ciad;
5886279858Sjfv
5887279858Sjfv	hw = &pf->hw;
5888279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
5889279858Sjfv
5890279858Sjfv	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
5891279858Sjfv	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
5892279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5893279858Sjfv		ciad = rd32(hw, I40E_PF_PCI_CIAD);
5894279858Sjfv		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
5895279858Sjfv			return (0);
5896279858Sjfv		DELAY(1);
5897279858Sjfv	}
5898279858Sjfv
5899279858Sjfv	return (ETIMEDOUT);
5900279858Sjfv}
5901279858Sjfv
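/*
 * Trigger a software reset (VFSWR) of the given VF, then rebuild its
 * resources via ixl_reinit_vf().
 */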
5902279858Sjfvstatic void
5903279858Sjfvixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5904279858Sjfv{
5905279858Sjfv	struct i40e_hw *hw;
5906279858Sjfv	uint32_t vfrtrig;
5907279858Sjfv
5908279858Sjfv	hw = &pf->hw;
5909279858Sjfv
5910279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5911279858Sjfv	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5912279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5913279858Sjfv	ixl_flush(hw);
5914279858Sjfv
5915279858Sjfv	ixl_reinit_vf(pf, vf);
5916279858Sjfv}
5917279858Sjfv
5918279858Sjfvstatic void
5919279858Sjfvixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
5920279858Sjfv{
5921279858Sjfv	struct i40e_hw *hw;
5922279858Sjfv	uint32_t vfrstat, vfrtrig;
5923279858Sjfv	int i, error;
5924279858Sjfv
5925279858Sjfv	hw = &pf->hw;
5926279858Sjfv
5927279858Sjfv	error = ixl_flush_pcie(pf, vf);
5928279858Sjfv	if (error != 0)
5929279858Sjfv		device_printf(pf->dev,
5930279858Sjfv		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
5931279858Sjfv		    vf->vf_num);
5932279858Sjfv
5933279858Sjfv	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
5934279858Sjfv		DELAY(10);
5935279858Sjfv
5936279858Sjfv		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
5937279858Sjfv		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
5938279858Sjfv			break;
5939279858Sjfv	}
5940279858Sjfv
5941279858Sjfv	if (i == IXL_VF_RESET_TIMEOUT)
5942279858Sjfv		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
5943279858Sjfv
5944279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
5945279858Sjfv
5946279858Sjfv	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
5947279858Sjfv	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
5948279858Sjfv	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
5949279858Sjfv
5950279858Sjfv	if (vf->vsi.seid != 0)
5951279858Sjfv		ixl_disable_rings(&vf->vsi);
5952279858Sjfv
5953279858Sjfv	ixl_vf_release_resources(pf, vf);
5954279858Sjfv	ixl_vf_setup_vsi(pf, vf);
5955279858Sjfv	ixl_vf_map_queues(pf, vf);
5956279858Sjfv
5957279858Sjfv	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
5958279858Sjfv	ixl_flush(hw);
5959279858Sjfv}
5960279858Sjfv
5961279858Sjfvstatic const char *
5962279858Sjfvixl_vc_opcode_str(uint16_t op)
5963279858Sjfv{
5964279858Sjfv
5965279858Sjfv	switch (op) {
5966279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
5967279858Sjfv		return ("VERSION");
5968279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
5969279858Sjfv		return ("RESET_VF");
5970279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
5971279858Sjfv		return ("GET_VF_RESOURCES");
5972279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
5973279858Sjfv		return ("CONFIG_TX_QUEUE");
5974279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
5975279858Sjfv		return ("CONFIG_RX_QUEUE");
5976279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
5977279858Sjfv		return ("CONFIG_VSI_QUEUES");
5978279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
5979279858Sjfv		return ("CONFIG_IRQ_MAP");
5980279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
5981279858Sjfv		return ("ENABLE_QUEUES");
5982279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
5983279858Sjfv		return ("DISABLE_QUEUES");
5984279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
5985279858Sjfv		return ("ADD_ETHER_ADDRESS");
5986279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
5987279858Sjfv		return ("DEL_ETHER_ADDRESS");
5988279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
5989279858Sjfv		return ("ADD_VLAN");
5990279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
5991279858Sjfv		return ("DEL_VLAN");
5992279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
5993279858Sjfv		return ("CONFIG_PROMISCUOUS_MODE");
5994279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
5995279858Sjfv		return ("GET_STATS");
5996279858Sjfv	case I40E_VIRTCHNL_OP_FCOE:
5997279858Sjfv		return ("FCOE");
5998279858Sjfv	case I40E_VIRTCHNL_OP_EVENT:
5999279858Sjfv		return ("EVENT");
6000279858Sjfv	default:
6001279858Sjfv		return ("UNKNOWN");
6002279858Sjfv	}
6003279858Sjfv}
6004279858Sjfv
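/*
 * Debug verbosity level at which virtchnl traffic for a given opcode is
 * logged (see the I40E_VC_DEBUG() callers).  GET_STATS tends to be sent
 * frequently, so it is only logged at a higher debug level.
 */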
6005279858Sjfvstatic int
6006279858Sjfvixl_vc_opcode_level(uint16_t opcode)
6007279858Sjfv{
6008279858Sjfv	switch (opcode) {
6009279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
6010279858Sjfv		return (10);
6011279858Sjfv	default:
6012279858Sjfv		return (5);
6013279858Sjfv	}
6014279858Sjfv}
6015279858Sjfv
6016279858Sjfvstatic void
6017279858Sjfvixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
6018279858Sjfv    enum i40e_status_code status, void *msg, uint16_t len)
6019279858Sjfv{
6020279858Sjfv	struct i40e_hw *hw;
6021279858Sjfv	int global_vf_id;
6022279858Sjfv
6023279858Sjfv	hw = &pf->hw;
6024279858Sjfv	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
6025279858Sjfv
6026279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
6027279858Sjfv	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
6028279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
6029279858Sjfv
6030279858Sjfv	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
6031279858Sjfv}
6032279858Sjfv
6033279858Sjfvstatic void
6034279858Sjfvixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
6035279858Sjfv{
6036279858Sjfv
6037279858Sjfv	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
6038279858Sjfv}
6039279858Sjfv
6040279858Sjfvstatic void
6041279858Sjfvixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
6042279858Sjfv    enum i40e_status_code status, const char *file, int line)
6043279858Sjfv{
6044279858Sjfv
6045279858Sjfv	I40E_VC_DEBUG(pf, 1,
6046279858Sjfv	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
6047279858Sjfv	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
6048279858Sjfv	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
6049279858Sjfv}
6050279858Sjfv
6051279858Sjfvstatic void
6052279858Sjfvixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6053279858Sjfv    uint16_t msg_size)
6054279858Sjfv{
6055279858Sjfv	struct i40e_virtchnl_version_info reply;
6056279858Sjfv
6057279858Sjfv	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
6058279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
6059279858Sjfv		    I40E_ERR_PARAM);
6060279858Sjfv		return;
6061279858Sjfv	}
6062279858Sjfv
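	/*
	 * Remember the API minor version the VF requested; it determines how
	 * a later GET_VF_RESOURCES request is validated and answered.
	 */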
6063299552Serj	vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
6064299552Serj
6065279858Sjfv	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
6066279858Sjfv	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
6067279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
6068279858Sjfv	    sizeof(reply));
6069279858Sjfv}
6070279858Sjfv
6071279858Sjfvstatic void
6072279858Sjfvixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6073279858Sjfv    uint16_t msg_size)
6074279858Sjfv{
6075279858Sjfv
6076279858Sjfv	if (msg_size != 0) {
6077279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
6078279858Sjfv		    I40E_ERR_PARAM);
6079279858Sjfv		return;
6080279858Sjfv	}
6081279858Sjfv
6082279858Sjfv	ixl_reset_vf(pf, vf);
6083279858Sjfv
6084279858Sjfv	/* No response to a reset message. */
6085279858Sjfv}
6086279858Sjfv
6087279858Sjfvstatic void
6088279858Sjfvixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6089279858Sjfv    uint16_t msg_size)
6090279858Sjfv{
6091279858Sjfv	struct i40e_virtchnl_vf_resource reply;
6092279858Sjfv
6093299552Serj	if ((vf->version == 0 && msg_size != 0) ||
6094299552Serj	    (vf->version == 1 && msg_size != 4)) {
6095299552Serj		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size"
6096299552Serj		    " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
6097299552Serj		    vf->version);
6098279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
6099279858Sjfv		    I40E_ERR_PARAM);
6100279858Sjfv		return;
6101279858Sjfv	}
6102279858Sjfv
6103279858Sjfv	bzero(&reply, sizeof(reply));
6104279858Sjfv
6105299552Serj	if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
6106299552Serj		reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
6107299552Serj					 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
6108299552Serj					 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
6109299552Serj	else
6110299552Serj		reply.vf_offload_flags = *(u32 *)msg;
6111279858Sjfv
6112279858Sjfv	reply.num_vsis = 1;
6113279858Sjfv	reply.num_queue_pairs = vf->vsi.num_queues;
6114279858Sjfv	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
6115279858Sjfv	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
6116279858Sjfv	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
6117279858Sjfv	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
6118279858Sjfv	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
6119279858Sjfv
6120279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
6121279858Sjfv	    I40E_SUCCESS, &reply, sizeof(reply));
6122279858Sjfv}
6123279858Sjfv
6124279858Sjfvstatic int
6125279858Sjfvixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
6126279858Sjfv    struct i40e_virtchnl_txq_info *info)
6127279858Sjfv{
6128279858Sjfv	struct i40e_hw *hw;
6129279858Sjfv	struct i40e_hmc_obj_txq txq;
6130279858Sjfv	uint16_t global_queue_num, global_vf_num;
6131279858Sjfv	enum i40e_status_code status;
6132279858Sjfv	uint32_t qtx_ctl;
6133279858Sjfv
6134279858Sjfv	hw = &pf->hw;
6135279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
6136279858Sjfv	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
6137279858Sjfv	bzero(&txq, sizeof(txq));
6138279858Sjfv
6139279858Sjfv	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
6140279858Sjfv	if (status != I40E_SUCCESS)
6141269198Sjfv		return (EINVAL);
6142279858Sjfv
6143279858Sjfv	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
6144279858Sjfv
6145279858Sjfv	txq.head_wb_ena = info->headwb_enabled;
6146279858Sjfv	txq.head_wb_addr = info->dma_headwb_addr;
6147279858Sjfv	txq.qlen = info->ring_len;
6148279858Sjfv	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
6149279858Sjfv	txq.rdylist_act = 0;
6150279858Sjfv
6151279858Sjfv	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
6152279858Sjfv	if (status != I40E_SUCCESS)
6153279858Sjfv		return (EINVAL);
6154279858Sjfv
6155279858Sjfv	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
6156279858Sjfv	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
6157279858Sjfv	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
6158279858Sjfv	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
6159279858Sjfv	ixl_flush(hw);
6160279858Sjfv
6161279858Sjfv	return (0);
6162279858Sjfv}
6163279858Sjfv
6164279858Sjfvstatic int
6165279858Sjfvixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
6166279858Sjfv    struct i40e_virtchnl_rxq_info *info)
6167279858Sjfv{
6168279858Sjfv	struct i40e_hw *hw;
6169279858Sjfv	struct i40e_hmc_obj_rxq rxq;
6170279858Sjfv	uint16_t global_queue_num;
6171279858Sjfv	enum i40e_status_code status;
6172279858Sjfv
6173279858Sjfv	hw = &pf->hw;
6174279858Sjfv	global_queue_num = vf->vsi.first_queue + info->queue_id;
6175279858Sjfv	bzero(&rxq, sizeof(rxq));
6176279858Sjfv
6177279858Sjfv	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
6178279858Sjfv		return (EINVAL);
6179279858Sjfv
6180279858Sjfv	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
6181279858Sjfv	    info->max_pkt_size < ETHER_MIN_LEN)
6182279858Sjfv		return (EINVAL);
6183279858Sjfv
6184279858Sjfv	if (info->splithdr_enabled) {
6185279858Sjfv		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
6186279858Sjfv			return (EINVAL);
6187279858Sjfv
6188279858Sjfv		rxq.hsplit_0 = info->rx_split_pos &
6189279858Sjfv		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
6190279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
6191279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
6192279858Sjfv		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
6193279858Sjfv		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
6194279858Sjfv
6195279858Sjfv		rxq.dtype = 2;
6196269198Sjfv	}
6197269198Sjfv
6198279858Sjfv	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
6199279858Sjfv	if (status != I40E_SUCCESS)
6200279858Sjfv		return (EINVAL);
6201269198Sjfv
6202279858Sjfv	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
6203279858Sjfv	rxq.qlen = info->ring_len;
6204269198Sjfv
6205279858Sjfv	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
6206269198Sjfv
6207279858Sjfv	rxq.dsize = 1;
6208279858Sjfv	rxq.crcstrip = 1;
6209279858Sjfv	rxq.l2tsel = 1;
6210269198Sjfv
6211279858Sjfv	rxq.rxmax = info->max_pkt_size;
6212279858Sjfv	rxq.tphrdesc_ena = 1;
6213279858Sjfv	rxq.tphwdesc_ena = 1;
6214279858Sjfv	rxq.tphdata_ena = 1;
6215279858Sjfv	rxq.tphhead_ena = 1;
6216279858Sjfv	rxq.lrxqthresh = 2;
6217279858Sjfv	rxq.prefena = 1;
6218279858Sjfv
6219279858Sjfv	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
6220279858Sjfv	if (status != I40E_SUCCESS)
6221279858Sjfv		return (EINVAL);
6222279858Sjfv
6223279858Sjfv	return (0);
6224279858Sjfv}
6225279858Sjfv
6226279858Sjfvstatic void
6227279858Sjfvixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6228279858Sjfv    uint16_t msg_size)
6229279858Sjfv{
6230279858Sjfv	struct i40e_virtchnl_vsi_queue_config_info *info;
6231279858Sjfv	struct i40e_virtchnl_queue_pair_info *pair;
6232279858Sjfv	int i;
6233279858Sjfv
6234279858Sjfv	if (msg_size < sizeof(*info)) {
6235279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
6236279858Sjfv		    I40E_ERR_PARAM);
6237279858Sjfv		return;
6238279858Sjfv	}
6239279858Sjfv
6240279858Sjfv	info = msg;
6241279858Sjfv	if (info->num_queue_pairs == 0) {
6242279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
6243279858Sjfv		    I40E_ERR_PARAM);
6244279858Sjfv		return;
6245279858Sjfv	}
6246279858Sjfv
6247279858Sjfv	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
6248279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
6249279858Sjfv		    I40E_ERR_PARAM);
6250279858Sjfv		return;
6251279858Sjfv	}
6252279858Sjfv
6253279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
6254279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
6255279858Sjfv		    I40E_ERR_PARAM);
6256279858Sjfv		return;
6257279858Sjfv	}
6258279858Sjfv
6259279858Sjfv	for (i = 0; i < info->num_queue_pairs; i++) {
6260279858Sjfv		pair = &info->qpair[i];
6261279858Sjfv
6262279858Sjfv		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
6263279858Sjfv		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
6264279858Sjfv		    pair->txq.queue_id != pair->rxq.queue_id ||
6265279858Sjfv		    pair->txq.queue_id >= vf->vsi.num_queues) {
6266279858Sjfv
6267279858Sjfv			i40e_send_vf_nack(pf, vf,
6268279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
6269279858Sjfv			return;
6270279858Sjfv		}
6271279858Sjfv
6272279858Sjfv		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
6273279858Sjfv			i40e_send_vf_nack(pf, vf,
6274279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
6275279858Sjfv			return;
6276279858Sjfv		}
6277279858Sjfv
6278279858Sjfv		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
6279279858Sjfv			i40e_send_vf_nack(pf, vf,
6280279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
6281279858Sjfv			return;
6282279858Sjfv		}
6283279858Sjfv	}
6284279858Sjfv
6285279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
6286279858Sjfv}
6287279858Sjfv
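/*
 * Write one RX or TX queue interrupt cause control register, chaining the
 * queue onto the vector's interrupt linked list: the entry records the MSI-X
 * vector, the ITR index, and (via NEXTQ) the previously linked queue.
 */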
6288279858Sjfvstatic void
6289279858Sjfvixl_vf_set_qctl(struct ixl_pf *pf,
6290279858Sjfv    const struct i40e_virtchnl_vector_map *vector,
6291279858Sjfv    enum i40e_queue_type cur_type, uint16_t cur_queue,
6292279858Sjfv    enum i40e_queue_type *last_type, uint16_t *last_queue)
6293279858Sjfv{
6294279858Sjfv	uint32_t offset, qctl;
6295279858Sjfv	uint16_t itr_indx;
6296279858Sjfv
6297279858Sjfv	if (cur_type == I40E_QUEUE_TYPE_RX) {
6298279858Sjfv		offset = I40E_QINT_RQCTL(cur_queue);
6299279858Sjfv		itr_indx = vector->rxitr_idx;
6300279858Sjfv	} else {
6301279858Sjfv		offset = I40E_QINT_TQCTL(cur_queue);
6302279858Sjfv		itr_indx = vector->txitr_idx;
6303279858Sjfv	}
6304279858Sjfv
6305279858Sjfv	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
6306279858Sjfv	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
6307279858Sjfv	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
6308279858Sjfv	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
6309279858Sjfv	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
6310279858Sjfv
6311279858Sjfv	wr32(&pf->hw, offset, qctl);
6312279858Sjfv
6313279858Sjfv	*last_type = cur_type;
6314279858Sjfv	*last_queue = cur_queue;
6315279858Sjfv}
6316279858Sjfv
6317279858Sjfvstatic void
6318279858Sjfvixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
6319279858Sjfv    const struct i40e_virtchnl_vector_map *vector)
6320279858Sjfv{
6321279858Sjfv	struct i40e_hw *hw;
6322279858Sjfv	u_int qindex;
6323279858Sjfv	enum i40e_queue_type type, last_type;
6324279858Sjfv	uint32_t lnklst_reg;
6325279858Sjfv	uint16_t rxq_map, txq_map, cur_queue, last_queue;
6326279858Sjfv
6327279858Sjfv	hw = &pf->hw;
6328279858Sjfv
6329279858Sjfv	rxq_map = vector->rxq_map;
6330279858Sjfv	txq_map = vector->txq_map;
6331279858Sjfv
6332279858Sjfv	last_queue = IXL_END_OF_INTR_LNKLST;
6333279858Sjfv	last_type = I40E_QUEUE_TYPE_RX;
6334279858Sjfv
6335279858Sjfv	/*
6336279858Sjfv	 * The datasheet says to optimize performance, RX queues and TX queues
6337279858Sjfv	 * should be interleaved in the interrupt linked list, so we process
6338279858Sjfv	 * both at once here.
6339279858Sjfv	 */
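	/*
	 * The list is built back-to-front: each queue written below gets a
	 * NEXTQ pointer to the previously processed queue, and the last queue
	 * processed becomes the list head written to the LNKLST register at
	 * the end of this function.
	 */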
6340279858Sjfv	while ((rxq_map != 0) || (txq_map != 0)) {
6341279858Sjfv		if (txq_map != 0) {
6342279858Sjfv			qindex = ffs(txq_map) - 1;
6343279858Sjfv			type = I40E_QUEUE_TYPE_TX;
6344279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
6345279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
6346279858Sjfv			    &last_type, &last_queue);
6347279858Sjfv			txq_map &= ~(1 << qindex);
6348279858Sjfv		}
6349279858Sjfv
6350279858Sjfv		if (rxq_map != 0) {
6351279858Sjfv			qindex = ffs(rxq_map) - 1;
6352279858Sjfv			type = I40E_QUEUE_TYPE_RX;
6353279858Sjfv			cur_queue = vf->vsi.first_queue + qindex;
6354279858Sjfv			ixl_vf_set_qctl(pf, vector, type, cur_queue,
6355279858Sjfv			    &last_type, &last_queue);
6356279858Sjfv			rxq_map &= ~(1 << qindex);
6357279858Sjfv		}
6358279858Sjfv	}
6359279858Sjfv
6360279858Sjfv	if (vector->vector_id == 0)
6361279858Sjfv		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
6362279858Sjfv	else
6363279858Sjfv		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
6364279858Sjfv		    vf->vf_num);
6365279858Sjfv	wr32(hw, lnklst_reg,
6366279858Sjfv	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
6367279858Sjfv	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
6368279858Sjfv
6369279858Sjfv	ixl_flush(hw);
6370279858Sjfv}
6371279858Sjfv
6372279858Sjfvstatic void
6373279858Sjfvixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6374279858Sjfv    uint16_t msg_size)
6375279858Sjfv{
6376279858Sjfv	struct i40e_virtchnl_irq_map_info *map;
6377279858Sjfv	struct i40e_virtchnl_vector_map *vector;
6378279858Sjfv	struct i40e_hw *hw;
6379279858Sjfv	int i, largest_txq, largest_rxq;
6380279858Sjfv
6381279858Sjfv	hw = &pf->hw;
6382279858Sjfv
6383279858Sjfv	if (msg_size < sizeof(*map)) {
6384279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6385279858Sjfv		    I40E_ERR_PARAM);
6386279858Sjfv		return;
6387279858Sjfv	}
6388279858Sjfv
6389279858Sjfv	map = msg;
6390279858Sjfv	if (map->num_vectors == 0) {
6391279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6392279858Sjfv		    I40E_ERR_PARAM);
6393279858Sjfv		return;
6394279858Sjfv	}
6395279858Sjfv
6396279858Sjfv	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
6397279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6398279858Sjfv		    I40E_ERR_PARAM);
6399279858Sjfv		return;
6400279858Sjfv	}
6401279858Sjfv
6402279858Sjfv	for (i = 0; i < map->num_vectors; i++) {
6403279858Sjfv		vector = &map->vecmap[i];
6404279858Sjfv
6405279858Sjfv		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
6406279858Sjfv		    vector->vsi_id != vf->vsi.vsi_num) {
6407279858Sjfv			i40e_send_vf_nack(pf, vf,
6408279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
6409279858Sjfv			return;
6410279858Sjfv		}
6411279858Sjfv
6412279858Sjfv		if (vector->rxq_map != 0) {
6413279858Sjfv			largest_rxq = fls(vector->rxq_map) - 1;
6414279858Sjfv			if (largest_rxq >= vf->vsi.num_queues) {
6415279858Sjfv				i40e_send_vf_nack(pf, vf,
6416279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6417279858Sjfv				    I40E_ERR_PARAM);
6418279858Sjfv				return;
6419279858Sjfv			}
6420279858Sjfv		}
6421279858Sjfv
6422279858Sjfv		if (vector->txq_map != 0) {
6423279858Sjfv			largest_txq = fls(vector->txq_map) - 1;
6424279858Sjfv			if (largest_txq >= vf->vsi.num_queues) {
6425279858Sjfv				i40e_send_vf_nack(pf, vf,
6426279858Sjfv				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6427279858Sjfv				    I40E_ERR_PARAM);
6428279858Sjfv				return;
6429279858Sjfv			}
6430279858Sjfv		}
6431279858Sjfv
6432279858Sjfv		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
6433279858Sjfv		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
6434279858Sjfv			i40e_send_vf_nack(pf, vf,
6435279858Sjfv			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
6436279858Sjfv			    I40E_ERR_PARAM);
6437279858Sjfv			return;
6438279858Sjfv		}
6439279858Sjfv
6440279858Sjfv		ixl_vf_config_vector(pf, vf, vector);
6441279858Sjfv	}
6442279858Sjfv
6443279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
6444279858Sjfv}
6445279858Sjfv
6446279858Sjfvstatic void
6447279858Sjfvixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6448279858Sjfv    uint16_t msg_size)
6449279858Sjfv{
6450279858Sjfv	struct i40e_virtchnl_queue_select *select;
6451279858Sjfv	int error;
6452279858Sjfv
6453279858Sjfv	if (msg_size != sizeof(*select)) {
6454279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6455279858Sjfv		    I40E_ERR_PARAM);
6456279858Sjfv		return;
6457279858Sjfv	}
6458279858Sjfv
6459279858Sjfv	select = msg;
6460279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
6461279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
6462279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6463279858Sjfv		    I40E_ERR_PARAM);
6464279858Sjfv		return;
6465279858Sjfv	}
6466279858Sjfv
6467279858Sjfv	error = ixl_enable_rings(&vf->vsi);
6468269198Sjfv	if (error) {
6469279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6470279858Sjfv		    I40E_ERR_TIMEOUT);
6471279858Sjfv		return;
6472269198Sjfv	}
6473269198Sjfv
6474279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6475269198Sjfv}
6476266423Sjfv
6477279858Sjfvstatic void
6478279858Sjfvixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6479279858Sjfv    void *msg, uint16_t msg_size)
6480279858Sjfv{
6481279858Sjfv	struct i40e_virtchnl_queue_select *select;
6482279858Sjfv	int error;
6483279858Sjfv
6484279858Sjfv	if (msg_size != sizeof(*select)) {
6485279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6486279858Sjfv		    I40E_ERR_PARAM);
6487279858Sjfv		return;
6488279858Sjfv	}
6489279858Sjfv
6490279858Sjfv	select = msg;
6491279858Sjfv	if (select->vsi_id != vf->vsi.vsi_num ||
6492279858Sjfv	    select->rx_queues == 0 || select->tx_queues == 0) {
6493279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6494279858Sjfv		    I40E_ERR_PARAM);
6495279858Sjfv		return;
6496279858Sjfv	}
6497279858Sjfv
6498279858Sjfv	error = ixl_disable_rings(&vf->vsi);
6499279858Sjfv	if (error) {
6500279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6501279858Sjfv		    I40E_ERR_TIMEOUT);
6502279858Sjfv		return;
6503279858Sjfv	}
6504279858Sjfv
6505279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6506279858Sjfv}
6507279858Sjfv
6508279858Sjfvstatic boolean_t
6509279858Sjfvixl_zero_mac(const uint8_t *addr)
6510279858Sjfv{
6511279858Sjfv	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6512279858Sjfv
6513279858Sjfv	return (cmp_etheraddr(addr, zero));
6514279858Sjfv}
6515279858Sjfv
6516279858Sjfvstatic boolean_t
6517279858Sjfvixl_bcast_mac(const uint8_t *addr)
6518279858Sjfv{
6519279858Sjfv
6520279858Sjfv	return (cmp_etheraddr(addr, ixl_bcast_addr));
6521279858Sjfv}
6522279858Sjfv
6523279858Sjfvstatic int
6524279858Sjfvixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6525279858Sjfv{
6526279858Sjfv
6527279858Sjfv	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6528279858Sjfv		return (EINVAL);
6529279858Sjfv
6530279858Sjfv	/*
6531279858Sjfv	 * If the VF is not allowed to change its MAC address, don't let it
6532279858Sjfv	 * set a MAC filter for an address that is not a multicast address and
6533279858Sjfv	 * is not its assigned MAC.
6534279858Sjfv	 */
6535279858Sjfv	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6536279858Sjfv	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6537279858Sjfv		return (EPERM);
6538279858Sjfv
6539279858Sjfv	return (0);
6540279858Sjfv}
6541279858Sjfv
6542279858Sjfvstatic void
6543279858Sjfvixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6544279858Sjfv    uint16_t msg_size)
6545279858Sjfv{
6546279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6547279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6548279858Sjfv	struct ixl_vsi *vsi;
6549279858Sjfv	int i;
6550279858Sjfv	size_t expected_size;
6551279858Sjfv
6552279858Sjfv	vsi = &vf->vsi;
6553279858Sjfv
6554279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6555279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6556279858Sjfv		    I40E_ERR_PARAM);
6557279858Sjfv		return;
6558279858Sjfv	}
6559279858Sjfv
6560279858Sjfv	addr_list = msg;
6561279858Sjfv	expected_size = sizeof(*addr_list) +
6562279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6563279858Sjfv
6564279858Sjfv	if (addr_list->num_elements == 0 ||
6565279858Sjfv	    addr_list->vsi_id != vsi->vsi_num ||
6566279858Sjfv	    msg_size != expected_size) {
6567279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6568279858Sjfv		    I40E_ERR_PARAM);
6569279858Sjfv		return;
6570279858Sjfv	}
6571279858Sjfv
6572279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6573279858Sjfv		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
6574279858Sjfv			i40e_send_vf_nack(pf, vf,
6575279858Sjfv			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6576279858Sjfv			return;
6577279858Sjfv		}
6578279858Sjfv	}
6579279858Sjfv
6580279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6581279858Sjfv		addr = &addr_list->list[i];
6582279858Sjfv		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
6583279858Sjfv	}
6584279858Sjfv
6585279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
6586279858Sjfv}
6587279858Sjfv
6588279858Sjfvstatic void
6589279858Sjfvixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6590279858Sjfv    uint16_t msg_size)
6591279858Sjfv{
6592279858Sjfv	struct i40e_virtchnl_ether_addr_list *addr_list;
6593279858Sjfv	struct i40e_virtchnl_ether_addr *addr;
6594279858Sjfv	size_t expected_size;
6595279858Sjfv	int i;
6596279858Sjfv
6597279858Sjfv	if (msg_size < sizeof(*addr_list)) {
6598279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6599279858Sjfv		    I40E_ERR_PARAM);
6600279858Sjfv		return;
6601279858Sjfv	}
6602279858Sjfv
6603279858Sjfv	addr_list = msg;
6604279858Sjfv	expected_size = sizeof(*addr_list) +
6605279858Sjfv	    addr_list->num_elements * sizeof(*addr);
6606279858Sjfv
6607279858Sjfv	if (addr_list->num_elements == 0 ||
6608279858Sjfv	    addr_list->vsi_id != vf->vsi.vsi_num ||
6609279858Sjfv	    msg_size != expected_size) {
6610279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
6611279858Sjfv		    I40E_ERR_PARAM);
6612279858Sjfv		return;
6613279858Sjfv	}
6614279858Sjfv
6615279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6616279858Sjfv		addr = &addr_list->list[i];
6617279858Sjfv		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6618279858Sjfv			i40e_send_vf_nack(pf, vf,
6619279858Sjfv			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
6620279858Sjfv			return;
6621279858Sjfv		}
6622279858Sjfv	}
6623279858Sjfv
6624279858Sjfv	for (i = 0; i < addr_list->num_elements; i++) {
6625279858Sjfv		addr = &addr_list->list[i];
6626279858Sjfv		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6627279858Sjfv	}
6628279858Sjfv
6629279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6630279858Sjfv}
6631279858Sjfv
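/*
 * Update the VF's VSI so the hardware strips VLAN tags on receive
 * (PVLAN_EMOD_STR_BOTH) while continuing to allow all VLAN IDs
 * (PVLAN_MODE_ALL).
 */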
6632279858Sjfvstatic enum i40e_status_code
6633279858Sjfvixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
6634279858Sjfv{
6635279858Sjfv	struct i40e_vsi_context vsi_ctx;
6636279858Sjfv
6637279858Sjfv	vsi_ctx.seid = vf->vsi.seid;
6638279858Sjfv
6639279858Sjfv	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
6640279858Sjfv	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
6641279858Sjfv	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
6642279858Sjfv	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6643279858Sjfv	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
6644279858Sjfv}
6645279858Sjfv
6646279858Sjfvstatic void
6647279858Sjfvixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6648279858Sjfv    uint16_t msg_size)
6649279858Sjfv{
6650279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6651279858Sjfv	enum i40e_status_code code;
6652279858Sjfv	size_t expected_size;
6653279858Sjfv	int i;
6654279858Sjfv
6655279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6656279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6657279858Sjfv		    I40E_ERR_PARAM);
6658279858Sjfv		return;
6659279858Sjfv	}
6660279858Sjfv
6661279858Sjfv	filter_list = msg;
6662279858Sjfv	expected_size = sizeof(*filter_list) +
6663279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6664279858Sjfv	if (filter_list->num_elements == 0 ||
6665279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6666279858Sjfv	    msg_size != expected_size) {
6667279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6668279858Sjfv		    I40E_ERR_PARAM);
6669279858Sjfv		return;
6670279858Sjfv	}
6671279858Sjfv
6672279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6673279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6674279858Sjfv		    I40E_ERR_PARAM);
6675279858Sjfv		return;
6676279858Sjfv	}
6677279858Sjfv
6678279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6679279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6680279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6681279858Sjfv			    I40E_ERR_PARAM);
6682279858Sjfv			return;
6683279858Sjfv		}
6684279858Sjfv	}
6685279858Sjfv
6686279858Sjfv	code = ixl_vf_enable_vlan_strip(pf, vf);
6687279858Sjfv	if (code != I40E_SUCCESS) {
6688279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
6689279858Sjfv		    I40E_ERR_PARAM);
		return;
6690279858Sjfv	}
6691279858Sjfv
6692279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6693279858Sjfv		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6694279858Sjfv
6695279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
6696279858Sjfv}
6697279858Sjfv
6698279858Sjfvstatic void
6699279858Sjfvixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6700279858Sjfv    uint16_t msg_size)
6701279858Sjfv{
6702279858Sjfv	struct i40e_virtchnl_vlan_filter_list *filter_list;
6703279858Sjfv	int i;
6704279858Sjfv	size_t expected_size;
6705279858Sjfv
6706279858Sjfv	if (msg_size < sizeof(*filter_list)) {
6707279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6708279858Sjfv		    I40E_ERR_PARAM);
6709279858Sjfv		return;
6710279858Sjfv	}
6711279858Sjfv
6712279858Sjfv	filter_list = msg;
6713279858Sjfv	expected_size = sizeof(*filter_list) +
6714279858Sjfv	    filter_list->num_elements * sizeof(uint16_t);
6715279858Sjfv	if (filter_list->num_elements == 0 ||
6716279858Sjfv	    filter_list->vsi_id != vf->vsi.vsi_num ||
6717279858Sjfv	    msg_size != expected_size) {
6718279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6719279858Sjfv		    I40E_ERR_PARAM);
6720279858Sjfv		return;
6721279858Sjfv	}
6722279858Sjfv
6723279858Sjfv	for (i = 0; i < filter_list->num_elements; i++) {
6724279858Sjfv		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
6725279858Sjfv			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6726279858Sjfv			    I40E_ERR_PARAM);
6727279858Sjfv			return;
6728279858Sjfv		}
6729279858Sjfv	}
6730279858Sjfv
6731279858Sjfv	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
6732279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
6733279858Sjfv		    I40E_ERR_PARAM);
6734279858Sjfv		return;
6735279858Sjfv	}
6736279858Sjfv
6737279858Sjfv	for (i = 0; i < filter_list->num_elements; i++)
6738279858Sjfv		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
6739279858Sjfv
6740279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
6741279858Sjfv}
6742279858Sjfv
6743279858Sjfvstatic void
6744279858Sjfvixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6745279858Sjfv    void *msg, uint16_t msg_size)
6746279858Sjfv{
6747279858Sjfv	struct i40e_virtchnl_promisc_info *info;
6748279858Sjfv	enum i40e_status_code code;
6749279858Sjfv
6750279858Sjfv	if (msg_size != sizeof(*info)) {
6751279858Sjfv		i40e_send_vf_nack(pf, vf,
6752279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6753279858Sjfv		return;
6754279858Sjfv	}
6755279858Sjfv
6756295787Skevlo	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
6757279858Sjfv		i40e_send_vf_nack(pf, vf,
6758279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6759279858Sjfv		return;
6760279858Sjfv	}
6761279858Sjfv
6762279858Sjfv	info = msg;
6763279858Sjfv	if (info->vsi_id != vf->vsi.vsi_num) {
6764279858Sjfv		i40e_send_vf_nack(pf, vf,
6765279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
6766279858Sjfv		return;
6767279858Sjfv	}
6768279858Sjfv
6769279858Sjfv	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
6770279858Sjfv	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
6771279858Sjfv	if (code != I40E_SUCCESS) {
6772279858Sjfv		i40e_send_vf_nack(pf, vf,
6773279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6774279858Sjfv		return;
6775279858Sjfv	}
6776279858Sjfv
6777279858Sjfv	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
6778279858Sjfv	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
6779279858Sjfv	if (code != I40E_SUCCESS) {
6780279858Sjfv		i40e_send_vf_nack(pf, vf,
6781279858Sjfv		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
6782279858Sjfv		return;
6783279858Sjfv	}
6784279858Sjfv
6785279858Sjfv	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
6786279858Sjfv}
6787279858Sjfv
6788279858Sjfvstatic void
6789279858Sjfvixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6790279858Sjfv    uint16_t msg_size)
6791279858Sjfv{
6792279858Sjfv	struct i40e_virtchnl_queue_select *queue;
6793279858Sjfv
6794279858Sjfv	if (msg_size != sizeof(*queue)) {
6795279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6796279858Sjfv		    I40E_ERR_PARAM);
6797279858Sjfv		return;
6798279858Sjfv	}
6799279858Sjfv
6800279858Sjfv	queue = msg;
6801279858Sjfv	if (queue->vsi_id != vf->vsi.vsi_num) {
6802279858Sjfv		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6803279858Sjfv		    I40E_ERR_PARAM);
6804279858Sjfv		return;
6805279858Sjfv	}
6806279858Sjfv
6807279858Sjfv	ixl_update_eth_stats(&vf->vsi);
6808279858Sjfv
6809279858Sjfv	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
6810279858Sjfv	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
6811279858Sjfv}
6812279858Sjfv
6813279858Sjfvstatic void
6814279858Sjfvixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
6815279858Sjfv{
6816279858Sjfv	struct ixl_vf *vf;
6817279858Sjfv	void *msg;
6818279858Sjfv	uint16_t vf_num, msg_size;
6819279858Sjfv	uint32_t opcode;
6820279858Sjfv
6821279858Sjfv	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
6822279858Sjfv	opcode = le32toh(event->desc.cookie_high);
6823279858Sjfv
6824279858Sjfv	if (vf_num >= pf->num_vfs) {
6825279858Sjfv		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
6826279858Sjfv		return;
6827279858Sjfv	}
6828279858Sjfv
6829279858Sjfv	vf = &pf->vfs[vf_num];
6830279858Sjfv	msg = event->msg_buf;
6831279858Sjfv	msg_size = event->msg_len;
6832279858Sjfv
6833279858Sjfv	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
6834279858Sjfv	    "Got msg %s(%d) from VF-%d of size %d\n",
6835279858Sjfv	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
6836279858Sjfv
6837279858Sjfv	switch (opcode) {
6838279858Sjfv	case I40E_VIRTCHNL_OP_VERSION:
6839279858Sjfv		ixl_vf_version_msg(pf, vf, msg, msg_size);
6840279858Sjfv		break;
6841279858Sjfv	case I40E_VIRTCHNL_OP_RESET_VF:
6842279858Sjfv		ixl_vf_reset_msg(pf, vf, msg, msg_size);
6843279858Sjfv		break;
6844279858Sjfv	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
6845279858Sjfv		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
6846279858Sjfv		break;
6847279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
6848279858Sjfv		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
6849279858Sjfv		break;
6850279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
6851279858Sjfv		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
6852279858Sjfv		break;
6853279858Sjfv	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
6854279858Sjfv		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
6855279858Sjfv		break;
6856279858Sjfv	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
6857279858Sjfv		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
6858279858Sjfv		break;
6859279858Sjfv	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
6860279858Sjfv		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
6861279858Sjfv		break;
6862279858Sjfv	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
6863279858Sjfv		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
6864279858Sjfv		break;
6865279858Sjfv	case I40E_VIRTCHNL_OP_ADD_VLAN:
6866279858Sjfv		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
6867279858Sjfv		break;
6868279858Sjfv	case I40E_VIRTCHNL_OP_DEL_VLAN:
6869279858Sjfv		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
6870279858Sjfv		break;
6871279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
6872279858Sjfv		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
6873279858Sjfv		break;
6874279858Sjfv	case I40E_VIRTCHNL_OP_GET_STATS:
6875279858Sjfv		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
6876279858Sjfv		break;
6877279858Sjfv
6878279858Sjfv	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
6879279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
6880279858Sjfv	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
6881279858Sjfv	default:
6882279858Sjfv		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
6883279858Sjfv		break;
6884279858Sjfv	}
6885279858Sjfv}
6886279858Sjfv
6887279858Sjfv/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
6888279858Sjfvstatic void
6889279858Sjfvixl_handle_vflr(void *arg, int pending)
6890279858Sjfv{
6891279858Sjfv	struct ixl_pf *pf;
6892279858Sjfv	struct i40e_hw *hw;
6893279858Sjfv	uint16_t global_vf_num;
6894279858Sjfv	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
6895279858Sjfv	int i;
6896279858Sjfv
6897279858Sjfv	pf = arg;
6898279858Sjfv	hw = &pf->hw;
6899279858Sjfv
6900279858Sjfv	IXL_PF_LOCK(pf);
6901279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
6902279858Sjfv		global_vf_num = hw->func_caps.vf_base_id + i;
6903279858Sjfv
6904279858Sjfv		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
6905279858Sjfv		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
6906279858Sjfv		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
6907279858Sjfv		if (vflrstat & vflrstat_mask) {
6908279858Sjfv			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
6909279858Sjfv			    vflrstat_mask);
6910279858Sjfv
6911279858Sjfv			ixl_reinit_vf(pf, &pf->vfs[i]);
6912279858Sjfv		}
6913279858Sjfv	}
6914279858Sjfv
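	/* Re-enable the VFLR interrupt cause so further FLRs are detected. */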
6915279858Sjfv	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
6916279858Sjfv	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
6917279858Sjfv	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
6918279858Sjfv	ixl_flush(hw);
6919279858Sjfv
6920279858Sjfv	IXL_PF_UNLOCK(pf);
6921279858Sjfv}
6922279858Sjfv
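/* Translate an admin queue error code into its closest errno(2) equivalent. */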
6923279858Sjfvstatic int
6924279858Sjfvixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
6925279858Sjfv{
6926279858Sjfv
6927279858Sjfv	switch (err) {
6928279858Sjfv	case I40E_AQ_RC_EPERM:
6929279858Sjfv		return (EPERM);
6930279858Sjfv	case I40E_AQ_RC_ENOENT:
6931279858Sjfv		return (ENOENT);
6932279858Sjfv	case I40E_AQ_RC_ESRCH:
6933279858Sjfv		return (ESRCH);
6934279858Sjfv	case I40E_AQ_RC_EINTR:
6935279858Sjfv		return (EINTR);
6936279858Sjfv	case I40E_AQ_RC_EIO:
6937279858Sjfv		return (EIO);
6938279858Sjfv	case I40E_AQ_RC_ENXIO:
6939279858Sjfv		return (ENXIO);
6940279858Sjfv	case I40E_AQ_RC_E2BIG:
6941279858Sjfv		return (E2BIG);
6942279858Sjfv	case I40E_AQ_RC_EAGAIN:
6943279858Sjfv		return (EAGAIN);
6944279858Sjfv	case I40E_AQ_RC_ENOMEM:
6945279858Sjfv		return (ENOMEM);
6946279858Sjfv	case I40E_AQ_RC_EACCES:
6947279858Sjfv		return (EACCES);
6948279858Sjfv	case I40E_AQ_RC_EFAULT:
6949279858Sjfv		return (EFAULT);
6950279858Sjfv	case I40E_AQ_RC_EBUSY:
6951279858Sjfv		return (EBUSY);
6952279858Sjfv	case I40E_AQ_RC_EEXIST:
6953279858Sjfv		return (EEXIST);
6954279858Sjfv	case I40E_AQ_RC_EINVAL:
6955279858Sjfv		return (EINVAL);
6956279858Sjfv	case I40E_AQ_RC_ENOTTY:
6957279858Sjfv		return (ENOTTY);
6958279858Sjfv	case I40E_AQ_RC_ENOSPC:
6959279858Sjfv		return (ENOSPC);
6960279858Sjfv	case I40E_AQ_RC_ENOSYS:
6961279858Sjfv		return (ENOSYS);
6962279858Sjfv	case I40E_AQ_RC_ERANGE:
6963279858Sjfv		return (ERANGE);
6964279858Sjfv	case I40E_AQ_RC_EFLUSHED:
6965279858Sjfv		return (EINVAL);	/* No exact equivalent in errno.h */
6966279858Sjfv	case I40E_AQ_RC_BAD_ADDR:
6967279858Sjfv		return (EFAULT);
6968279858Sjfv	case I40E_AQ_RC_EMODE:
6969279858Sjfv		return (EPERM);
6970279858Sjfv	case I40E_AQ_RC_EFBIG:
6971279858Sjfv		return (EFBIG);
6972279858Sjfv	default:
6973279858Sjfv		return (EINVAL);
6974279858Sjfv	}
6975279858Sjfv}
6976279858Sjfv
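/*
 * Callback for when SR-IOV is enabled on the PF: allocate the per-VF state
 * array, create the VEB used to switch traffic to the VFs, and re-enable
 * the admin queue interrupt.
 */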
6977279858Sjfvstatic int
6978299546Serjixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
6979279858Sjfv{
6980279858Sjfv	struct ixl_pf *pf;
6981279858Sjfv	struct i40e_hw *hw;
6982279858Sjfv	struct ixl_vsi *pf_vsi;
6983279858Sjfv	enum i40e_status_code ret;
6984279858Sjfv	int i, error;
6985279858Sjfv
6986279858Sjfv	pf = device_get_softc(dev);
6987279858Sjfv	hw = &pf->hw;
6988279858Sjfv	pf_vsi = &pf->vsi;
6989279858Sjfv
6990279858Sjfv	IXL_PF_LOCK(pf);
6991279858Sjfv	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
6992279858Sjfv	    M_ZERO);
6993279858Sjfv
6994279858Sjfv	if (pf->vfs == NULL) {
6995279858Sjfv		error = ENOMEM;
6996279858Sjfv		goto fail;
6997279858Sjfv	}
6998279858Sjfv
6999279858Sjfv	for (i = 0; i < num_vfs; i++)
7000279858Sjfv		sysctl_ctx_init(&pf->vfs[i].ctx);
7001279858Sjfv
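	/* Add a VEB between the PF VSI and its uplink; VF VSIs attach to it. */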
7002279858Sjfv	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
7003299552Serj	    1, FALSE, &pf->veb_seid, FALSE, NULL);
7004279858Sjfv	if (ret != I40E_SUCCESS) {
7005279858Sjfv		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
7006279858Sjfv		device_printf(dev, "add_veb failed; code=%d error=%d\n", ret,
7007279858Sjfv		    error);
7008279858Sjfv		goto fail;
7009279858Sjfv	}
7010279858Sjfv
7011279858Sjfv	ixl_configure_msix(pf);
7012279858Sjfv	ixl_enable_adminq(hw);
7013279858Sjfv
7014279858Sjfv	pf->num_vfs = num_vfs;
7015279858Sjfv	IXL_PF_UNLOCK(pf);
7016279858Sjfv	return (0);
7017279858Sjfv
7018279858Sjfvfail:
7019279858Sjfv	free(pf->vfs, M_IXL);
7020279858Sjfv	pf->vfs = NULL;
7021279858Sjfv	IXL_PF_UNLOCK(pf);
7022279858Sjfv	return (error);
7023279858Sjfv}
7024279858Sjfv
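/*
 * Callback for when SR-IOV is disabled: delete the VF VSIs and the VEB,
 * then release all per-VF state.
 */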
7025279858Sjfvstatic void
7026299546Serjixl_iov_uninit(device_t dev)
7027279858Sjfv{
7028279858Sjfv	struct ixl_pf *pf;
7029279858Sjfv	struct i40e_hw *hw;
7030279858Sjfv	struct ixl_vsi *vsi;
7031279858Sjfv	struct ifnet *ifp;
7032279858Sjfv	struct ixl_vf *vfs;
7033279858Sjfv	int i, num_vfs;
7034279858Sjfv
7035279858Sjfv	pf = device_get_softc(dev);
7036279858Sjfv	hw = &pf->hw;
7037279858Sjfv	vsi = &pf->vsi;
7038279858Sjfv	ifp = vsi->ifp;
7039279858Sjfv
7040279858Sjfv	IXL_PF_LOCK(pf);
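	/* Delete any VSIs that were created for the VFs. */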
7041279858Sjfv	for (i = 0; i < pf->num_vfs; i++) {
7042279858Sjfv		if (pf->vfs[i].vsi.seid != 0)
7043279858Sjfv			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
7044279858Sjfv	}
7045279858Sjfv
7046279858Sjfv	if (pf->veb_seid != 0) {
7047279858Sjfv		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
7048279858Sjfv		pf->veb_seid = 0;
7049279858Sjfv	}
7050279858Sjfv
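	/*
	 * With the VFs gone, interrupts are no longer needed while the
	 * PF interface is down.
	 */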
7051279858Sjfv	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
7052279858Sjfv		ixl_disable_intr(vsi);
7053279858Sjfv
7054279858Sjfv	vfs = pf->vfs;
7055279858Sjfv	num_vfs = pf->num_vfs;
7056279858Sjfv
7057279858Sjfv	pf->vfs = NULL;
7058279858Sjfv	pf->num_vfs = 0;
7059279858Sjfv	IXL_PF_UNLOCK(pf);
7060279858Sjfv
7061279858Sjfv	/* Do this after the unlock as sysctl_ctx_free might sleep. */
7062279858Sjfv	for (i = 0; i < num_vfs; i++)
7063279858Sjfv		sysctl_ctx_free(&vfs[i].ctx);
7064279858Sjfv	free(vfs, M_IXL);
7065279858Sjfv}
7066279858Sjfv
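/*
 * Callback to configure a single VF from its nvlist parameters: set up its
 * VSI, apply MAC, anti-spoof, and promiscuous settings, reset the VF, and
 * attach its sysctl nodes.
 */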
7067279858Sjfvstatic int
7068279858Sjfvixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
7069279858Sjfv{
7070279858Sjfv	char sysctl_name[QUEUE_NAME_LEN];
7071279858Sjfv	struct ixl_pf *pf;
7072279858Sjfv	struct ixl_vf *vf;
7073279858Sjfv	const void *mac;
7074279858Sjfv	size_t size;
7075279858Sjfv	int error;
7076279858Sjfv
7077279858Sjfv	pf = device_get_softc(dev);
7078279858Sjfv	vf = &pf->vfs[vfnum];
7079279858Sjfv
7080279858Sjfv	IXL_PF_LOCK(pf);
7081279858Sjfv	vf->vf_num = vfnum;
7082279858Sjfv
7083279858Sjfv	vf->vsi.back = pf;
7084279858Sjfv	vf->vf_flags = VF_FLAG_ENABLED;
7085279858Sjfv	SLIST_INIT(&vf->vsi.ftl);
7086279858Sjfv
7087279858Sjfv	error = ixl_vf_setup_vsi(pf, vf);
7088279858Sjfv	if (error != 0)
7089279858Sjfv		goto out;
7090279858Sjfv
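	/* Use the administrator-supplied MAC address, if one was given. */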
7091279858Sjfv	if (nvlist_exists_binary(params, "mac-addr")) {
7092279858Sjfv		mac = nvlist_get_binary(params, "mac-addr", &size);
7093279858Sjfv		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
7094279858Sjfv
7095279858Sjfv		if (nvlist_get_bool(params, "allow-set-mac"))
7096279858Sjfv			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
7097279858Sjfv	} else
7098279858Sjfv		/*
7099279858Sjfv		 * If the administrator has not specified a MAC address then
7100279858Sjfv		 * we must allow the VF to choose one.
7101279858Sjfv		 */
7102279858Sjfv		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
7103279858Sjfv
7104279858Sjfv	if (nvlist_get_bool(params, "mac-anti-spoof"))
7105279858Sjfv		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
7106279858Sjfv
7107279858Sjfv	if (nvlist_get_bool(params, "allow-promisc"))
7108279858Sjfv		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
7109279858Sjfv
7110299552Serj	/* TODO: Get VLAN that PF has set for the VF */
7111299552Serj
7112279858Sjfv	vf->vf_flags |= VF_FLAG_VLAN_CAP;
7113279858Sjfv
7114279858Sjfv	ixl_reset_vf(pf, vf);
7115279858Sjfvout:
7116279858Sjfv	IXL_PF_UNLOCK(pf);
7117279858Sjfv	if (error == 0) {
7118279858Sjfv		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
7119279858Sjfv		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
7120279858Sjfv	}
7121279858Sjfv
7122279858Sjfv	return (error);
7123279858Sjfv}
7124279858Sjfv#endif /* PCI_IOV */
7125