/* if_ixl.c revision 302384 */
133965Sjdp/******************************************************************************
260484Sobrien
338889Sjdp  Copyright (c) 2013-2015, Intel Corporation
433965Sjdp  All rights reserved.
533965Sjdp
633965Sjdp  Redistribution and use in source and binary forms, with or without
733965Sjdp  modification, are permitted provided that the following conditions are met:
833965Sjdp
933965Sjdp   1. Redistributions of source code must retain the above copyright notice,
1033965Sjdp      this list of conditions and the following disclaimer.
1133965Sjdp
1233965Sjdp   2. Redistributions in binary form must reproduce the above copyright
1333965Sjdp      notice, this list of conditions and the following disclaimer in the
1433965Sjdp      documentation and/or other materials provided with the distribution.
1533965Sjdp
1633965Sjdp   3. Neither the name of the Intel Corporation nor the names of its
1733965Sjdp      contributors may be used to endorse or promote products derived from
1833965Sjdp      this software without specific prior written permission.
1933965Sjdp
2033965Sjdp  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2133965Sjdp  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2233965Sjdp  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2333965Sjdp  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
2433965Sjdp  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2533965Sjdp  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2633965Sjdp  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2733965Sjdp  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2833965Sjdp  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2933965Sjdp  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
3033965Sjdp  POSSIBILITY OF SUCH DAMAGE.
3133965Sjdp
3233965Sjdp******************************************************************************/
3333965Sjdp/*$FreeBSD: head/sys/dev/ixl/if_ixl.c 302384 2016-07-07 03:39:18Z sbruno $*/
3433965Sjdp
3533965Sjdp#ifndef IXL_STANDALONE_BUILD
3633965Sjdp#include "opt_inet.h"
3733965Sjdp#include "opt_inet6.h"
3833965Sjdp#include "opt_rss.h"
3933965Sjdp#endif
4033965Sjdp
4133965Sjdp#include "ixl.h"
4233965Sjdp#include "ixl_pf.h"
4333965Sjdp
4433965Sjdp#ifdef RSS
4533965Sjdp#include <net/rss_config.h>
4633965Sjdp#endif
4733965Sjdp
/*********************************************************************
 *  Driver version
 *********************************************************************/
/* Reported to userland via the device description built in ixl_probe() */
char ixl_driver_version[] = "1.4.27-k";
5233965Sjdp
5333965Sjdp/*********************************************************************
5433965Sjdp *  PCI Device ID Table
5533965Sjdp *
5633965Sjdp *  Used by probe to select devices to load on
5733965Sjdp *  Last field stores an index into ixl_strings
5833965Sjdp *  Last entry must be all 0s
5933965Sjdp *
6033965Sjdp *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
6133965Sjdp *********************************************************************/
6233965Sjdp
/*
 * Supported PCI IDs.  Subvendor/subdevice of 0 act as wildcards in the
 * ixl_probe() match loop; the trailing all-zero entry terminates the scan.
 * The last field indexes into ixl_strings below.
 */
static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

/* Indexed by the last field of ixl_vendor_info_array entries */
static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};
8433965Sjdp
8533965Sjdp
8633965Sjdp/*********************************************************************
8733965Sjdp *  Function prototypes
8833965Sjdp *********************************************************************/
8933965Sjdpstatic int      ixl_probe(device_t);
9033965Sjdpstatic int      ixl_attach(device_t);
9133965Sjdpstatic int      ixl_detach(device_t);
9233965Sjdpstatic int      ixl_shutdown(device_t);
9333965Sjdpstatic int	ixl_get_hw_capabilities(struct ixl_pf *);
9433965Sjdpstatic void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
9533965Sjdpstatic int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
9633965Sjdpstatic void	ixl_init(void *);
9733965Sjdpstatic void	ixl_init_locked(struct ixl_pf *);
9833965Sjdpstatic void     ixl_stop(struct ixl_pf *);
9933965Sjdpstatic void	ixl_stop_locked(struct ixl_pf *);
10033965Sjdpstatic void     ixl_media_status(struct ifnet *, struct ifmediareq *);
10133965Sjdpstatic int      ixl_media_change(struct ifnet *);
10233965Sjdpstatic void     ixl_update_link_status(struct ixl_pf *);
10333965Sjdpstatic int      ixl_allocate_pci_resources(struct ixl_pf *);
10433965Sjdpstatic u16	ixl_get_bus_info(struct i40e_hw *, device_t);
10533965Sjdpstatic int	ixl_setup_stations(struct ixl_pf *);
10633965Sjdpstatic int	ixl_switch_config(struct ixl_pf *);
10733965Sjdpstatic int	ixl_initialize_vsi(struct ixl_vsi *);
10833965Sjdp
10933965Sjdpstatic int	ixl_setup_adminq_msix(struct ixl_pf *);
11033965Sjdpstatic int	ixl_setup_adminq_tq(struct ixl_pf *);
11133965Sjdpstatic int	ixl_setup_queue_msix(struct ixl_vsi *);
11233965Sjdpstatic int	ixl_setup_queue_tqs(struct ixl_vsi *);
11333965Sjdpstatic int	ixl_teardown_adminq_msix(struct ixl_pf *);
11433965Sjdpstatic int	ixl_teardown_queue_msix(struct ixl_vsi *);
11533965Sjdpstatic void	ixl_configure_intr0_msix(struct ixl_pf *);
11633965Sjdpstatic void	ixl_configure_queue_intr_msix(struct ixl_pf *);
11733965Sjdpstatic void	ixl_free_queue_tqs(struct ixl_vsi *);
11833965Sjdpstatic void	ixl_free_adminq_tq(struct ixl_pf *);
11933965Sjdp
12033965Sjdpstatic int	ixl_assign_vsi_legacy(struct ixl_pf *);
12133965Sjdpstatic int	ixl_init_msix(struct ixl_pf *);
12233965Sjdpstatic void	ixl_configure_itr(struct ixl_pf *);
12333965Sjdpstatic void	ixl_configure_legacy(struct ixl_pf *);
12433965Sjdpstatic void	ixl_free_pci_resources(struct ixl_pf *);
12533965Sjdpstatic void	ixl_local_timer(void *);
12633965Sjdpstatic int	ixl_setup_interface(device_t, struct ixl_vsi *);
12733965Sjdpstatic void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
12833965Sjdpstatic void	ixl_config_rss(struct ixl_vsi *);
12933965Sjdpstatic void	ixl_set_queue_rx_itr(struct ixl_queue *);
13033965Sjdpstatic void	ixl_set_queue_tx_itr(struct ixl_queue *);
13133965Sjdpstatic int	ixl_set_advertised_speeds(struct ixl_pf *, int);
13233965Sjdpstatic void	ixl_get_initial_advertised_speeds(struct ixl_pf *);
13333965Sjdp
13460484Sobrienstatic int	ixl_enable_rings(struct ixl_vsi *);
13533965Sjdpstatic int	ixl_disable_rings(struct ixl_vsi *);
13633965Sjdpstatic void	ixl_enable_intr(struct ixl_vsi *);
13733965Sjdpstatic void	ixl_disable_intr(struct ixl_vsi *);
13833965Sjdpstatic void	ixl_disable_rings_intr(struct ixl_vsi *);
13933965Sjdp
14033965Sjdpstatic void     ixl_enable_adminq(struct i40e_hw *);
14133965Sjdpstatic void     ixl_disable_adminq(struct i40e_hw *);
14233965Sjdpstatic void     ixl_enable_queue(struct i40e_hw *, int);
14333965Sjdpstatic void     ixl_disable_queue(struct i40e_hw *, int);
14433965Sjdpstatic void     ixl_enable_legacy(struct i40e_hw *);
14533965Sjdpstatic void     ixl_disable_legacy(struct i40e_hw *);
14633965Sjdp
14733965Sjdpstatic void     ixl_set_promisc(struct ixl_vsi *);
14833965Sjdpstatic void     ixl_add_multi(struct ixl_vsi *);
14933965Sjdpstatic void     ixl_del_multi(struct ixl_vsi *);
15033965Sjdpstatic void	ixl_register_vlan(void *, struct ifnet *, u16);
15133965Sjdpstatic void	ixl_unregister_vlan(void *, struct ifnet *, u16);
15233965Sjdpstatic void	ixl_setup_vlan_filters(struct ixl_vsi *);
15333965Sjdp
15433965Sjdpstatic void	ixl_init_filters(struct ixl_vsi *);
15533965Sjdpstatic void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
15633965Sjdpstatic void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
15733965Sjdpstatic void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
15833965Sjdpstatic void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
15933965Sjdpstatic void	ixl_del_hw_filters(struct ixl_vsi *, int);
16033965Sjdpstatic struct ixl_mac_filter *
16133965Sjdp		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
16233965Sjdpstatic void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
16333965Sjdpstatic void	ixl_free_mac_filters(struct ixl_vsi *vsi);
16433965Sjdp
16533965Sjdp/* Sysctls*/
16633965Sjdpstatic void	ixl_add_device_sysctls(struct ixl_pf *);
16733965Sjdp
16833965Sjdpstatic int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
16933965Sjdpstatic int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
17033965Sjdpstatic int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
17133965Sjdpstatic int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
17233965Sjdp
17333965Sjdp#ifdef IXL_DEBUG_SYSCTL
17433965Sjdpstatic int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
17533965Sjdpstatic void	ixl_print_debug_info(struct ixl_pf *);
17633965Sjdp
17733965Sjdpstatic int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
17833965Sjdpstatic int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
17933965Sjdpstatic int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
18033965Sjdpstatic int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
18133965Sjdpstatic int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
18233965Sjdp#endif
18333965Sjdp
18433965Sjdp/* The MSI/X Interrupt handlers */
18533965Sjdpstatic void	ixl_intr(void *);
18633965Sjdpstatic void	ixl_msix_que(void *);
18733965Sjdpstatic void	ixl_msix_adminq(void *);
18833965Sjdpstatic void	ixl_handle_mdd_event(struct ixl_pf *);
18933965Sjdp
19033965Sjdp/* Deferred interrupt tasklets */
19133965Sjdpstatic void	ixl_do_adminq(void *, int);
19233965Sjdp
19333965Sjdp/* Statistics */
19433965Sjdpstatic void     ixl_add_hw_stats(struct ixl_pf *);
19533965Sjdpstatic void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
19633965Sjdp		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
19733965Sjdpstatic void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
19833965Sjdp		    struct sysctl_oid_list *,
19933965Sjdp		    struct i40e_eth_stats *);
20033965Sjdpstatic void	ixl_update_stats_counters(struct ixl_pf *);
20133965Sjdpstatic void	ixl_update_eth_stats(struct ixl_vsi *);
20233965Sjdpstatic void	ixl_update_vsi_stats(struct ixl_vsi *);
20333965Sjdpstatic void	ixl_pf_reset_stats(struct ixl_pf *);
20433965Sjdpstatic void	ixl_vsi_reset_stats(struct ixl_vsi *);
20533965Sjdpstatic void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
20633965Sjdp		    u64 *, u64 *);
20733965Sjdpstatic void	ixl_stat_update32(struct i40e_hw *, u32, bool,
20833965Sjdp		    u64 *, u64 *);
20933965Sjdp/* NVM update */
21033965Sjdpstatic int	ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
21133965Sjdpstatic void	ixl_handle_empr_reset(struct ixl_pf *);
21233965Sjdpstatic int	ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
21333965Sjdp
21433965Sjdp/* Debug helper functions */
21533965Sjdp#ifdef IXL_DEBUG
21633965Sjdpstatic void	ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
21733965Sjdp#endif
21833965Sjdp
21933965Sjdp#ifdef PCI_IOV
22033965Sjdpstatic int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
22133965Sjdp
22233965Sjdpstatic int	ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
22333965Sjdpstatic void	ixl_iov_uninit(device_t dev);
22433965Sjdpstatic int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
22533965Sjdp
22633965Sjdpstatic void	ixl_handle_vf_msg(struct ixl_pf *,
22733965Sjdp		    struct i40e_arq_event_info *);
22833965Sjdpstatic void	ixl_handle_vflr(void *arg, int pending);
22933965Sjdp
23033965Sjdpstatic void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
23133965Sjdpstatic void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
23233965Sjdp#endif
23333965Sjdp
23433965Sjdp/*********************************************************************
23533965Sjdp *  FreeBSD Device Interface Entry Points
23633965Sjdp *********************************************************************/
23733965Sjdp
/* Newbus device interface dispatch table for the ixl(4) driver */
static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
#ifdef PCI_IOV
	/* SR-IOV virtual function management hooks */
	DEVMETHOD(pci_iov_init, ixl_iov_init),
	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
#endif
	{0, 0}
};

/* Softc is a struct ixl_pf, allocated per device by newbus */
static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
26433965Sjdp
/*
** Global reset mutex
*/
/* Initialized once, lazily, in ixl_probe(); shared by all ixl instances */
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually, if left as 0 the
** number of queues will be calculated based
** on cpus and msix vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_FDIR
/* Flow Director (perfect-filter) support toggle */
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

/* Human-readable names for the i40e_fc_mode flow-control settings */
static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/* Ethernet broadcast address, used when programming HW filters */
static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
35677298Sobrien
35777298Sobrien/*********************************************************************
35877298Sobrien *  Device identification routine
35977298Sobrien *
36077298Sobrien *  ixl_probe determines if the driver should be loaded on
36177298Sobrien *  the hardware based on PCI vendor/device id of the device.
36277298Sobrien *
36377298Sobrien *  return BUS_PROBE_DEFAULT on success, positive on failure
36477298Sobrien *********************************************************************/
36533965Sjdp
36633965Sjdpstatic int
36733965Sjdpixl_probe(device_t dev)
36833965Sjdp{
36933965Sjdp	ixl_vendor_info_t *ent;
37033965Sjdp
37133965Sjdp	u16	pci_vendor_id, pci_device_id;
37233965Sjdp	u16	pci_subvendor_id, pci_subdevice_id;
37333965Sjdp	char	device_name[256];
37433965Sjdp	static bool lock_init = FALSE;
37533965Sjdp
37633965Sjdp#if 0
37733965Sjdp	INIT_DEBUGOUT("ixl_probe: begin");
37833965Sjdp#endif
37933965Sjdp	pci_vendor_id = pci_get_vendor(dev);
38033965Sjdp	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
38133965Sjdp		return (ENXIO);
38233965Sjdp
38333965Sjdp	pci_device_id = pci_get_device(dev);
38433965Sjdp	pci_subvendor_id = pci_get_subvendor(dev);
38533965Sjdp	pci_subdevice_id = pci_get_subdevice(dev);
38633965Sjdp
38733965Sjdp	ent = ixl_vendor_info_array;
38833965Sjdp	while (ent->vendor_id != 0) {
38933965Sjdp		if ((pci_vendor_id == ent->vendor_id) &&
39033965Sjdp		    (pci_device_id == ent->device_id) &&
39133965Sjdp
39233965Sjdp		    ((pci_subvendor_id == ent->subvendor_id) ||
39333965Sjdp		     (ent->subvendor_id == 0)) &&
39433965Sjdp
39533965Sjdp		    ((pci_subdevice_id == ent->subdevice_id) ||
39633965Sjdp		     (ent->subdevice_id == 0))) {
39733965Sjdp			sprintf(device_name, "%s, Version - %s",
39833965Sjdp				ixl_strings[ent->index],
39933965Sjdp				ixl_driver_version);
40033965Sjdp			device_set_desc_copy(dev, device_name);
40133965Sjdp			/* One shot mutex init */
40277298Sobrien			if (lock_init == FALSE) {
40333965Sjdp				lock_init = TRUE;
40477298Sobrien				mtx_init(&ixl_reset_mtx,
40577298Sobrien				    "ixl_reset",
40677298Sobrien				    "IXL RESET Lock", MTX_DEF);
40777298Sobrien			}
40877298Sobrien			return (BUS_PROBE_DEFAULT);
40933965Sjdp		}
41033965Sjdp		ent++;
41133965Sjdp	}
41277298Sobrien	return (ENXIO);
41333965Sjdp}
41477298Sobrien
41577298Sobrien/*********************************************************************
41677298Sobrien *  Device initialization routine
41777298Sobrien *
41877298Sobrien *  The attach entry point is called when the driver is being loaded.
41977298Sobrien *  This routine identifies the type of hardware, allocates all resources
42033965Sjdp *  and initializes the hardware.
42133965Sjdp *
42233965Sjdp *  return 0 on success, positive on failure
42333965Sjdp *********************************************************************/
42433965Sjdp
/*
 * ixl_attach - device initialization routine
 *
 * Brings the adapter up in a strict order: PCI resources, PF reset,
 * shared code, admin queue, HMC, MAC address, VSI/queue stations,
 * ifnet, switch config, interrupts, stats/sysctls, VLAN events, and
 * optional SR-IOV/netmap attach.  Failures unwind through the goto
 * labels at the bottom, which release resources in reverse order.
 *
 * Returns 0 on success, positive errno on failure.
 */
static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi  *vsi;
	u16		bus;
	int             error = 0;
#ifdef PCI_IOV
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;
#endif

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init*/
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	pf->vc_debug_lvl = 1;

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "PF reset failure %d\n", error);
		error = EIO;
		goto err_out;
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev, "Unable to initialize shared code, error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/*
	 * Set up the admin queue.  A FW API-version mismatch is allowed
	 * through here so the NVM version can still be printed before the
	 * mismatch is reported as fatal just below.
	 */
	error = i40e_init_adminq(hw);
	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (error == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	/* Warn (non-fatally) about minor-version skew between FW and driver */
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4))
		i40e_aq_stop_lldp(hw, TRUE, NULL);

	/* Read and validate the primary MAC, then save the permanent copy */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up VSI and queues */
	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Workaround: older firmware needs an explicit link restart here */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
			goto err_late;
		}
	}

	/* Determine link state */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, vsi) != 0) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err_late;
	}

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
        if (error) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %d,"
		    " aq_err %d\n", error, hw->aq.asq_last_status);
		goto err_late;
	}

	/* Get the bus configuration and set the shared code's config */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/*
	 * In MSI-X mode, initialize the Admin Queue interrupt,
	 * so userland tools can communicate with the adapter regardless of
	 * the ifnet interface's status.
	 */
	if (pf->msix > 1) {
		error = ixl_setup_adminq_msix(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
			    error);
			goto err_late;
		}
		error = ixl_setup_adminq_tq(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_tq error: %d\n",
			    error);
			goto err_late;
		}
		ixl_configure_intr0_msix(pf);
		ixl_enable_adminq(hw);
	}

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);

	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
	/* SR-IOV is only supported when MSI-X is in use. */
	if (pf->msix > 1) {
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);

		/* SR-IOV setup failure is not fatal to attach */
		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (iov_error != 0) {
			device_printf(dev,
			    "Failed to initialize SR-IOV (error=%d)\n",
			    iov_error);
		} else
			device_printf(dev, "SR-IOV ready\n");
	}
#endif

#ifdef DEV_NETMAP
	ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

	/* Error unwind: each label releases everything acquired before it */
err_late:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}
70060484Sobrien
70160484Sobrien/*********************************************************************
70260484Sobrien *  Device removal routine
70360484Sobrien *
70460484Sobrien *  The detach entry point is called when the driver is being removed.
70560484Sobrien *  This routine stops the adapter and deallocates all the resources
70660484Sobrien *  that were allocated for driver operation.
70760484Sobrien *
70860484Sobrien *  return 0 on success, positive on failure
70960484Sobrien *********************************************************************/
71060484Sobrien
static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	enum i40e_status_code	status;
#ifdef PCI_IOV
	int			error;
#endif

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	/* Refuse to detach while SR-IOV VFs are still attached. */
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif

	/* Detach from the network stack, then quiesce the interface. */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixl_stop(pf);

	/* Per-queue taskqueues are released before HMC/adminq shutdown. */
	ixl_free_queue_tqs(vsi);

	/* Shutdown LAN HMC; failure is logged but detach continues. */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue: disable, drop its taskqueue and MSI-X
	 * vector, then shut the queue itself down. */
	ixl_disable_adminq(hw);
	ixl_free_adminq_tq(pf);
	ixl_teardown_adminq_msix(pf);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Wait for any in-flight timer callout to finish before freeing. */
	callout_drain(&pf->timer);
#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}
77660484Sobrien
77760484Sobrien/*********************************************************************
77833965Sjdp *
77933965Sjdp *  Shutdown entry point
78033965Sjdp *
78133965Sjdp **********************************************************************/
78233965Sjdp
78333965Sjdpstatic int
78433965Sjdpixl_shutdown(device_t dev)
78533965Sjdp{
78633965Sjdp	struct ixl_pf *pf = device_get_softc(dev);
78733965Sjdp	ixl_stop(pf);
78833965Sjdp	return (0);
78960484Sobrien}
79060484Sobrien
79160484Sobrien
79260484Sobrien/*********************************************************************
79360484Sobrien *
79460484Sobrien *  Get the hardware capabilities
79560484Sobrien *
79660484Sobrien **********************************************************************/
79760484Sobrien
79860484Sobrienstatic int
79933965Sjdpixl_get_hw_capabilities(struct ixl_pf *pf)
80033965Sjdp{
80160484Sobrien	struct i40e_aqc_list_capabilities_element_resp *buf;
80260484Sobrien	struct i40e_hw	*hw = &pf->hw;
80360484Sobrien	device_t 	dev = pf->dev;
80433965Sjdp	int             error, len;
80560484Sobrien	u16		needed;
80660484Sobrien	bool		again = TRUE;
80733965Sjdp
80833965Sjdp	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
80933965Sjdpretry:
81033965Sjdp	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
81160484Sobrien	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
81260484Sobrien		device_printf(dev, "Unable to allocate cap memory\n");
81333965Sjdp                return (ENOMEM);
81433965Sjdp	}
81533965Sjdp
81633965Sjdp	/* This populates the hw struct */
81733965Sjdp        error = i40e_aq_discover_capabilities(hw, buf, len,
81833965Sjdp	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
81933965Sjdp	free(buf, M_DEVBUF);
82033965Sjdp	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
82133965Sjdp	    (again == TRUE)) {
82233965Sjdp		/* retry once with a larger buffer */
82360484Sobrien		again = FALSE;
82433965Sjdp		len = needed;
82560484Sobrien		goto retry;
82660484Sobrien	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
82760484Sobrien		device_printf(dev, "capability discovery failed: %d\n",
82860484Sobrien		    pf->hw.aq.asq_last_status);
82960484Sobrien		return (ENODEV);
83060484Sobrien	}
83160484Sobrien
83233965Sjdp	/* Capture this PF's starting queue pair */
83360484Sobrien	pf->qbase = hw->func_caps.base_queue;
83460484Sobrien
83560484Sobrien#ifdef IXL_DEBUG
83660484Sobrien	device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
83760484Sobrien	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
83860484Sobrien	    hw->pf_id, hw->func_caps.num_vfs,
83960484Sobrien	    hw->func_caps.num_msix_vectors,
84060484Sobrien	    hw->func_caps.num_msix_vectors_vf,
84160484Sobrien	    hw->func_caps.fd_filters_guaranteed,
84260484Sobrien	    hw->func_caps.fd_filters_best_effort,
84360484Sobrien	    hw->func_caps.num_tx_qp,
84460484Sobrien	    hw->func_caps.num_rx_qp,
84560484Sobrien	    hw->func_caps.base_queue);
84660484Sobrien#endif
84760484Sobrien	return (error);
84860484Sobrien}
84960484Sobrien
/*
 * Helper for SIOCSIFCAP: apply a TXCSUM/TSO capability toggle while
 * enforcing that TSO requires TX checksum offload (handled separately
 * for IPv4 and IPv6).  When txcsum is forced off, TSO is disabled too
 * and IXL_FLAGS_KEEP_TSO[46] records that it should be restored the
 * next time txcsum is re-enabled.
 */
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t 	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	/* Case: neither TXCSUM nor TSO4 currently enabled. */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			/* TSO4 alone requested: pull in TXCSUM as well. */
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	/* Case: TXCSUM enabled, TSO4 disabled. */
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	/* Case: both TXCSUM and TSO4 enabled. */
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			/* Dropping txcsum forces TSO4 off too; remember it. */
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 -- mirrors the IPv4 logic above. */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
92160484Sobrien
92260484Sobrien/*********************************************************************
92360484Sobrien *  Ioctl entry point
92460484Sobrien *
92560484Sobrien *  ixl_ioctl is called when the user wants to configure the
92660484Sobrien *  interface.
92777298Sobrien *
92860484Sobrien *  return 0 on success, positive on failure
92960484Sobrien **********************************************************************/
93060484Sobrien
static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = vsi->back;
	struct ifreq	*ifr = (struct ifreq *)data;
	struct ifdrv	*ifd = (struct ifdrv *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

        case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	/* NOTE(review): with neither INET nor INET6 defined, the break
	 * above is compiled out and SIOCSIFADDR falls through to
	 * SIOCSIFMTU -- confirm this is intended. */
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		/* Reject MTUs whose resulting frame exceeds IXL_MAX_FRAME. */
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			/* Re-init only if the interface is already running. */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/* Only promisc/allmulti changes need action
				 * while running. */
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else {
				/* ixl_init()/ixl_stop() take the PF lock
				 * themselves, so drop it around the call. */
				IXL_PF_UNLOCK(pf);
				ixl_init(pf);
				IXL_PF_LOCK(pf);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				IXL_PF_UNLOCK(pf);
				ixl_stop(pf);
				IXL_PF_LOCK(pf);
			}
		}
		/* Remember flags so the next call can detect changes. */
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCSDRVSPEC:
	case SIOCGDRVSPEC:
		IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
		    "Info)\n");

		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS)
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		else
			error = EINVAL;
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			/* Quiesce interrupts while the filter list changes. */
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
#ifdef IFM_ETH_XTYPE
	case SIOCGIFXMEDIA:
#endif
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* Bits that differ between requested and current caps. */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		/* TXCSUM/TSO have interdependencies; handled separately. */
		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		/* Reinitialize so the new capabilities take effect. */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
108833965Sjdp
108933965Sjdp
109033965Sjdp/*********************************************************************
109133965Sjdp *  Init entry point
109233965Sjdp *
109333965Sjdp *  This routine is used in two ways. It is used by the stack as
109433965Sjdp *  init entry point in network interface structure. It is also used
109533965Sjdp *  by the driver as a hw/sw initialization routine to get to a
109633965Sjdp *  consistent state.
109733965Sjdp *
 *  This routine returns void; initialization failures are reported
 *  via device_printf rather than a return code.
109933965Sjdp **********************************************************************/
110033965Sjdp
110133965Sjdpstatic void
110233965Sjdpixl_init_locked(struct ixl_pf *pf)
110333965Sjdp{
110433965Sjdp	struct i40e_hw	*hw = &pf->hw;
110533965Sjdp	struct ixl_vsi	*vsi = &pf->vsi;
110633965Sjdp	struct ifnet	*ifp = vsi->ifp;
110733965Sjdp	device_t 	dev = pf->dev;
110833965Sjdp	struct i40e_filter_control_settings	filter;
110933965Sjdp	u8		tmpaddr[ETHER_ADDR_LEN];
111033965Sjdp	int		ret;
111133965Sjdp
111233965Sjdp	mtx_assert(&pf->pf_mtx, MA_OWNED);
111333965Sjdp	INIT_DEBUGOUT("ixl_init_locked: begin");
111433965Sjdp
111533965Sjdp	ixl_stop_locked(pf);
111633965Sjdp
111733965Sjdp	/* Get the latest mac address... User might use a LAA */
111833965Sjdp	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
111933965Sjdp	      I40E_ETH_LENGTH_OF_ADDRESS);
112033965Sjdp	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
112133965Sjdp	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
112233965Sjdp		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
112333965Sjdp		bcopy(tmpaddr, hw->mac.addr,
112433965Sjdp		    I40E_ETH_LENGTH_OF_ADDRESS);
112533965Sjdp		ret = i40e_aq_mac_address_write(hw,
112633965Sjdp		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
112733965Sjdp		    hw->mac.addr, NULL);
112833965Sjdp		if (ret) {
112933965Sjdp			device_printf(dev, "LLA address"
113033965Sjdp			 "change failed!!\n");
113133965Sjdp			return;
113233965Sjdp		}
113333965Sjdp	}
113433965Sjdp
113533965Sjdp	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
113633965Sjdp
113733965Sjdp	/* Set the various hardware offload abilities */
113833965Sjdp	ifp->if_hwassist = 0;
113933965Sjdp	if (ifp->if_capenable & IFCAP_TSO)
114033965Sjdp		ifp->if_hwassist |= CSUM_TSO;
114160484Sobrien	if (ifp->if_capenable & IFCAP_TXCSUM)
114233965Sjdp		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
114333965Sjdp	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
114433965Sjdp		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
114577298Sobrien
114677298Sobrien	/* Set up the device filtering */
114733965Sjdp	bzero(&filter, sizeof(filter));
114833965Sjdp	filter.enable_ethtype = TRUE;
114933965Sjdp	filter.enable_macvlan = TRUE;
115033965Sjdp#ifdef IXL_FDIR
115133965Sjdp	filter.enable_fdir = TRUE;
115233965Sjdp#endif
115333965Sjdp	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
115433965Sjdp	if (i40e_set_filter_control(hw, &filter))
115533965Sjdp		device_printf(dev, "i40e_set_filter_control() failed\n");
115633965Sjdp
115733965Sjdp	/* Set up RSS */
115833965Sjdp	ixl_config_rss(vsi);
115933965Sjdp
116033965Sjdp	/* Prepare the VSI: rings, hmc contexts, etc... */
116133965Sjdp	if (ixl_initialize_vsi(vsi)) {
116233965Sjdp		device_printf(dev, "initialize vsi failed!!\n");
116333965Sjdp		return;
116433965Sjdp	}
116533965Sjdp
116633965Sjdp	/* Add protocol filters to list */
116733965Sjdp	ixl_init_filters(vsi);
116833965Sjdp
116933965Sjdp	/* Setup vlan's if needed */
117033965Sjdp	ixl_setup_vlan_filters(vsi);
117133965Sjdp
117233965Sjdp	/* Set up MSI/X routing and the ITR settings */
117333965Sjdp	if (ixl_enable_msix) {
117433965Sjdp		ixl_configure_queue_intr_msix(pf);
117533965Sjdp		ixl_configure_itr(pf);
117660484Sobrien	} else
117760484Sobrien		ixl_configure_legacy(pf);
117833965Sjdp
117933965Sjdp	ixl_enable_rings(vsi);
118033965Sjdp
118133965Sjdp	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
118233965Sjdp
118333965Sjdp	ixl_reconfigure_filters(vsi);
118433965Sjdp
118533965Sjdp	/* And now turn on interrupts */
118633965Sjdp	ixl_enable_intr(vsi);
118733965Sjdp
118833965Sjdp	/* Get link info */
118933965Sjdp	hw->phy.get_link_info = TRUE;
119033965Sjdp	i40e_get_link_status(hw, &pf->link_up);
119133965Sjdp	ixl_update_link_status(pf);
119233965Sjdp
119333965Sjdp	/* Set initial advertised speed sysctl value */
119433965Sjdp	ixl_get_initial_advertised_speeds(pf);
119533965Sjdp
119633965Sjdp	/* Start the local timer */
119733965Sjdp	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
119833965Sjdp
119933965Sjdp	/* Now inform the stack we're ready */
120033965Sjdp	ifp->if_drv_flags |= IFF_DRV_RUNNING;
120133965Sjdp
120233965Sjdp	return;
120333965Sjdp}
120433965Sjdp
120533965Sjdp/* For the set_advertise sysctl */
120633965Sjdpstatic void
120733965Sjdpixl_get_initial_advertised_speeds(struct ixl_pf *pf)
120833965Sjdp{
120933965Sjdp	struct i40e_hw *hw = &pf->hw;
121033965Sjdp	device_t dev = pf->dev;
121133965Sjdp	enum i40e_status_code status;
121233965Sjdp	struct i40e_aq_get_phy_abilities_resp abilities;
121333965Sjdp
121460484Sobrien	/* Set initial sysctl values */
121533965Sjdp	status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities,
121633965Sjdp					      NULL);
121733965Sjdp	if (status) {
121833965Sjdp		/* Non-fatal error */
121933965Sjdp		device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
122033965Sjdp		     __func__, status);
122133965Sjdp		return;
122233965Sjdp	}
122333965Sjdp
122433965Sjdp	if (abilities.link_speed & I40E_LINK_SPEED_40GB)
122533965Sjdp		pf->advertised_speed |= 0x10;
122633965Sjdp	if (abilities.link_speed & I40E_LINK_SPEED_20GB)
122733965Sjdp		pf->advertised_speed |= 0x8;
122833965Sjdp	if (abilities.link_speed & I40E_LINK_SPEED_10GB)
122933965Sjdp		pf->advertised_speed |= 0x4;
123033965Sjdp	if (abilities.link_speed & I40E_LINK_SPEED_1GB)
123133965Sjdp		pf->advertised_speed |= 0x2;
123233965Sjdp	if (abilities.link_speed & I40E_LINK_SPEED_100MB)
123333965Sjdp		pf->advertised_speed |= 0x1;
123433965Sjdp}
123533965Sjdp
/*
 * Tear down the LAN HMC and admin queue prior to a PF reset/rebuild.
 * Returns 0 on success or the shared-code status of the first failing
 * shutdown step; an HMC shutdown failure skips the adminq shutdown.
 */
static int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC (only if it was ever set up) */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %d\n", status);
			goto err_out;
		}
	}

	// XXX: This gets called when we know the adminq is inactive;
	// so we already know it's setup when we get here.

	/* Shutdown admin queue; failure here is logged but still returned. */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %d\n", status);

err_out:
	return (status);
}
126533965Sjdp
126633965Sjdpstatic int
126733965Sjdpixl_reset(struct ixl_pf *pf)
126833965Sjdp{
126933965Sjdp	struct i40e_hw *hw = &pf->hw;
127033965Sjdp	device_t dev = pf->dev;
127133965Sjdp	u8 set_fc_err_mask;
127233965Sjdp	int error = 0;
127333965Sjdp
127433965Sjdp	// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
127533965Sjdp	i40e_clear_hw(hw);
127633965Sjdp	error = i40e_pf_reset(hw);
127733965Sjdp	if (error) {
127833965Sjdp		device_printf(dev, "init: PF reset failure");
127933965Sjdp		error = EIO;
128033965Sjdp		goto err_out;
128133965Sjdp	}
128233965Sjdp
128333965Sjdp	error = i40e_init_adminq(hw);
128433965Sjdp	if (error) {
128533965Sjdp		device_printf(dev, "init: Admin queue init failure;"
128633965Sjdp		    " status code %d", error);
128733965Sjdp		error = EIO;
128833965Sjdp		goto err_out;
128933965Sjdp	}
129033965Sjdp
129133965Sjdp	i40e_clear_pxe_mode(hw);
129233965Sjdp
129333965Sjdp	error = ixl_get_hw_capabilities(pf);
129433965Sjdp	if (error) {
129533965Sjdp		device_printf(dev, "init: Error retrieving HW capabilities;"
129633965Sjdp		    " status code %d\n", error);
129733965Sjdp		goto err_out;
129833965Sjdp	}
129933965Sjdp
130033965Sjdp	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
130133965Sjdp	    hw->func_caps.num_rx_qp, 0, 0);
130233965Sjdp	if (error) {
130333965Sjdp		device_printf(dev, "init: LAN HMC init failed; status code %d\n",
130433965Sjdp		    error);
130560484Sobrien		error = EIO;
130633965Sjdp		goto err_out;
130733965Sjdp	}
130833965Sjdp
130933965Sjdp	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
131033965Sjdp	if (error) {
131133965Sjdp		device_printf(dev, "init: LAN HMC config failed; status code %d\n",
131233965Sjdp		    error);
131333965Sjdp		error = EIO;
131433965Sjdp		goto err_out;
131533965Sjdp	}
131633965Sjdp
131733965Sjdp	// XXX: possible fix for panic, but our failure recovery is still broken
131833965Sjdp	error = ixl_switch_config(pf);
131933965Sjdp	if (error) {
132033965Sjdp		device_printf(dev, "init: ixl_switch_config() failed: %d\n",
132133965Sjdp		     error);
132233965Sjdp		goto err_out;
132333965Sjdp	}
132433965Sjdp
132533965Sjdp	error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
132633965Sjdp	    NULL);
132733965Sjdp        if (error) {
132833965Sjdp		device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
132933965Sjdp		    " aq_err %d\n", error, hw->aq.asq_last_status);
133033965Sjdp		error = EIO;
133160484Sobrien		goto err_out;
133260484Sobrien	}
133360484Sobrien
133433965Sjdp	error = i40e_set_fc(hw, &set_fc_err_mask, true);
133533965Sjdp	if (error) {
133633965Sjdp		device_printf(dev, "init: setting link flow control failed; retcode %d,"
133733965Sjdp		    " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
133833965Sjdp		goto err_out;
133933965Sjdp	}
134033965Sjdp
134133965Sjdp	// XXX: (Rebuild VSIs?)
134233965Sjdp
134333965Sjdp	/* Firmware delay workaround */
134477298Sobrien	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
134577298Sobrien	    (hw->aq.fw_maj_ver < 4)) {
134677298Sobrien		i40e_msec_delay(75);
134777298Sobrien		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
134877298Sobrien		if (error) {
134977298Sobrien			device_printf(dev, "init: link restart failed, aq_err %d\n",
135033965Sjdp			    hw->aq.asq_last_status);
135133965Sjdp			goto err_out;
135233965Sjdp		}
135333965Sjdp	}
135433965Sjdp
135533965Sjdp
135633965Sjdperr_out:
135733965Sjdp	return (error);
135833965Sjdp}
135933965Sjdp
static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	int error = 0;

	/*
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		IXL_PF_LOCK(pf);
		ixl_teardown_hw_structs(pf);
		ixl_reset(pf);
		IXL_PF_UNLOCK(pf);
	}

	/*
	 * Set up LAN queue interrupts here.
	 * Kernel interrupt setup functions cannot be called while holding a lock,
	 * so this is done outside of init_locked().
	 */
	if (pf->msix > 1) {
		/* Teardown existing interrupts, if they exist */
		ixl_teardown_queue_msix(vsi);
		ixl_free_queue_tqs(vsi);
		/* Then set them up again */
		error = ixl_setup_queue_msix(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
			    error);
		/* NOTE(review): a failure from ixl_setup_queue_msix() is
		 * overwritten here by ixl_setup_queue_tqs()'s result, so the
		 * check below may miss it -- confirm intended. */
		error = ixl_setup_queue_tqs(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
			    error);
	} else
		// possibly broken
		error = ixl_assign_vsi_legacy(pf);
	if (error) {
		device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", error);
		return;
	}

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
}
141133965Sjdp
141233965Sjdp/*
141333965Sjdp** MSIX Interrupt Handlers and Tasklets
141433965Sjdp*/
141560484Sobrienstatic void
141633965Sjdpixl_handle_que(void *context, int pending)
141733965Sjdp{
141833965Sjdp	struct ixl_queue *que = context;
141933965Sjdp	struct ixl_vsi *vsi = que->vsi;
142033965Sjdp	struct i40e_hw  *hw = vsi->hw;
142133965Sjdp	struct tx_ring  *txr = &que->txr;
142260484Sobrien	struct ifnet    *ifp = vsi->ifp;
142360484Sobrien	bool		more;
142433965Sjdp
142560484Sobrien	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
142660484Sobrien		more = ixl_rxeof(que, IXL_RX_LIMIT);
142760484Sobrien		IXL_TX_LOCK(txr);
142860484Sobrien		ixl_txeof(que);
142960484Sobrien		if (!drbr_empty(ifp, txr->br))
143060484Sobrien			ixl_mq_start_locked(ifp, txr);
143160484Sobrien		IXL_TX_UNLOCK(txr);
143260484Sobrien		if (more) {
143360484Sobrien			taskqueue_enqueue(que->tq, &que->task);
143460484Sobrien			return;
143560484Sobrien		}
143660484Sobrien	}
143760484Sobrien
143860484Sobrien	/* Reenable this interrupt - hmmm */
143960484Sobrien	ixl_enable_queue(hw, que->me);
144060484Sobrien	return;
144160484Sobrien}
144260484Sobrien
144360484Sobrien
144460484Sobrien/*********************************************************************
144560484Sobrien *
144660484Sobrien *  Legacy Interrupt Service routine
144760484Sobrien *
144860484Sobrien **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw =  &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
        u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Read the interrupt cause register. */
	icr0 = rd32(hw, I40E_PFINT_ICR0);

	/* Acknowledge the interrupt (clear PBA). */
	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

#ifdef PCI_IOV
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
#endif

	/* Admin queue event: defer to the adminq task and return early
	 * without servicing the rings or re-enabling queue causes. */
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);
	/* NOTE(review): more_tx/more_rx are computed but never consulted;
	 * the legacy path always re-enables below -- confirm intended. */

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	/* NOTE(review): clearing an ICR0-named mask bit in a TQCTL register
	 * value looks suspicious -- verify the intended mask constant. */
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}
151038889Sjdp
151138889Sjdp
/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 *  Per-queue MSIX handler: cleans the RX and TX rings, updates the
 *  interrupt throttling rates, and either reschedules itself via the
 *  queue task (more work pending) or re-enables the queue interrupt.
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	/* Clean RX first; rxeof reports whether the limit was hit */
	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	/* TX ring is protected by its own lock */
	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* Adjust dynamic interrupt moderation for this queue */
	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	/*
	 * If either ring still has work, defer to the taskqueue and
	 * leave the interrupt masked; otherwise re-arm it now.
	 */
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}
155560484Sobrien
155677298Sobrien
/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 *  Handles the "other causes" vector (vector 0): admin queue events,
 *  malicious-driver detection, reset requests, error reporting, and
 *  (with PCI_IOV) VF level resets.  Heavy work is pushed to tasks.
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask, rstat_reg;
	bool		do_task = FALSE;

	++pf->admin_irq;

	/* reg: latched causes; mask: currently enabled causes */
	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/*
	 * NOTE(review): mask is pruned below but never written back to
	 * I40E_PFINT_ICR0_ENA in this function — re-enabling appears to
	 * be deferred to the adminq task; confirm this is intentional.
	 */

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
		do_task = TRUE;
	}

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		/* Malicious driver detection event */
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_GRST_MASK) {
		device_printf(pf->dev, "Reset Requested!\n");
		rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
		rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		    >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		device_printf(pf->dev, "Reset type: ");
		switch (rstat_reg) {
		/* These others might be handled similarly to an EMPR reset */
		case I40E_RESET_CORER:
			printf("CORER\n");
			break;
		case I40E_RESET_GLOBR:
			printf("GLOBR\n");
			break;
		case I40E_RESET_EMPR:
			printf("EMPR\n");
			/* Flag EMPR so other paths wait out the reset */
			atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
			break;
		default:
			printf("?\n");
			break;
		}
		// overload admin queue task to check reset progress?
		do_task = TRUE;
	}

	/* Error causes are only reported, not recovered here */
	if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
		device_printf(pf->dev, "ECC Error detected!\n");
	}

	if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		device_printf(pf->dev, "HMC Error detected!\n");
	}

	if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
		device_printf(pf->dev, "PCI Exception detected!\n");
	}

#ifdef PCI_IOV
	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
		/* VF level reset: handled by the VFLR task */
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		taskqueue_enqueue(pf->tq, &pf->vflr_task);
	}
#endif

	/* Acknowledge the interrupt (clear PBA bit) */
	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	if (do_task)
		taskqueue_enqueue(pf->tq, &pf->adminq);
}
163877298Sobrien
163977298Sobrien/*********************************************************************
164077298Sobrien *
164177298Sobrien *  Media Ioctl callback
164277298Sobrien *
164377298Sobrien *  This routine is called whenever the user queries the status of
164477298Sobrien *  the interface using ifconfig.
164533965Sjdp *
164633965Sjdp **********************************************************************/
164733965Sjdpstatic void
164833965Sjdpixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
164933965Sjdp{
165033965Sjdp	struct ixl_vsi	*vsi = ifp->if_softc;
165133965Sjdp	struct ixl_pf	*pf = vsi->back;
165233965Sjdp	struct i40e_hw  *hw = &pf->hw;
165333965Sjdp
165433965Sjdp	INIT_DEBUGOUT("ixl_media_status: begin");
165533965Sjdp	IXL_PF_LOCK(pf);
165633965Sjdp
165733965Sjdp	hw->phy.get_link_info = TRUE;
165833965Sjdp	i40e_get_link_status(hw, &pf->link_up);
165933965Sjdp	ixl_update_link_status(pf);
166033965Sjdp
166133965Sjdp	ifmr->ifm_status = IFM_AVALID;
166233965Sjdp	ifmr->ifm_active = IFM_ETHER;
166333965Sjdp
166433965Sjdp	if (!pf->link_up) {
166533965Sjdp		IXL_PF_UNLOCK(pf);
166633965Sjdp		return;
166733965Sjdp	}
166833965Sjdp
166933965Sjdp	ifmr->ifm_status |= IFM_ACTIVE;
167033965Sjdp
167133965Sjdp	/* Hardware always does full-duplex */
167233965Sjdp	ifmr->ifm_active |= IFM_FDX;
167333965Sjdp
167433965Sjdp	switch (hw->phy.link_info.phy_type) {
167533965Sjdp		/* 100 M */
167633965Sjdp		case I40E_PHY_TYPE_100BASE_TX:
167733965Sjdp			ifmr->ifm_active |= IFM_100_TX;
167833965Sjdp			break;
167933965Sjdp		/* 1 G */
168033965Sjdp		case I40E_PHY_TYPE_1000BASE_T:
168133965Sjdp			ifmr->ifm_active |= IFM_1000_T;
168233965Sjdp			break;
168333965Sjdp		case I40E_PHY_TYPE_1000BASE_SX:
168433965Sjdp			ifmr->ifm_active |= IFM_1000_SX;
168533965Sjdp			break;
168633965Sjdp		case I40E_PHY_TYPE_1000BASE_LX:
168733965Sjdp			ifmr->ifm_active |= IFM_1000_LX;
168833965Sjdp			break;
168933965Sjdp		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
169033965Sjdp			ifmr->ifm_active |= IFM_OTHER;
169133965Sjdp			break;
169233965Sjdp		/* 10 G */
169333965Sjdp		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
169433965Sjdp			ifmr->ifm_active |= IFM_10G_TWINAX;
169533965Sjdp			break;
169638889Sjdp		case I40E_PHY_TYPE_10GBASE_SR:
169733965Sjdp			ifmr->ifm_active |= IFM_10G_SR;
169833965Sjdp			break;
169933965Sjdp		case I40E_PHY_TYPE_10GBASE_LR:
170033965Sjdp			ifmr->ifm_active |= IFM_10G_LR;
170133965Sjdp			break;
170233965Sjdp		case I40E_PHY_TYPE_10GBASE_T:
170333965Sjdp			ifmr->ifm_active |= IFM_10G_T;
170433965Sjdp			break;
170533965Sjdp		case I40E_PHY_TYPE_XAUI:
170633965Sjdp		case I40E_PHY_TYPE_XFI:
170733965Sjdp		case I40E_PHY_TYPE_10GBASE_AOC:
170833965Sjdp			ifmr->ifm_active |= IFM_OTHER;
170933965Sjdp			break;
171033965Sjdp		/* 40 G */
171133965Sjdp		case I40E_PHY_TYPE_40GBASE_CR4:
171233965Sjdp		case I40E_PHY_TYPE_40GBASE_CR4_CU:
171333965Sjdp			ifmr->ifm_active |= IFM_40G_CR4;
171433965Sjdp			break;
171533965Sjdp		case I40E_PHY_TYPE_40GBASE_SR4:
171633965Sjdp			ifmr->ifm_active |= IFM_40G_SR4;
171733965Sjdp			break;
171833965Sjdp		case I40E_PHY_TYPE_40GBASE_LR4:
171933965Sjdp			ifmr->ifm_active |= IFM_40G_LR4;
172033965Sjdp			break;
172133965Sjdp		case I40E_PHY_TYPE_XLAUI:
172233965Sjdp			ifmr->ifm_active |= IFM_OTHER;
172333965Sjdp			break;
172433965Sjdp#ifndef IFM_ETH_XTYPE
172533965Sjdp		case I40E_PHY_TYPE_1000BASE_KX:
172633965Sjdp			ifmr->ifm_active |= IFM_1000_CX;
172733965Sjdp			break;
172833965Sjdp		case I40E_PHY_TYPE_SGMII:
172933965Sjdp			ifmr->ifm_active |= IFM_OTHER;
173060484Sobrien			break;
173133965Sjdp		case I40E_PHY_TYPE_10GBASE_CR1_CU:
173233965Sjdp		case I40E_PHY_TYPE_10GBASE_CR1:
173333965Sjdp			ifmr->ifm_active |= IFM_10G_TWINAX;
173433965Sjdp			break;
173533965Sjdp		case I40E_PHY_TYPE_10GBASE_KX4:
173633965Sjdp			ifmr->ifm_active |= IFM_10G_CX4;
173733965Sjdp			break;
173833965Sjdp		case I40E_PHY_TYPE_10GBASE_KR:
173933965Sjdp			ifmr->ifm_active |= IFM_10G_SR;
174033965Sjdp			break;
174133965Sjdp		case I40E_PHY_TYPE_SFI:
174233965Sjdp			ifmr->ifm_active |= IFM_OTHER;
174333965Sjdp			break;
174433965Sjdp		case I40E_PHY_TYPE_40GBASE_KR4:
174533965Sjdp		case I40E_PHY_TYPE_XLPPI:
174633965Sjdp		case I40E_PHY_TYPE_40GBASE_AOC:
174733965Sjdp			ifmr->ifm_active |= IFM_40G_SR4;
174833965Sjdp			break;
174960484Sobrien#else
175060484Sobrien		case I40E_PHY_TYPE_1000BASE_KX:
175160484Sobrien			ifmr->ifm_active |= IFM_1000_KX;
175233965Sjdp			break;
175360484Sobrien		case I40E_PHY_TYPE_SGMII:
175460484Sobrien			ifmr->ifm_active |= IFM_1000_SGMII;
175533965Sjdp			break;
175633965Sjdp		/* ERJ: What's the difference between these? */
175733965Sjdp		case I40E_PHY_TYPE_10GBASE_CR1_CU:
175833965Sjdp		case I40E_PHY_TYPE_10GBASE_CR1:
175933965Sjdp			ifmr->ifm_active |= IFM_10G_CR1;
176033965Sjdp			break;
176133965Sjdp		case I40E_PHY_TYPE_10GBASE_KX4:
176260484Sobrien			ifmr->ifm_active |= IFM_10G_KX4;
176333965Sjdp			break;
176433965Sjdp		case I40E_PHY_TYPE_10GBASE_KR:
176533965Sjdp			ifmr->ifm_active |= IFM_10G_KR;
176633965Sjdp			break;
176733965Sjdp		case I40E_PHY_TYPE_SFI:
176833965Sjdp			ifmr->ifm_active |= IFM_10G_SFI;
176977298Sobrien			break;
177077298Sobrien		/* Our single 20G media type */
177177298Sobrien		case I40E_PHY_TYPE_20GBASE_KR2:
177277298Sobrien			ifmr->ifm_active |= IFM_20G_KR2;
177377298Sobrien			break;
177433965Sjdp		case I40E_PHY_TYPE_40GBASE_KR4:
177533965Sjdp			ifmr->ifm_active |= IFM_40G_KR4;
177633965Sjdp			break;
177733965Sjdp		case I40E_PHY_TYPE_XLPPI:
177833965Sjdp		case I40E_PHY_TYPE_40GBASE_AOC:
177933965Sjdp			ifmr->ifm_active |= IFM_40G_XLPPI;
178033965Sjdp			break;
178133965Sjdp#endif
178233965Sjdp		/* Unknown to driver */
178333965Sjdp		default:
178433965Sjdp			ifmr->ifm_active |= IFM_UNKNOWN;
178533965Sjdp			break;
178633965Sjdp	}
178777298Sobrien	/* Report flow control status as well */
178838889Sjdp	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
178960484Sobrien		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
179038889Sjdp	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
179138889Sjdp		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
179277298Sobrien
179360484Sobrien	IXL_PF_UNLOCK(pf);
179460484Sobrien
179560484Sobrien	return;
179660484Sobrien}
179760484Sobrien
179860484Sobrien/*
179960484Sobrien * NOTE: Fortville does not support forcing media speeds. Instead,
180033965Sjdp * use the set_advertise sysctl to set the speeds Fortville
180133965Sjdp * will advertise or be allowed to operate at.
180233965Sjdp */
180333965Sjdpstatic int
180433965Sjdpixl_media_change(struct ifnet * ifp)
180533965Sjdp{
180633965Sjdp	struct ixl_vsi *vsi = ifp->if_softc;
180733965Sjdp	struct ifmedia *ifm = &vsi->media;
180833965Sjdp
180933965Sjdp	INIT_DEBUGOUT("ixl_media_change: begin");
181033965Sjdp
181133965Sjdp	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
181233965Sjdp		return (EINVAL);
181333965Sjdp
181433965Sjdp	if_printf(ifp, "Media change is not supported.\n");
181533965Sjdp
181633965Sjdp	return (ENODEV);
181733965Sjdp}
181833965Sjdp
181933965Sjdp
#ifdef IXL_FDIR
/*
** ATR: Application Targetted Receive - creates a filter
**	based on TX flow info that will keep the receive
**	portion of the flow on the same queue. Based on the
**	implementation this is only available for TCP connections
*/
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
	struct ixl_vsi			*vsi = que->vsi;
	struct tx_ring			*txr = &que->txr;
	struct i40e_filter_program_desc	*FDIR;
	u32				ptype, dtype;
	int				idx;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
                return;
	txr->atr_count = 0;

	/* Get a descriptor to use; consumes one TX ring slot */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;

	/* QW0: destination queue index for the RX side of the flow */
	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	/* Select the IPv4 or IPv6 TCP packet classifier type */
	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise its an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	/* Steer matching RX packets directly to our queue index */
	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	/* Hardware descriptor fields are little-endian */
	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
#endif
189133965Sjdp
189233965Sjdp
189333965Sjdpstatic void
189433965Sjdpixl_set_promisc(struct ixl_vsi *vsi)
189533965Sjdp{
189633965Sjdp	struct ifnet	*ifp = vsi->ifp;
189733965Sjdp	struct i40e_hw	*hw = vsi->hw;
189833965Sjdp	int		err, mcnt = 0;
189933965Sjdp	bool		uni = FALSE, multi = FALSE;
190033965Sjdp
190133965Sjdp	if (ifp->if_flags & IFF_ALLMULTI)
190233965Sjdp                multi = TRUE;
190333965Sjdp	else { /* Need to count the multicast addresses */
190433965Sjdp		struct  ifmultiaddr *ifma;
190533965Sjdp		if_maddr_rlock(ifp);
190633965Sjdp		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
190733965Sjdp                        if (ifma->ifma_addr->sa_family != AF_LINK)
190833965Sjdp                                continue;
190933965Sjdp                        if (mcnt == MAX_MULTICAST_ADDR)
191033965Sjdp                                break;
191133965Sjdp                        mcnt++;
191233965Sjdp		}
191333965Sjdp		if_maddr_runlock(ifp);
191433965Sjdp	}
191533965Sjdp
191633965Sjdp	if (mcnt >= MAX_MULTICAST_ADDR)
191733965Sjdp                multi = TRUE;
191833965Sjdp        if (ifp->if_flags & IFF_PROMISC)
191933965Sjdp		uni = TRUE;
192033965Sjdp
192133965Sjdp	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
192233965Sjdp	    vsi->seid, uni, NULL);
192333965Sjdp	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
192433965Sjdp	    vsi->seid, multi, NULL);
192533965Sjdp	return;
192633965Sjdp}
192733965Sjdp
192833965Sjdp/*********************************************************************
192933965Sjdp * 	Filter Routines
193033965Sjdp *
193133965Sjdp *	Routines for multicast and vlan filter management.
193233965Sjdp *
193333965Sjdp *********************************************************************/
193433965Sjdpstatic void
193533965Sjdpixl_add_multi(struct ixl_vsi *vsi)
193633965Sjdp{
193733965Sjdp	struct	ifmultiaddr	*ifma;
193833965Sjdp	struct ifnet		*ifp = vsi->ifp;
193933965Sjdp	struct i40e_hw		*hw = vsi->hw;
194033965Sjdp	int			mcnt = 0, flags;
194133965Sjdp
194233965Sjdp	IOCTL_DEBUGOUT("ixl_add_multi: begin");
194333965Sjdp
194433965Sjdp	if_maddr_rlock(ifp);
194533965Sjdp	/*
194633965Sjdp	** First just get a count, to decide if we
194733965Sjdp	** we simply use multicast promiscuous.
194833965Sjdp	*/
194933965Sjdp	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
195033965Sjdp		if (ifma->ifma_addr->sa_family != AF_LINK)
195133965Sjdp			continue;
195277298Sobrien		mcnt++;
195377298Sobrien	}
195477298Sobrien	if_maddr_runlock(ifp);
195533965Sjdp
195633965Sjdp	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
195733965Sjdp		/* delete existing MC filters */
195877298Sobrien		ixl_del_hw_filters(vsi, mcnt);
195933965Sjdp		i40e_aq_set_vsi_multicast_promiscuous(hw,
196033965Sjdp		    vsi->seid, TRUE, NULL);
196133965Sjdp		return;
196233965Sjdp	}
196333965Sjdp
196433965Sjdp	mcnt = 0;
196533965Sjdp	if_maddr_rlock(ifp);
196633965Sjdp	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
196733965Sjdp		if (ifma->ifma_addr->sa_family != AF_LINK)
196833965Sjdp			continue;
196933965Sjdp		ixl_add_mc_filter(vsi,
197033965Sjdp		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
197133965Sjdp		mcnt++;
197233965Sjdp	}
197333965Sjdp	if_maddr_runlock(ifp);
197460484Sobrien	if (mcnt > 0) {
197533965Sjdp		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
197633965Sjdp		ixl_add_hw_filters(vsi, flags, mcnt);
197760484Sobrien	}
197833965Sjdp
197977298Sobrien	IOCTL_DEBUGOUT("ixl_add_multi: end");
198060484Sobrien	return;
198160484Sobrien}
198260484Sobrien
198360484Sobrienstatic void
198460484Sobrienixl_del_multi(struct ixl_vsi *vsi)
198577298Sobrien{
198633965Sjdp	struct ifnet		*ifp = vsi->ifp;
198733965Sjdp	struct ifmultiaddr	*ifma;
198833965Sjdp	struct ixl_mac_filter	*f;
198933965Sjdp	int			mcnt = 0;
199060484Sobrien	bool		match = FALSE;
199133965Sjdp
199233965Sjdp	IOCTL_DEBUGOUT("ixl_del_multi: begin");
199333965Sjdp
199433965Sjdp	/* Search for removed multicast addresses */
199533965Sjdp	if_maddr_rlock(ifp);
199633965Sjdp	SLIST_FOREACH(f, &vsi->ftl, next) {
199733965Sjdp		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
199833965Sjdp			match = FALSE;
199933965Sjdp			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
200033965Sjdp				if (ifma->ifma_addr->sa_family != AF_LINK)
200133965Sjdp					continue;
200233965Sjdp				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
200333965Sjdp				if (cmp_etheraddr(f->macaddr, mc_addr)) {
200477298Sobrien					match = TRUE;
200533965Sjdp					break;
200677298Sobrien				}
200777298Sobrien			}
200877298Sobrien			if (match == FALSE) {
200977298Sobrien				f->flags |= IXL_FILTER_DEL;
201033965Sjdp				mcnt++;
201177298Sobrien			}
201277298Sobrien		}
201333965Sjdp	}
201433965Sjdp	if_maddr_runlock(ifp);
201533965Sjdp
201633965Sjdp	if (mcnt > 0)
201733965Sjdp		ixl_del_hw_filters(vsi, mcnt);
201877298Sobrien}
201933965Sjdp
202033965Sjdp
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 *  Only runs when the driver is configured UP and RUNNING.
 *
 **********************************************************************/

static void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;
	u32			mask;

	/* Callout runs with the PF lock held */
	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		/* Escalate a persistently-busy queue to hung */
		if (que->busy >= IXL_MAX_TX_BUSY) {
#ifdef IXL_DEBUG
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
#endif
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	/* Re-arm the one-second watchdog */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	ixl_init_locked(pf);
}
209533965Sjdp
209633965Sjdp/*
209733965Sjdp** Note: this routine updates the OS on the link state
209833965Sjdp**	the real check of the hardware only happens with
209933965Sjdp**	a link interrupt.
210033965Sjdp*/
210133965Sjdpstatic void
210233965Sjdpixl_update_link_status(struct ixl_pf *pf)
210333965Sjdp{
210433965Sjdp	struct ixl_vsi		*vsi = &pf->vsi;
210533965Sjdp	struct i40e_hw		*hw = &pf->hw;
210638889Sjdp	struct ifnet		*ifp = vsi->ifp;
210738889Sjdp	device_t		dev = pf->dev;
210838889Sjdp
210933965Sjdp	if (pf->link_up) {
211033965Sjdp		if (vsi->link_active == FALSE) {
211133965Sjdp			pf->fc = hw->fc.current_mode;
211277298Sobrien			if (bootverbose) {
211377298Sobrien				device_printf(dev,"Link is up %d Gbps %s,"
211477298Sobrien				    " Flow Control: %s\n",
211577298Sobrien				    ((pf->link_speed ==
211677298Sobrien				    I40E_LINK_SPEED_40GB)? 40:10),
211777298Sobrien				    "Full Duplex", ixl_fc_string[pf->fc]);
211877298Sobrien			}
211977298Sobrien			vsi->link_active = TRUE;
212077298Sobrien			/*
212177298Sobrien			** Warn user if link speed on NPAR enabled
212277298Sobrien			** partition is not at least 10GB
212377298Sobrien			*/
212477298Sobrien			if (hw->func_caps.npar_enable &&
212577298Sobrien			   (hw->phy.link_info.link_speed ==
212677298Sobrien			   I40E_LINK_SPEED_1GB ||
212777298Sobrien			   hw->phy.link_info.link_speed ==
212877298Sobrien			   I40E_LINK_SPEED_100MB))
212977298Sobrien				device_printf(dev, "The partition detected"
213077298Sobrien				    "link speed that is less than 10Gbps\n");
213177298Sobrien			if_link_state_change(ifp, LINK_STATE_UP);
213277298Sobrien		}
213377298Sobrien	} else { /* Link down */
213477298Sobrien		if (vsi->link_active == TRUE) {
213577298Sobrien			if (bootverbose)
213677298Sobrien				device_printf(dev, "Link is Down\n");
213777298Sobrien			if_link_state_change(ifp, LINK_STATE_DOWN);
213877298Sobrien			vsi->link_active = FALSE;
213977298Sobrien		}
214077298Sobrien	}
214177298Sobrien
214277298Sobrien	return;
214360484Sobrien}
214460484Sobrien
/*
 * Stop the interface: quiesce the rings under the PF lock, then
 * tear down the per-queue MSIX vectors and taskqueues.
 * NOTE(review): teardown happens after dropping the lock —
 * presumably because those operations can sleep; confirm.
 */
static void
ixl_stop(struct ixl_pf *pf)
{
	IXL_PF_LOCK(pf);
	ixl_stop_locked(pf);
	IXL_PF_UNLOCK(pf);

	ixl_teardown_queue_msix(&pf->vsi);
	ixl_free_queue_tqs(&pf->vsi);
}
215533965Sjdp
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 *  Must be called with the PF lock held.
 *
 **********************************************************************/

static void
ixl_stop_locked(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;

	INIT_DEBUGOUT("ixl_stop: begin\n");

	IXL_PF_LOCK_ASSERT(pf);

	/* Stop the local timer */
	callout_stop(&pf->timer);

	/* Mask queue interrupts, then halt the rings themselves */
	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}
218233965Sjdp
218333965Sjdp
218433965Sjdp/*********************************************************************
218577298Sobrien *
218677298Sobrien *  Setup MSIX Interrupt resources and handlers for the VSI
218777298Sobrien *
218877298Sobrien **********************************************************************/
218933965Sjdpstatic int
219033965Sjdpixl_assign_vsi_legacy(struct ixl_pf *pf)
219133965Sjdp{
219233965Sjdp	device_t        dev = pf->dev;
219333965Sjdp	struct 		ixl_vsi *vsi = &pf->vsi;
219433965Sjdp	struct		ixl_queue *que = vsi->queues;
219533965Sjdp	int 		error, rid = 0;
219633965Sjdp
219733965Sjdp	if (pf->msix == 1)
219860484Sobrien		rid = 1;
219933965Sjdp	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
220033965Sjdp	    &rid, RF_SHAREABLE | RF_ACTIVE);
220133965Sjdp	if (pf->res == NULL) {
220233965Sjdp		device_printf(dev, "Unable to allocate"
220333965Sjdp		    " bus resource: vsi legacy/msi interrupt\n");
220433965Sjdp		return (ENXIO);
220533965Sjdp	}
220633965Sjdp
220733965Sjdp	/* Set the handler function */
220833965Sjdp	error = bus_setup_intr(dev, pf->res,
220933965Sjdp	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
221033965Sjdp	    ixl_intr, pf, &pf->tag);
221133965Sjdp	if (error) {
221233965Sjdp		pf->res = NULL;
221333965Sjdp		device_printf(dev, "Failed to register legacy/msi handler\n");
221433965Sjdp		return (error);
221533965Sjdp	}
221633965Sjdp	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
221733965Sjdp	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
221833965Sjdp	TASK_INIT(&que->task, 0, ixl_handle_que, que);
221933965Sjdp	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
222033965Sjdp	    taskqueue_thread_enqueue, &que->tq);
222133965Sjdp	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
222233965Sjdp	    device_get_nameunit(dev));
222333965Sjdp	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
222433965Sjdp
222533965Sjdp	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
222633965Sjdp	    taskqueue_thread_enqueue, &pf->tq);
222733965Sjdp	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
222833965Sjdp	    device_get_nameunit(dev));
222933965Sjdp
223033965Sjdp	return (0);
223133965Sjdp}
223233965Sjdp
223333965Sjdpstatic int
223433965Sjdpixl_setup_adminq_tq(struct ixl_pf *pf)
223533965Sjdp{
223633965Sjdp	device_t dev = pf->dev;
223733965Sjdp	int error = 0;
223860484Sobrien
223960484Sobrien	/* Tasklet for Admin Queue interrupts */
224033965Sjdp	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
224133965Sjdp#ifdef PCI_IOV
224260484Sobrien	/* VFLR Tasklet */
224333965Sjdp	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
224433965Sjdp#endif
224533965Sjdp	/* Create and start Admin Queue taskqueue */
224633965Sjdp	pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
224733965Sjdp	    taskqueue_thread_enqueue, &pf->tq);
224833965Sjdp	if (!pf->tq) {
224933965Sjdp		device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
225033965Sjdp		return (ENOMEM);
225133965Sjdp	}
225233965Sjdp	error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
225333965Sjdp	    device_get_nameunit(dev));
225433965Sjdp	if (error) {
225533965Sjdp		device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
225633965Sjdp		    error);
225733965Sjdp		taskqueue_free(pf->tq);
225833965Sjdp		return (error);
225933965Sjdp	}
226033965Sjdp	return (0);
226133965Sjdp}
226233965Sjdp
226333965Sjdpstatic int
226433965Sjdpixl_setup_queue_tqs(struct ixl_vsi *vsi)
226533965Sjdp{
226633965Sjdp	struct ixl_queue *que = vsi->queues;
226760484Sobrien	device_t dev = vsi->dev;
226860484Sobrien#ifdef	RSS
226960484Sobrien	cpuset_t cpu_mask;
227060484Sobrien	int cpu_id;
227160484Sobrien#endif
227260484Sobrien
227333965Sjdp	/* Create queue tasks and start queue taskqueues */
227433965Sjdp	for (int i = 0; i < vsi->num_queues; i++, que++) {
227533965Sjdp		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
227633965Sjdp		TASK_INIT(&que->task, 0, ixl_handle_que, que);
227733965Sjdp		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
227833965Sjdp		    taskqueue_thread_enqueue, &que->tq);
227933965Sjdp#ifdef RSS
228033965Sjdp		cpu_id = rss_getcpu(i % rss_getnumbuckets());
228133965Sjdp		CPU_SETOF(cpu_id, &cpu_mask);
228233965Sjdp		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
228333965Sjdp		    &cpu_mask, "%s (bucket %d)",
228433965Sjdp		    device_get_nameunit(dev), cpu_id);
228533965Sjdp#else
228633965Sjdp		taskqueue_start_threads(&que->tq, 1, PI_NET,
228733965Sjdp		    "%s (que %d)", device_get_nameunit(dev), que->me);
228833965Sjdp#endif
228933965Sjdp	}
229033965Sjdp
229133965Sjdp	return (0);
229233965Sjdp}
229333965Sjdp
229433965Sjdpstatic void
229533965Sjdpixl_free_adminq_tq(struct ixl_pf *pf)
229633965Sjdp{
229733965Sjdp	if (pf->tq) {
229833965Sjdp		taskqueue_free(pf->tq);
229933965Sjdp		pf->tq = NULL;
230033965Sjdp	}
230133965Sjdp}
230233965Sjdp
230333965Sjdpstatic void
230433965Sjdpixl_free_queue_tqs(struct ixl_vsi *vsi)
230533965Sjdp{
230633965Sjdp	struct ixl_queue *que = vsi->queues;
230733965Sjdp
230833965Sjdp	for (int i = 0; i < vsi->num_queues; i++, que++) {
230933965Sjdp		if (que->tq) {
231033965Sjdp			taskqueue_free(que->tq);
231133965Sjdp			que->tq = NULL;
231233965Sjdp		}
231333965Sjdp	}
231433965Sjdp}
231533965Sjdp
231633965Sjdpstatic int
231733965Sjdpixl_setup_adminq_msix(struct ixl_pf *pf)
231833965Sjdp{
231933965Sjdp	device_t dev = pf->dev;
232033965Sjdp	int rid, error = 0;
232133965Sjdp
232233965Sjdp	/* Admin IRQ rid is 1, vector is 0 */
232333965Sjdp	rid = 1;
232433965Sjdp	/* Get interrupt resource from bus */
232533965Sjdp	pf->res = bus_alloc_resource_any(dev,
232633965Sjdp    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
232733965Sjdp	if (!pf->res) {
232833965Sjdp		device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
232933965Sjdp		    " interrupt failed [rid=%d]\n", rid);
233033965Sjdp		return (ENXIO);
233133965Sjdp	}
233233965Sjdp	/* Then associate interrupt with handler */
233333965Sjdp	error = bus_setup_intr(dev, pf->res,
233433965Sjdp	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
233533965Sjdp	    ixl_msix_adminq, pf, &pf->tag);
233633965Sjdp	if (error) {
233733965Sjdp		pf->res = NULL;
233833965Sjdp		device_printf(dev, "bus_setup_intr() for Admin Queue"
233933965Sjdp		    " interrupt handler failed, error %d\n", error);
234033965Sjdp		return (ENXIO);
234133965Sjdp	}
234233965Sjdp	error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
234333965Sjdp	if (error) {
234433965Sjdp		/* Probably non-fatal? */
234533965Sjdp		device_printf(dev, "bus_describe_intr() for Admin Queue"
234633965Sjdp		    " interrupt name failed, error %d\n", error);
234733965Sjdp	}
234833965Sjdp	pf->admvec = 0;
234933965Sjdp
235033965Sjdp	return (0);
235133965Sjdp}
235233965Sjdp
235333965Sjdp/*
235433965Sjdp * Allocate interrupt resources from bus and associate an interrupt handler
235533965Sjdp * to those for the VSI's queues.
235633965Sjdp */
235733965Sjdpstatic int
235833965Sjdpixl_setup_queue_msix(struct ixl_vsi *vsi)
235933965Sjdp{
236033965Sjdp	device_t	dev = vsi->dev;
236133965Sjdp	struct 		ixl_queue *que = vsi->queues;
236233965Sjdp	struct		tx_ring	 *txr;
236333965Sjdp	int 		error, rid, vector = 1;
236433965Sjdp
236533965Sjdp	/* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
236633965Sjdp	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
236733965Sjdp		int cpu_id = i;
236833965Sjdp		rid = vector + 1;
236933965Sjdp		txr = &que->txr;
237033965Sjdp		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
237133965Sjdp		    RF_SHAREABLE | RF_ACTIVE);
237233965Sjdp		if (!que->res) {
237333965Sjdp			device_printf(dev, "bus_alloc_resource_any() for"
237477298Sobrien			    " Queue %d interrupt failed [rid=%d]\n",
237577298Sobrien			    que->me, rid);
237677298Sobrien			return (ENXIO);
237777298Sobrien		}
237877298Sobrien		/* Set the handler function */
237977298Sobrien		error = bus_setup_intr(dev, que->res,
238077298Sobrien		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
238177298Sobrien		    ixl_msix_que, que, &que->tag);
238277298Sobrien		if (error) {
238377298Sobrien			device_printf(dev, "bus_setup_intr() for Queue %d"
238477298Sobrien			    " interrupt handler failed, error %d\n",
238577298Sobrien			    que->me, error);
238677298Sobrien			// TODO: Check for error from this?
238777298Sobrien			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
238877298Sobrien			return (error);
238977298Sobrien		}
239033965Sjdp		error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
239133965Sjdp		if (error) {
239233965Sjdp			device_printf(dev, "bus_describe_intr() for Queue %d"
239333965Sjdp			    " interrupt name failed, error %d\n",
239433965Sjdp			    que->me, error);
239533965Sjdp		}
239633965Sjdp		/* Bind the vector to a CPU */
239733965Sjdp#ifdef RSS
239833965Sjdp		cpu_id = rss_getcpu(i % rss_getnumbuckets());
239933965Sjdp#endif
240033965Sjdp		error = bus_bind_intr(dev, que->res, cpu_id);
240133965Sjdp		if (error) {
240233965Sjdp			device_printf(dev, "bus_bind_intr() for Queue %d"
240333965Sjdp			    " to CPU %d failed, error %d\n",
240477298Sobrien			    que->me, cpu_id, error);
240533965Sjdp		}
240633965Sjdp		que->msix = vector;
240733965Sjdp	}
240833965Sjdp
240933965Sjdp	return (0);
241033965Sjdp}
241133965Sjdp
241233965Sjdp
241333965Sjdp/*
241433965Sjdp * Allocate MSI/X vectors
241533965Sjdp */
241633965Sjdpstatic int
241733965Sjdpixl_init_msix(struct ixl_pf *pf)
241833965Sjdp{
241933965Sjdp	device_t dev = pf->dev;
242033965Sjdp	int rid, want, vectors, queues, available;
242133965Sjdp
242233965Sjdp	/* Override by tuneable */
242333965Sjdp	if (ixl_enable_msix == 0)
242433965Sjdp		goto no_msix;
242533965Sjdp
242633965Sjdp	/*
242733965Sjdp	** When used in a virtualized environment
242833965Sjdp	** PCI BUSMASTER capability may not be set
242933965Sjdp	** so explicity set it here and rewrite
243033965Sjdp	** the ENABLE in the MSIX control register
243133965Sjdp	** at this point to cause the host to
243233965Sjdp	** successfully initialize us.
243333965Sjdp	*/
243460484Sobrien	{
243560484Sobrien		u16 pci_cmd_word;
243660484Sobrien		int msix_ctrl;
243733965Sjdp		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
243860484Sobrien		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
243933965Sjdp		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
244033965Sjdp		pci_find_cap(dev, PCIY_MSIX, &rid);
244133965Sjdp		rid += PCIR_MSIX_CTRL;
244233965Sjdp		msix_ctrl = pci_read_config(dev, rid, 2);
244333965Sjdp		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
244433965Sjdp		pci_write_config(dev, rid, msix_ctrl, 2);
244533965Sjdp	}
244633965Sjdp
244733965Sjdp	/* First try MSI/X */
244833965Sjdp	rid = PCIR_BAR(IXL_BAR);
244933965Sjdp	pf->msix_mem = bus_alloc_resource_any(dev,
245033965Sjdp	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
245133965Sjdp       	if (!pf->msix_mem) {
245233965Sjdp		/* May not be enabled */
245333965Sjdp		device_printf(pf->dev,
245433965Sjdp		    "Unable to map MSIX table\n");
245533965Sjdp		goto no_msix;
245633965Sjdp	}
245733965Sjdp
245833965Sjdp	available = pci_msix_count(dev);
245933965Sjdp	if (available == 0) { /* system has msix disabled */
246033965Sjdp		bus_release_resource(dev, SYS_RES_MEMORY,
246133965Sjdp		    rid, pf->msix_mem);
246233965Sjdp		pf->msix_mem = NULL;
246360484Sobrien		goto no_msix;
246460484Sobrien	}
246533965Sjdp
246633965Sjdp	/* Figure out a reasonable auto config value */
246733965Sjdp	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
246833965Sjdp
246933965Sjdp	/* Override with tunable value if tunable is less than autoconfig count */
247033965Sjdp	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
247133965Sjdp		queues = ixl_max_queues;
247233965Sjdp	else if ((ixl_max_queues != 0) && (ixl_max_queues > queues))
247333965Sjdp		device_printf(dev, "ixl_max_queues > # of cpus, using "
247433965Sjdp		    "autoconfig amount...\n");
247533965Sjdp	/* Or limit maximum auto-configured queues to 8 */
247633965Sjdp	else if ((ixl_max_queues == 0) && (queues > 8))
247733965Sjdp		queues = 8;
247833965Sjdp
247933965Sjdp#ifdef  RSS
248033965Sjdp	/* If we're doing RSS, clamp at the number of RSS buckets */
248133965Sjdp	if (queues > rss_getnumbuckets())
248233965Sjdp		queues = rss_getnumbuckets();
248360484Sobrien#endif
248460484Sobrien
248533965Sjdp	/*
248633965Sjdp	** Want one vector (RX/TX pair) per queue
248733965Sjdp	** plus an additional for the admin queue.
248833965Sjdp	*/
248933965Sjdp	want = queues + 1;
249033965Sjdp	if (want <= available)	/* Have enough */
249133965Sjdp		vectors = want;
249233965Sjdp	else {
249333965Sjdp               	device_printf(pf->dev,
249433965Sjdp		    "MSIX Configuration Problem, "
249533965Sjdp		    "%d vectors available but %d wanted!\n",
249633965Sjdp		    available, want);
249733965Sjdp		return (0); /* Will go to Legacy setup */
249833965Sjdp	}
249933965Sjdp
250033965Sjdp	if (pci_alloc_msix(dev, &vectors) == 0) {
250133965Sjdp               	device_printf(pf->dev,
250233965Sjdp		    "Using MSIX interrupts with %d vectors\n", vectors);
250333965Sjdp		pf->msix = vectors;
250433965Sjdp		pf->vsi.num_queues = queues;
250533965Sjdp#ifdef RSS
250633965Sjdp		/*
250733965Sjdp		 * If we're doing RSS, the number of queues needs to
250833965Sjdp		 * match the number of RSS buckets that are configured.
250933965Sjdp		 *
251033965Sjdp		 * + If there's more queues than RSS buckets, we'll end
251133965Sjdp		 *   up with queues that get no traffic.
251233965Sjdp		 *
251333965Sjdp		 * + If there's more RSS buckets than queues, we'll end
251433965Sjdp		 *   up having multiple RSS buckets map to the same queue,
251533965Sjdp		 *   so there'll be some contention.
251633965Sjdp		 */
251733965Sjdp		if (queues != rss_getnumbuckets()) {
251833965Sjdp			device_printf(dev,
251933965Sjdp			    "%s: queues (%d) != RSS buckets (%d)"
252033965Sjdp			    "; performance will be impacted.\n",
252133965Sjdp			    __func__, queues, rss_getnumbuckets());
252233965Sjdp		}
252333965Sjdp#endif
252433965Sjdp		return (vectors);
252533965Sjdp	}
252633965Sjdpno_msix:
252733965Sjdp	vectors = pci_msi_count(dev);
252833965Sjdp	pf->vsi.num_queues = 1;
252933965Sjdp	ixl_max_queues = 1;
253033965Sjdp	ixl_enable_msix = 0;
253133965Sjdp	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
253233965Sjdp		device_printf(pf->dev, "Using an MSI interrupt\n");
253333965Sjdp	else {
253433965Sjdp		vectors = 0;
253533965Sjdp		device_printf(pf->dev, "Using a Legacy interrupt\n");
253633965Sjdp	}
253733965Sjdp	return (vectors);
253833965Sjdp}
253933965Sjdp
254033965Sjdp/*
254133965Sjdp * Configure admin queue/misc interrupt cause registers in hardware.
254233965Sjdp */
254333965Sjdpstatic void
254433965Sjdpixl_configure_intr0_msix(struct ixl_pf *pf)
254533965Sjdp{
254633965Sjdp	struct i40e_hw *hw = &pf->hw;
254733965Sjdp	u32 reg;
254833965Sjdp
254938889Sjdp	/* First set up the adminq - vector 0 */
255038889Sjdp	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
255138889Sjdp	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
255238889Sjdp
255338889Sjdp	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
255438889Sjdp	    I40E_PFINT_ICR0_ENA_GRST_MASK |
255577298Sobrien	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
255677298Sobrien	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
255777298Sobrien	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
255877298Sobrien	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
255977298Sobrien	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
256077298Sobrien	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
256177298Sobrien
256277298Sobrien	/*
256377298Sobrien	 * 0x7FF is the end of the queue list.
256477298Sobrien	 * This means we won't use MSI-X vector 0 for a queue interrupt
256577298Sobrien	 * in MSIX mode.
256677298Sobrien	 */
256777298Sobrien	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
256877298Sobrien	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
256977298Sobrien	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
257077298Sobrien
257177298Sobrien	wr32(hw, I40E_PFINT_DYN_CTL0,
257277298Sobrien	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
257377298Sobrien	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
257477298Sobrien
257577298Sobrien	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
257677298Sobrien}
257777298Sobrien
257877298Sobrien/*
257977298Sobrien * Configure queue interrupt cause registers in hardware.
258077298Sobrien */
258177298Sobrienstatic void
258260484Sobrienixl_configure_queue_intr_msix(struct ixl_pf *pf)
258360484Sobrien{
258460484Sobrien	struct i40e_hw	*hw = &pf->hw;
258560484Sobrien	struct ixl_vsi *vsi = &pf->vsi;
258660484Sobrien	u32		reg;
258733965Sjdp	u16		vector = 1;
258833965Sjdp
258960484Sobrien	for (int i = 0; i < vsi->num_queues; i++, vector++) {
259060484Sobrien		wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
259160484Sobrien		/* First queue type is RX / 0 */
259238889Sjdp		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
259360484Sobrien
259438889Sjdp		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
259538889Sjdp		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
259638889Sjdp		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
259738889Sjdp		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
259838889Sjdp		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
259977298Sobrien		wr32(hw, I40E_QINT_RQCTL(i), reg);
260038889Sjdp
260138889Sjdp		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
260277298Sobrien		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
260338889Sjdp		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
260438889Sjdp		(IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
260538889Sjdp		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
260638889Sjdp		wr32(hw, I40E_QINT_TQCTL(i), reg);
260738889Sjdp	}
260838889Sjdp}
260938889Sjdp
261038889Sjdp/*
261138889Sjdp * Configure for MSI single vector operation
261238889Sjdp */
261338889Sjdpstatic void
261438889Sjdpixl_configure_legacy(struct ixl_pf *pf)
261538889Sjdp{
261660484Sobrien	struct i40e_hw	*hw = &pf->hw;
261777298Sobrien	u32		reg;
261877298Sobrien
261977298Sobrien	wr32(hw, I40E_PFINT_ITR0(0), 0);
262077298Sobrien	wr32(hw, I40E_PFINT_ITR0(1), 0);
262138889Sjdp
262233965Sjdp	/* Setup "other" causes */
262333965Sjdp	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
262433965Sjdp	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
262533965Sjdp	    | I40E_PFINT_ICR0_ENA_GRST_MASK
262633965Sjdp	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
262733965Sjdp	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
262833965Sjdp	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
262933965Sjdp	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
263033965Sjdp	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
263133965Sjdp	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
263233965Sjdp	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
263333965Sjdp	    ;
263477298Sobrien	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
263533965Sjdp
263633965Sjdp	/* SW_ITR_IDX = 0, but don't change INTENA */
263733965Sjdp	wr32(hw, I40E_PFINT_DYN_CTL0,
263833965Sjdp	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
263933965Sjdp	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
264033965Sjdp	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
264133965Sjdp	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
264233965Sjdp
264333965Sjdp	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
264433965Sjdp	wr32(hw, I40E_PFINT_LNKLST0, 0);
264533965Sjdp
264677298Sobrien	/* Associate the queue pair to the vector and enable the q int */
264777298Sobrien	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
264877298Sobrien	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
264977298Sobrien	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
265077298Sobrien	wr32(hw, I40E_QINT_RQCTL(0), reg);
265177298Sobrien
265233965Sjdp	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
265333965Sjdp	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
265433965Sjdp	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
265533965Sjdp	wr32(hw, I40E_QINT_TQCTL(0), reg);
265633965Sjdp}
265733965Sjdp
265833965Sjdp
265933965Sjdp/*
266033965Sjdp * Get initial ITR values from tunable values.
266133965Sjdp */
266233965Sjdpstatic void
266333965Sjdpixl_configure_itr(struct ixl_pf *pf)
266433965Sjdp{
266577298Sobrien	struct i40e_hw		*hw = &pf->hw;
266633965Sjdp	struct ixl_vsi		*vsi = &pf->vsi;
266733965Sjdp	struct ixl_queue	*que = vsi->queues;
266833965Sjdp
266933965Sjdp	vsi->rx_itr_setting = ixl_rx_itr;
267033965Sjdp	vsi->tx_itr_setting = ixl_tx_itr;
267133965Sjdp
267233965Sjdp	for (int i = 0; i < vsi->num_queues; i++, que++) {
267333965Sjdp		struct tx_ring	*txr = &que->txr;
267433965Sjdp		struct rx_ring 	*rxr = &que->rxr;
267533965Sjdp
267633965Sjdp		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
267733965Sjdp		    vsi->rx_itr_setting);
267833965Sjdp		rxr->itr = vsi->rx_itr_setting;
267933965Sjdp		rxr->latency = IXL_AVE_LATENCY;
268033965Sjdp
268133965Sjdp		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
268233965Sjdp		    vsi->tx_itr_setting);
268333965Sjdp		txr->itr = vsi->tx_itr_setting;
268433965Sjdp		txr->latency = IXL_AVE_LATENCY;
268533965Sjdp	}
268633965Sjdp}
268733965Sjdp
268833965Sjdp
268933965Sjdpstatic int
269033965Sjdpixl_allocate_pci_resources(struct ixl_pf *pf)
269133965Sjdp{
269233965Sjdp	int             rid;
269333965Sjdp	device_t        dev = pf->dev;
269433965Sjdp
269533965Sjdp	rid = PCIR_BAR(0);
269633965Sjdp	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
269760484Sobrien	    &rid, RF_ACTIVE);
269860484Sobrien
269960484Sobrien	if (!(pf->pci_mem)) {
270033965Sjdp		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
270133965Sjdp		return (ENXIO);
270233965Sjdp	}
270333965Sjdp
270460484Sobrien	pf->osdep.mem_bus_space_tag =
270533965Sjdp		rman_get_bustag(pf->pci_mem);
270633965Sjdp	pf->osdep.mem_bus_space_handle =
270733965Sjdp		rman_get_bushandle(pf->pci_mem);
270833965Sjdp	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
270960484Sobrien	pf->osdep.flush_reg = I40E_GLGEN_STAT;
271060484Sobrien	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
271160484Sobrien
271260484Sobrien	pf->hw.back = &pf->osdep;
271360484Sobrien
271460484Sobrien	/*
271560484Sobrien	** Now setup MSI or MSI/X, should
271633965Sjdp	** return us the number of supported
271733965Sjdp	** vectors. (Will be 1 for MSI)
271833965Sjdp	*/
271933965Sjdp	pf->msix = ixl_init_msix(pf);
272077298Sobrien	return (0);
272133965Sjdp}
272233965Sjdp
272333965Sjdp/*
272433965Sjdp * Teardown and release the admin queue/misc vector
272533965Sjdp * interrupt.
272633965Sjdp */
272733965Sjdpstatic int
272833965Sjdpixl_teardown_adminq_msix(struct ixl_pf *pf)
272933965Sjdp{
273033965Sjdp	device_t		dev = pf->dev;
273133965Sjdp	int			rid;
273233965Sjdp
273333965Sjdp	if (pf->admvec) /* we are doing MSIX */
273433965Sjdp		rid = pf->admvec + 1;
273533965Sjdp	else
273633965Sjdp		(pf->msix != 0) ? (rid = 1):(rid = 0);
273733965Sjdp
273833965Sjdp	// TODO: Check for errors from bus_teardown_intr
273933965Sjdp	// TODO: Check for errors from bus_release_resource
274033965Sjdp	if (pf->tag != NULL) {
274133965Sjdp		bus_teardown_intr(dev, pf->res, pf->tag);
274233965Sjdp		pf->tag = NULL;
274333965Sjdp	}
274433965Sjdp	if (pf->res != NULL) {
274533965Sjdp		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
274633965Sjdp		pf->res = NULL;
274733965Sjdp	}
274833965Sjdp
274933965Sjdp	return (0);
275077298Sobrien}
275133965Sjdp
275233965Sjdpstatic int
275333965Sjdpixl_teardown_queue_msix(struct ixl_vsi *vsi)
275433965Sjdp{
275577298Sobrien	struct ixl_queue	*que = vsi->queues;
275677298Sobrien	device_t		dev = vsi->dev;
275777298Sobrien	int			rid, error = 0;
275877298Sobrien
275977298Sobrien	/* We may get here before stations are setup */
276077298Sobrien	if ((!ixl_enable_msix) || (que == NULL))
276177298Sobrien		return (0);
276277298Sobrien
276333965Sjdp	/* Release all MSIX queue resources */
276433965Sjdp	for (int i = 0; i < vsi->num_queues; i++, que++) {
276533965Sjdp		rid = que->msix + 1;
276633965Sjdp		if (que->tag != NULL) {
276760484Sobrien			error = bus_teardown_intr(dev, que->res, que->tag);
276860484Sobrien			if (error) {
276960484Sobrien				device_printf(dev, "bus_teardown_intr() for"
277060484Sobrien				    " Queue %d interrupt failed\n",
277160484Sobrien				    que->me);
277277298Sobrien				// return (ENXIO);
277368765Sobrien			}
277468765Sobrien			que->tag = NULL;
277568765Sobrien		}
277668765Sobrien		if (que->res != NULL) {
277768765Sobrien			error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
277868765Sobrien			if (error) {
277977298Sobrien				device_printf(dev, "bus_release_resource() for"
278077298Sobrien				    " Queue %d interrupt failed [rid=%d]\n",
278177298Sobrien				    que->me, rid);
278233965Sjdp				// return (ENXIO);
278333965Sjdp			}
278433965Sjdp			que->res = NULL;
278533965Sjdp		}
278633965Sjdp	}
278733965Sjdp
278833965Sjdp	return (0);
278933965Sjdp}
279033965Sjdp
279133965Sjdpstatic void
279233965Sjdpixl_free_pci_resources(struct ixl_pf *pf)
279333965Sjdp{
279433965Sjdp	device_t		dev = pf->dev;
279533965Sjdp	int			memrid;
279633965Sjdp
279733965Sjdp	ixl_teardown_queue_msix(&pf->vsi);
279833965Sjdp	ixl_teardown_adminq_msix(pf);
279933965Sjdp
280033965Sjdp	if (pf->msix)
280133965Sjdp		pci_release_msi(dev);
280233965Sjdp
280333965Sjdp	memrid = PCIR_BAR(IXL_BAR);
280477298Sobrien
280533965Sjdp	if (pf->msix_mem != NULL)
280633965Sjdp		bus_release_resource(dev, SYS_RES_MEMORY,
280760484Sobrien		    memrid, pf->msix_mem);
280833965Sjdp
280960484Sobrien	if (pf->pci_mem != NULL)
281060484Sobrien		bus_release_resource(dev, SYS_RES_MEMORY,
281160484Sobrien		    PCIR_BAR(0), pf->pci_mem);
281260484Sobrien
281360484Sobrien	return;
281460484Sobrien}
281560484Sobrien
281660484Sobrienstatic void
281760484Sobrienixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
281860484Sobrien{
281960484Sobrien	/* Display supported media types */
282060484Sobrien	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
282160484Sobrien		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
282260484Sobrien
282360484Sobrien	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
282460484Sobrien		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
282560484Sobrien	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
282660484Sobrien		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
282760484Sobrien	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
282860484Sobrien		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
282960484Sobrien
283060484Sobrien	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
283160484Sobrien	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
283277298Sobrien	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
283333965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
283438889Sjdp
283538889Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
283638889Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
283777298Sobrien	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
283860484Sobrien		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
283960484Sobrien	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
284038889Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
284133965Sjdp
284233965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
284333965Sjdp	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
284433965Sjdp	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
284533965Sjdp	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
284633965Sjdp	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
284760484Sobrien		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
284833965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
284938889Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
285033965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
285133965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
285233965Sjdp
285333965Sjdp#ifndef IFM_ETH_XTYPE
285433965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
285533965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
285633965Sjdp
285733965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
285833965Sjdp	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
285933965Sjdp	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
286033965Sjdp	    phy_type & (1 << I40E_PHY_TYPE_SFI))
286133965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
286233965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
286333965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
286433965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
286533965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
286633965Sjdp
286733965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
286833965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
286933965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
287033965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
287133965Sjdp#else
287233965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
287333965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
287433965Sjdp
287577298Sobrien	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
287677298Sobrien	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
287733965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
287833965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
287933965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
288033965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
288133965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
288233965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
288333965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
288433965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
288533965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
288633965Sjdp
288733965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
288833965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
288933965Sjdp
289033965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
289133965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
289233965Sjdp	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
289333965Sjdp		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
289477298Sobrien#endif
289533965Sjdp}
289633965Sjdp
289733965Sjdp/*********************************************************************
289833965Sjdp *
289933965Sjdp *  Setup networking device structure and register an interface.
290033965Sjdp *
290133965Sjdp **********************************************************************/
290233965Sjdpstatic int
290333965Sjdpixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
290433965Sjdp{
290533965Sjdp	struct ifnet		*ifp;
290633965Sjdp	struct i40e_hw		*hw = vsi->hw;
290733965Sjdp	struct ixl_queue	*que = vsi->queues;
290833965Sjdp	struct i40e_aq_get_phy_abilities_resp abilities;
290933965Sjdp	enum i40e_status_code aq_error = 0;
291033965Sjdp
291133965Sjdp	INIT_DEBUGOUT("ixl_setup_interface: begin");
291233965Sjdp
291333965Sjdp	ifp = vsi->ifp = if_alloc(IFT_ETHER);
291460484Sobrien	if (ifp == NULL) {
291533965Sjdp		device_printf(dev, "can not allocate ifnet structure\n");
291633965Sjdp		return (-1);
291760484Sobrien	}
291833965Sjdp	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
291933965Sjdp	ifp->if_mtu = ETHERMTU;
292033965Sjdp	ifp->if_baudrate = IF_Gbps(40);
292133965Sjdp	ifp->if_init = ixl_init;
292260484Sobrien	ifp->if_softc = vsi;
292333965Sjdp	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
292433965Sjdp	ifp->if_ioctl = ixl_ioctl;
292533965Sjdp
292633965Sjdp#if __FreeBSD_version >= 1100036
292733965Sjdp	if_setgetcounterfn(ifp, ixl_get_counter);
292833965Sjdp#endif
292960484Sobrien
293033965Sjdp	ifp->if_transmit = ixl_mq_start;
293133965Sjdp
293260484Sobrien	ifp->if_qflush = ixl_qflush;
293360484Sobrien
293460484Sobrien	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
293560484Sobrien
293660484Sobrien	vsi->max_frame_size =
293760484Sobrien	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
293860484Sobrien	    + ETHER_VLAN_ENCAP_LEN;
293960484Sobrien
294060484Sobrien	/*
294160484Sobrien	 * Tell the upper layer(s) we support long frames.
294260484Sobrien	 */
294360484Sobrien	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
294460484Sobrien
294560484Sobrien	ifp->if_capabilities |= IFCAP_HWCSUM;
294660484Sobrien	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
294760484Sobrien	ifp->if_capabilities |= IFCAP_TSO;
294860484Sobrien	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
294960484Sobrien	ifp->if_capabilities |= IFCAP_LRO;
295060484Sobrien
295160484Sobrien	/* VLAN capabilties */
295260484Sobrien	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
295360484Sobrien			     |  IFCAP_VLAN_HWTSO
295460484Sobrien			     |  IFCAP_VLAN_MTU
295560484Sobrien			     |  IFCAP_VLAN_HWCSUM;
295660484Sobrien	ifp->if_capenable = ifp->if_capabilities;
295760484Sobrien
295860484Sobrien	/*
295960484Sobrien	** Don't turn this on by default, if vlans are
296060484Sobrien	** created on another pseudo device (eg. lagg)
296160484Sobrien	** then vlan events are not passed thru, breaking
296260484Sobrien	** operation, but with HW FILTER off it works. If
296360484Sobrien	** using vlans directly on the ixl driver you can
296460484Sobrien	** enable this and get full hardware tag filtering.
296560484Sobrien	*/
296660484Sobrien	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
296760484Sobrien
296860484Sobrien	/*
296960484Sobrien	 * Specify the media types supported by this adapter and register
297060484Sobrien	 * callbacks to update media and link information
297160484Sobrien	 */
297260484Sobrien	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
297360484Sobrien		     ixl_media_status);
297460484Sobrien
297560484Sobrien	aq_error = i40e_aq_get_phy_capabilities(hw,
297660484Sobrien	    FALSE, TRUE, &abilities, NULL);
297760484Sobrien	/* May need delay to detect fiber correctly */
297860484Sobrien	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
297960484Sobrien		i40e_msec_delay(200);
298060484Sobrien		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
298160484Sobrien		    TRUE, &abilities, NULL);
298260484Sobrien	}
298360484Sobrien	if (aq_error) {
298460484Sobrien		if (aq_error == I40E_ERR_UNKNOWN_PHY)
298560484Sobrien			device_printf(dev, "Unknown PHY type detected!\n");
298660484Sobrien		else
298760484Sobrien			device_printf(dev,
298860484Sobrien			    "Error getting supported media types, err %d,"
298960484Sobrien			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
299060484Sobrien		return (0);
299160484Sobrien	}
299260484Sobrien
299360484Sobrien	ixl_add_ifmedia(vsi, abilities.phy_type);
299460484Sobrien
299560484Sobrien	/* Use autoselect media by default */
299660484Sobrien	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
299760484Sobrien	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
299860484Sobrien
299960484Sobrien	ether_ifattach(ifp, hw->mac.addr);
300060484Sobrien
300160484Sobrien	return (0);
300260484Sobrien}
300360484Sobrien
300433965Sjdp/*
300560484Sobrien** Run when the Admin Queue gets a link state change interrupt.
300633965Sjdp*/
300760484Sobrienstatic void
300833965Sjdpixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
300938889Sjdp{
301060484Sobrien	struct i40e_hw	*hw = &pf->hw;
301160484Sobrien	device_t dev = pf->dev;
301260484Sobrien	struct i40e_aqc_get_link_status *status =
301338889Sjdp	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
301460484Sobrien
301560484Sobrien	/* Request link status from adapter */
301660484Sobrien	hw->phy.get_link_info = TRUE;
301760484Sobrien	i40e_get_link_status(hw, &pf->link_up);
301838889Sjdp
301960484Sobrien	/* Print out message if an unqualified module is found */
302060484Sobrien	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
302160484Sobrien	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
302260484Sobrien	    (!(status->link_info & I40E_AQ_LINK_UP)))
302360484Sobrien		device_printf(dev, "Link failed because "
302460484Sobrien		    "an unqualified module was detected!\n");
302560484Sobrien
302660484Sobrien	/* Update OS link info */
302760484Sobrien	ixl_update_link_status(pf);
302838889Sjdp}
302938889Sjdp
303060484Sobrien/*********************************************************************
303133965Sjdp *
303233965Sjdp *  Get Firmware Switch configuration
303333965Sjdp *	- this will need to be more robust when more complex
303460484Sobrien *	  switch configurations are enabled.
303560484Sobrien *
303660484Sobrien **********************************************************************/
303760484Sobrienstatic int
303860484Sobrienixl_switch_config(struct ixl_pf *pf)
303960484Sobrien{
304033965Sjdp	struct i40e_hw	*hw = &pf->hw;
304133965Sjdp	struct ixl_vsi	*vsi = &pf->vsi;
304233965Sjdp	device_t 	dev = vsi->dev;
304377298Sobrien	struct i40e_aqc_get_switch_config_resp *sw_config;
304438889Sjdp	u8	aq_buf[I40E_AQ_LARGE_BUF];
304533965Sjdp	int	ret;
304633965Sjdp	u16	next = 0;
304733965Sjdp
304833965Sjdp	memset(&aq_buf, 0, sizeof(aq_buf));
304933965Sjdp	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
305033965Sjdp	ret = i40e_aq_get_switch_config(hw, sw_config,
305133965Sjdp	    sizeof(aq_buf), &next, NULL);
305233965Sjdp	if (ret) {
305333965Sjdp		device_printf(dev, "aq_get_switch_config() failed, error %d,"
305433965Sjdp		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
305533965Sjdp		return (ret);
305633965Sjdp	}
305733965Sjdp#ifdef IXL_DEBUG
305833965Sjdp	device_printf(dev,
305933965Sjdp	    "Switch config: header reported: %d in structure, %d total\n",
306033965Sjdp    	    sw_config->header.num_reported, sw_config->header.num_total);
306133965Sjdp	for (int i = 0; i < sw_config->header.num_reported; i++) {
306233965Sjdp		device_printf(dev,
306333965Sjdp		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
306433965Sjdp		    sw_config->element[i].element_type,
306533965Sjdp		    sw_config->element[i].seid,
306633965Sjdp		    sw_config->element[i].uplink_seid,
306733965Sjdp		    sw_config->element[i].downlink_seid);
306833965Sjdp	}
306933965Sjdp#endif
307060484Sobrien	/* Simplified due to a single VSI at the moment */
307133965Sjdp	vsi->uplink_seid = sw_config->element[0].uplink_seid;
307233965Sjdp	vsi->downlink_seid = sw_config->element[0].downlink_seid;
307333965Sjdp	vsi->seid = sw_config->element[0].seid;
307433965Sjdp	return (ret);
307533965Sjdp}
307638889Sjdp
307733965Sjdp/*********************************************************************
307833965Sjdp *
307933965Sjdp *  Initialize the VSI:  this handles contexts, which means things
308033965Sjdp *  			 like the number of descriptors, buffer size,
308133965Sjdp *			 plus we init the rings thru this function.
308233965Sjdp *
308333965Sjdp **********************************************************************/
308433965Sjdpstatic int
308533965Sjdpixl_initialize_vsi(struct ixl_vsi *vsi)
308633965Sjdp{
308733965Sjdp	struct ixl_pf		*pf = vsi->back;
308833965Sjdp	struct ixl_queue	*que = vsi->queues;
308933965Sjdp	device_t		dev = vsi->dev;
309033965Sjdp	struct i40e_hw		*hw = vsi->hw;
309133965Sjdp	struct i40e_vsi_context	ctxt;
309233965Sjdp	int			err = 0;
309333965Sjdp
309433965Sjdp	memset(&ctxt, 0, sizeof(ctxt));
309533965Sjdp	ctxt.seid = vsi->seid;
309633965Sjdp	if (pf->veb_seid != 0)
309738889Sjdp		ctxt.uplink_seid = pf->veb_seid;
309838889Sjdp	ctxt.pf_num = hw->pf_id;
309938889Sjdp	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
310038889Sjdp	if (err) {
310138889Sjdp		device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
310238889Sjdp		    " aq_error %d\n", err, hw->aq.asq_last_status);
310338889Sjdp		return (err);
310438889Sjdp	}
310533965Sjdp#ifdef IXL_DEBUG
310633965Sjdp	device_printf(dev, "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
310733965Sjdp	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
310833965Sjdp	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
310933965Sjdp	    ctxt.uplink_seid, ctxt.vsi_number,
311033965Sjdp	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
311133965Sjdp	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
311233965Sjdp	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
311333965Sjdp#endif
311433965Sjdp	/*
311533965Sjdp	** Set the queue and traffic class bits
311633965Sjdp	**  - when multiple traffic classes are supported
311733965Sjdp	**    this will need to be more robust.
311833965Sjdp	*/
311933965Sjdp	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
312033965Sjdp	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
312133965Sjdp	/* In contig mode, que_mapping[0] is first queue index used by this VSI */
312233965Sjdp	ctxt.info.queue_mapping[0] = 0;
312333965Sjdp	/*
312433965Sjdp	 * This VSI will only use traffic class 0; start traffic class 0's
312533965Sjdp	 * queue allocation at queue 0, and assign it 64 (2^6) queues (though
312633965Sjdp	 * the driver may not use all of them).
312733965Sjdp	 */
312833965Sjdp	ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
312933965Sjdp	    & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
313033965Sjdp	    ((6 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
313133965Sjdp	    & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
313233965Sjdp
313333965Sjdp	/* Set VLAN receive stripping mode */
313433965Sjdp	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
313533965Sjdp	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
313633965Sjdp	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
313733965Sjdp		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
313833965Sjdp	else
313933965Sjdp		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
314033965Sjdp
314133965Sjdp	/* Keep copy of VSI info in VSI for statistic counters */
314233965Sjdp	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
314333965Sjdp
314433965Sjdp	/* Reset VSI statistics */
314533965Sjdp	ixl_vsi_reset_stats(vsi);
314633965Sjdp	vsi->hw_filters_add = 0;
314733965Sjdp	vsi->hw_filters_del = 0;
314833965Sjdp
314933965Sjdp	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
315033965Sjdp
315133965Sjdp	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
315233965Sjdp	if (err) {
315333965Sjdp		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d, aq_error %d\n",
315433965Sjdp		   err, hw->aq.asq_last_status);
315533965Sjdp		return (err);
315633965Sjdp	}
315733965Sjdp
315833965Sjdp	for (int i = 0; i < vsi->num_queues; i++, que++) {
315933965Sjdp		struct tx_ring		*txr = &que->txr;
316033965Sjdp		struct rx_ring 		*rxr = &que->rxr;
316133965Sjdp		struct i40e_hmc_obj_txq tctx;
316233965Sjdp		struct i40e_hmc_obj_rxq rctx;
316333965Sjdp		u32			txctl;
316433965Sjdp		u16			size;
316533965Sjdp
316633965Sjdp		/* Setup the HMC TX Context  */
316733965Sjdp		size = que->num_desc * sizeof(struct i40e_tx_desc);
316833965Sjdp		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
316933965Sjdp		tctx.new_context = 1;
317033965Sjdp		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
317133965Sjdp		tctx.qlen = que->num_desc;
317233965Sjdp		tctx.fc_ena = 0;
317333965Sjdp		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
317433965Sjdp		/* Enable HEAD writeback */
317533965Sjdp		tctx.head_wb_ena = 1;
317633965Sjdp		tctx.head_wb_addr = txr->dma.pa +
317733965Sjdp		    (que->num_desc * sizeof(struct i40e_tx_desc));
317833965Sjdp		tctx.rdylist_act = 0;
317933965Sjdp		err = i40e_clear_lan_tx_queue_context(hw, i);
318033965Sjdp		if (err) {
318133965Sjdp			device_printf(dev, "Unable to clear TX context\n");
318233965Sjdp			break;
318333965Sjdp		}
318433965Sjdp		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
318533965Sjdp		if (err) {
318633965Sjdp			device_printf(dev, "Unable to set TX context\n");
318733965Sjdp			break;
318833965Sjdp		}
318933965Sjdp		/* Associate the ring with this PF */
319033965Sjdp		txctl = I40E_QTX_CTL_PF_QUEUE;
319133965Sjdp		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
319233965Sjdp		    I40E_QTX_CTL_PF_INDX_MASK);
319333965Sjdp		wr32(hw, I40E_QTX_CTL(i), txctl);
319433965Sjdp		ixl_flush(hw);
319533965Sjdp
319633965Sjdp		/* Do ring (re)init */
319733965Sjdp		ixl_init_tx_ring(que);
319833965Sjdp
319933965Sjdp		/* Next setup the HMC RX Context  */
320033965Sjdp		if (vsi->max_frame_size <= MCLBYTES)
320133965Sjdp			rxr->mbuf_sz = MCLBYTES;
320233965Sjdp		else
320333965Sjdp			rxr->mbuf_sz = MJUMPAGESIZE;
320433965Sjdp
320533965Sjdp		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
320633965Sjdp
320733965Sjdp		/* Set up an RX context for the HMC */
320833965Sjdp		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
320933965Sjdp		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
321033965Sjdp		/* ignore header split for now */
321133965Sjdp		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
321233965Sjdp		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
321333965Sjdp		    vsi->max_frame_size : max_rxmax;
321433965Sjdp		rctx.dtype = 0;
321533965Sjdp		rctx.dsize = 1;	/* do 32byte descriptors */
321633965Sjdp		rctx.hsplit_0 = 0;  /* no HDR split initially */
321733965Sjdp		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
321833965Sjdp		rctx.qlen = que->num_desc;
321933965Sjdp		rctx.tphrdesc_ena = 1;
322033965Sjdp		rctx.tphwdesc_ena = 1;
322133965Sjdp		rctx.tphdata_ena = 0;
322233965Sjdp		rctx.tphhead_ena = 0;
322333965Sjdp		rctx.lrxqthresh = 2;
322433965Sjdp		rctx.crcstrip = 1;
322533965Sjdp		rctx.l2tsel = 1;
322633965Sjdp		rctx.showiv = 1;
322760484Sobrien		rctx.fc_ena = 0;
322833965Sjdp		rctx.prefena = 1;
322933965Sjdp
323033965Sjdp		err = i40e_clear_lan_rx_queue_context(hw, i);
323133965Sjdp		if (err) {
323277298Sobrien			device_printf(dev,
323333965Sjdp			    "Unable to clear RX context %d\n", i);
323433965Sjdp			break;
323533965Sjdp		}
323633965Sjdp		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
323733965Sjdp		if (err) {
323833965Sjdp			device_printf(dev, "Unable to set RX context %d\n", i);
323933965Sjdp			break;
324033965Sjdp		}
324133965Sjdp		err = ixl_init_rx_ring(que);
324233965Sjdp		if (err) {
324333965Sjdp			device_printf(dev, "Fail in init_rx_ring %d\n", i);
324433965Sjdp			break;
324533965Sjdp		}
324633965Sjdp#ifdef DEV_NETMAP
324733965Sjdp		/* preserve queue */
324833965Sjdp		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
324960484Sobrien			struct netmap_adapter *na = NA(vsi->ifp);
325033965Sjdp			struct netmap_kring *kring = &na->rx_rings[i];
325133965Sjdp			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
325233965Sjdp			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
325333965Sjdp		} else
325433965Sjdp#endif /* DEV_NETMAP */
325533965Sjdp		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
325633965Sjdp	}
325733965Sjdp	return (err);
325833965Sjdp}
325933965Sjdp
326033965Sjdp
326133965Sjdp/*********************************************************************
326233965Sjdp *
326377298Sobrien *  Free all VSI structs.
326477298Sobrien *
326577298Sobrien **********************************************************************/
326677298Sobrienvoid
326777298Sobrienixl_free_vsi(struct ixl_vsi *vsi)
326877298Sobrien{
326977298Sobrien	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
327077298Sobrien	struct ixl_queue	*que = vsi->queues;
327133965Sjdp
327277298Sobrien	/* Free station queues */
327377298Sobrien	if (!vsi->queues)
327433965Sjdp		goto free_filters;
327533965Sjdp
327633965Sjdp	for (int i = 0; i < vsi->num_queues; i++, que++) {
327733965Sjdp		struct tx_ring *txr = &que->txr;
327833965Sjdp		struct rx_ring *rxr = &que->rxr;
327933965Sjdp
328033965Sjdp		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
328133965Sjdp			continue;
328233965Sjdp		IXL_TX_LOCK(txr);
328333965Sjdp		ixl_free_que_tx(que);
328433965Sjdp		if (txr->base)
328560484Sobrien			i40e_free_dma_mem(&pf->hw, &txr->dma);
328633965Sjdp		IXL_TX_UNLOCK(txr);
328733965Sjdp		IXL_TX_LOCK_DESTROY(txr);
328833965Sjdp
328933965Sjdp		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
329033965Sjdp			continue;
329133965Sjdp		IXL_RX_LOCK(rxr);
329233965Sjdp		ixl_free_que_rx(que);
329333965Sjdp		if (rxr->base)
329460484Sobrien			i40e_free_dma_mem(&pf->hw, &rxr->dma);
329577298Sobrien		IXL_RX_UNLOCK(rxr);
329677298Sobrien		IXL_RX_LOCK_DESTROY(rxr);
329777298Sobrien
329877298Sobrien	}
329977298Sobrien	free(vsi->queues, M_DEVBUF);
330033965Sjdp
330133965Sjdpfree_filters:
330233965Sjdp	/* Free VSI filter list */
330333965Sjdp	ixl_free_mac_filters(vsi);
330433965Sjdp}
330533965Sjdp
330633965Sjdpstatic void
330733965Sjdpixl_free_mac_filters(struct ixl_vsi *vsi)
330833965Sjdp{
330933965Sjdp	struct ixl_mac_filter *f;
331033965Sjdp
331160484Sobrien	while (!SLIST_EMPTY(&vsi->ftl)) {
331233965Sjdp		f = SLIST_FIRST(&vsi->ftl);
331333965Sjdp		SLIST_REMOVE_HEAD(&vsi->ftl, next);
331460484Sobrien		free(f, M_DEVBUF);
331533965Sjdp	}
331633965Sjdp}
331760484Sobrien
331833965Sjdp
331933965Sjdp/*********************************************************************
332033965Sjdp *
332133965Sjdp *  Allocate memory for the VSI (virtual station interface) and their
332233965Sjdp *  associated queues, rings and the descriptors associated with each,
332333965Sjdp *  called only once at attach.
332433965Sjdp *
332533965Sjdp **********************************************************************/
332633965Sjdpstatic int
332733965Sjdpixl_setup_stations(struct ixl_pf *pf)
332833965Sjdp{
332933965Sjdp	device_t		dev = pf->dev;
333033965Sjdp	struct ixl_vsi		*vsi;
333133965Sjdp	struct ixl_queue	*que;
333260484Sobrien	struct tx_ring		*txr;
333333965Sjdp	struct rx_ring		*rxr;
333433965Sjdp	int 			rsize, tsize;
333533965Sjdp	int			error = I40E_SUCCESS;
333633965Sjdp
333760484Sobrien	vsi = &pf->vsi;
333860484Sobrien	vsi->back = (void *)pf;
333933965Sjdp	vsi->hw = &pf->hw;
334033965Sjdp	vsi->id = 0;
334133965Sjdp	vsi->num_vlans = 0;
334233965Sjdp	vsi->back = pf;
334333965Sjdp
334433965Sjdp	/* Get memory for the station queues */
334533965Sjdp        if (!(vsi->queues =
334633965Sjdp            (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
334733965Sjdp            vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
334833965Sjdp                device_printf(dev, "Unable to allocate queue memory\n");
334960484Sobrien                error = ENOMEM;
335033965Sjdp                goto early;
335133965Sjdp        }
335233965Sjdp
335333965Sjdp	for (int i = 0; i < vsi->num_queues; i++) {
335433965Sjdp		que = &vsi->queues[i];
335533965Sjdp		que->num_desc = ixl_ringsz;
335633965Sjdp		que->me = i;
335733965Sjdp		que->vsi = vsi;
335833965Sjdp		/* mark the queue as active */
335933965Sjdp		vsi->active_queues |= (u64)1 << que->me;
336033965Sjdp		txr = &que->txr;
336133965Sjdp		txr->que = que;
336233965Sjdp		txr->tail = I40E_QTX_TAIL(que->me);
336333965Sjdp
336433965Sjdp		/* Initialize the TX lock */
336533965Sjdp		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
336633965Sjdp		    device_get_nameunit(dev), que->me);
336733965Sjdp		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
336833965Sjdp		/* Create the TX descriptor ring */
336933965Sjdp		tsize = roundup2((que->num_desc *
337033965Sjdp		    sizeof(struct i40e_tx_desc)) +
337133965Sjdp		    sizeof(u32), DBA_ALIGN);
337233965Sjdp		if (i40e_allocate_dma_mem(&pf->hw,
337333965Sjdp		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
337477298Sobrien			device_printf(dev,
337577298Sobrien			    "Unable to allocate TX Descriptor memory\n");
337677298Sobrien			error = ENOMEM;
337733965Sjdp			goto fail;
337833965Sjdp		}
337933965Sjdp		txr->base = (struct i40e_tx_desc *)txr->dma.va;
338033965Sjdp		bzero((void *)txr->base, tsize);
338133965Sjdp       		/* Now allocate transmit soft structs for the ring */
338233965Sjdp       		if (ixl_allocate_tx_data(que)) {
338333965Sjdp			device_printf(dev,
338433965Sjdp			    "Critical Failure setting up TX structures\n");
338533965Sjdp			error = ENOMEM;
338633965Sjdp			goto fail;
338733965Sjdp       		}
338833965Sjdp		/* Allocate a buf ring */
338933965Sjdp		txr->br = buf_ring_alloc(4096, M_DEVBUF,
339033965Sjdp		    M_NOWAIT, &txr->mtx);
339133965Sjdp		if (txr->br == NULL) {
339233965Sjdp			device_printf(dev,
339333965Sjdp			    "Critical Failure setting up TX buf ring\n");
339433965Sjdp			error = ENOMEM;
339533965Sjdp			goto fail;
339633965Sjdp       		}
339733965Sjdp
339833965Sjdp		/*
339933965Sjdp		 * Next the RX queues...
340033965Sjdp		 */
340133965Sjdp		rsize = roundup2(que->num_desc *
340233965Sjdp		    sizeof(union i40e_rx_desc), DBA_ALIGN);
340333965Sjdp		rxr = &que->rxr;
340433965Sjdp		rxr->que = que;
340533965Sjdp		rxr->tail = I40E_QRX_TAIL(que->me);
340633965Sjdp
340760484Sobrien		/* Initialize the RX side lock */
340860484Sobrien		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
340933965Sjdp		    device_get_nameunit(dev), que->me);
341033965Sjdp		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
341160484Sobrien
341233965Sjdp		if (i40e_allocate_dma_mem(&pf->hw,
341333965Sjdp		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
341460484Sobrien			device_printf(dev,
341560484Sobrien			    "Unable to allocate RX Descriptor memory\n");
341660484Sobrien			error = ENOMEM;
341733965Sjdp			goto fail;
341833965Sjdp		}
341977298Sobrien		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
342033965Sjdp		bzero((void *)rxr->base, rsize);
342133965Sjdp
342233965Sjdp        	/* Allocate receive soft structs for the ring*/
342333965Sjdp		if (ixl_allocate_rx_data(que)) {
342433965Sjdp			device_printf(dev,
342533965Sjdp			    "Critical Failure setting up receive structs\n");
342633965Sjdp			error = ENOMEM;
342777298Sobrien			goto fail;
342833965Sjdp		}
342933965Sjdp	}
343033965Sjdp
343133965Sjdp	return (0);
343233965Sjdp
343333965Sjdpfail:
343433965Sjdp	for (int i = 0; i < vsi->num_queues; i++) {
343533965Sjdp		que = &vsi->queues[i];
343633965Sjdp		rxr = &que->rxr;
343733965Sjdp		txr = &que->txr;
343833965Sjdp		if (rxr->base)
343933965Sjdp			i40e_free_dma_mem(&pf->hw, &rxr->dma);
344033965Sjdp		if (txr->base)
344133965Sjdp			i40e_free_dma_mem(&pf->hw, &txr->dma);
344233965Sjdp	}
344333965Sjdp
344433965Sjdpearly:
344533965Sjdp	return (error);
344633965Sjdp}
344733965Sjdp
344877298Sobrien/*
344960484Sobrien** Provide a update to the queue RX
345077298Sobrien** interrupt moderation value.
345160484Sobrien*/
345233965Sjdpstatic void
345377298Sobrienixl_set_queue_rx_itr(struct ixl_queue *que)
345477298Sobrien{
345560484Sobrien	struct ixl_vsi	*vsi = que->vsi;
345633965Sjdp	struct i40e_hw	*hw = vsi->hw;
345733965Sjdp	struct rx_ring	*rxr = &que->rxr;
345860484Sobrien	u16		rx_itr;
345960484Sobrien	u16		rx_latency = 0;
346060484Sobrien	int		rx_bytes;
346160484Sobrien
346260484Sobrien	/* Idle, do nothing */
346333965Sjdp	if (rxr->bytes == 0)
346433965Sjdp		return;
346533965Sjdp
346633965Sjdp	if (ixl_dynamic_rx_itr) {
346733965Sjdp		rx_bytes = rxr->bytes/rxr->itr;
346833965Sjdp		rx_itr = rxr->itr;
346933965Sjdp
347060484Sobrien		/* Adjust latency range */
347177298Sobrien		switch (rxr->latency) {
347233965Sjdp		case IXL_LOW_LATENCY:
347333965Sjdp			if (rx_bytes > 10) {
347433965Sjdp				rx_latency = IXL_AVE_LATENCY;
347533965Sjdp				rx_itr = IXL_ITR_20K;
347633965Sjdp			}
347733965Sjdp			break;
347833965Sjdp		case IXL_AVE_LATENCY:
347933965Sjdp			if (rx_bytes > 20) {
348033965Sjdp				rx_latency = IXL_BULK_LATENCY;
348133965Sjdp				rx_itr = IXL_ITR_8K;
348233965Sjdp			} else if (rx_bytes <= 10) {
348338889Sjdp				rx_latency = IXL_LOW_LATENCY;
348438889Sjdp				rx_itr = IXL_ITR_100K;
348533965Sjdp			}
348638889Sjdp			break;
348733965Sjdp		case IXL_BULK_LATENCY:
348833965Sjdp			if (rx_bytes <= 20) {
348933965Sjdp				rx_latency = IXL_AVE_LATENCY;
349038889Sjdp				rx_itr = IXL_ITR_20K;
349138889Sjdp			}
349233965Sjdp			break;
349338889Sjdp       		 }
349438889Sjdp
349538889Sjdp		rxr->latency = rx_latency;
349638889Sjdp
349738889Sjdp		if (rx_itr != rxr->itr) {
349838889Sjdp			/* do an exponential smoothing */
349933965Sjdp			rx_itr = (10 * rx_itr * rxr->itr) /
350033965Sjdp			    ((9 * rx_itr) + rxr->itr);
350133965Sjdp			rxr->itr = rx_itr & IXL_MAX_ITR;
350233965Sjdp			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
350333965Sjdp			    que->me), rxr->itr);
350433965Sjdp		}
350533965Sjdp	} else { /* We may have have toggled to non-dynamic */
350633965Sjdp		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
350733965Sjdp			vsi->rx_itr_setting = ixl_rx_itr;
350838889Sjdp		/* Update the hardware if needed */
350938889Sjdp		if (rxr->itr != vsi->rx_itr_setting) {
351038889Sjdp			rxr->itr = vsi->rx_itr_setting;
351133965Sjdp			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
351233965Sjdp			    que->me), rxr->itr);
351333965Sjdp		}
351433965Sjdp	}
351533965Sjdp	rxr->bytes = 0;
351633965Sjdp	rxr->packets = 0;
351733965Sjdp	return;
351833965Sjdp}
351933965Sjdp
352033965Sjdp
352133965Sjdp/*
352233965Sjdp** Provide a update to the queue TX
352333965Sjdp** interrupt moderation value.
352433965Sjdp*/
352533965Sjdpstatic void
352633965Sjdpixl_set_queue_tx_itr(struct ixl_queue *que)
352733965Sjdp{
352833965Sjdp	struct ixl_vsi	*vsi = que->vsi;
352933965Sjdp	struct i40e_hw	*hw = vsi->hw;
353033965Sjdp	struct tx_ring	*txr = &que->txr;
353133965Sjdp	u16		tx_itr;
353233965Sjdp	u16		tx_latency = 0;
353333965Sjdp	int		tx_bytes;
353433965Sjdp
353533965Sjdp
353633965Sjdp	/* Idle, do nothing */
353733965Sjdp	if (txr->bytes == 0)
353833965Sjdp		return;
353933965Sjdp
354033965Sjdp	if (ixl_dynamic_tx_itr) {
354133965Sjdp		tx_bytes = txr->bytes/txr->itr;
354233965Sjdp		tx_itr = txr->itr;
354333965Sjdp
354433965Sjdp		switch (txr->latency) {
354533965Sjdp		case IXL_LOW_LATENCY:
354633965Sjdp			if (tx_bytes > 10) {
354738889Sjdp				tx_latency = IXL_AVE_LATENCY;
354838889Sjdp				tx_itr = IXL_ITR_20K;
354938889Sjdp			}
355038889Sjdp			break;
355138889Sjdp		case IXL_AVE_LATENCY:
355238889Sjdp			if (tx_bytes > 20) {
355338889Sjdp				tx_latency = IXL_BULK_LATENCY;
355438889Sjdp				tx_itr = IXL_ITR_8K;
355538889Sjdp			} else if (tx_bytes <= 10) {
355638889Sjdp				tx_latency = IXL_LOW_LATENCY;
355738889Sjdp				tx_itr = IXL_ITR_100K;
355838889Sjdp			}
355938889Sjdp			break;
356038889Sjdp		case IXL_BULK_LATENCY:
356138889Sjdp			if (tx_bytes <= 20) {
356238889Sjdp				tx_latency = IXL_AVE_LATENCY;
356338889Sjdp				tx_itr = IXL_ITR_20K;
356438889Sjdp			}
356533965Sjdp			break;
356633965Sjdp		}
356733965Sjdp
356833965Sjdp		txr->latency = tx_latency;
356933965Sjdp
357033965Sjdp		if (tx_itr != txr->itr) {
357133965Sjdp       	         /* do an exponential smoothing */
357233965Sjdp			tx_itr = (10 * tx_itr * txr->itr) /
357333965Sjdp			    ((9 * tx_itr) + txr->itr);
357433965Sjdp			txr->itr = tx_itr & IXL_MAX_ITR;
357533965Sjdp			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
357633965Sjdp			    que->me), txr->itr);
357733965Sjdp		}
357833965Sjdp
357933965Sjdp	} else { /* We may have have toggled to non-dynamic */
358033965Sjdp		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
358133965Sjdp			vsi->tx_itr_setting = ixl_tx_itr;
358233965Sjdp		/* Update the hardware if needed */
358333965Sjdp		if (txr->itr != vsi->tx_itr_setting) {
358433965Sjdp			txr->itr = vsi->tx_itr_setting;
358533965Sjdp			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
358633965Sjdp			    que->me), txr->itr);
358733965Sjdp		}
358833965Sjdp	}
358933965Sjdp	txr->bytes = 0;
359033965Sjdp	txr->packets = 0;
359133965Sjdp	return;
359260484Sobrien}
359377298Sobrien
359433965Sjdp#define QUEUE_NAME_LEN 32
359533965Sjdp
359633965Sjdpstatic void
359733965Sjdpixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
359833965Sjdp    struct sysctl_ctx_list *ctx, const char *sysctl_name)
359977298Sobrien{
360033965Sjdp	struct sysctl_oid *tree;
360133965Sjdp	struct sysctl_oid_list *child;
360233965Sjdp	struct sysctl_oid_list *vsi_list;
360333965Sjdp
360433965Sjdp	tree = device_get_sysctl_tree(pf->dev);
360533965Sjdp	child = SYSCTL_CHILDREN(tree);
360633965Sjdp	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
360733965Sjdp				   CTLFLAG_RD, NULL, "VSI Number");
360833965Sjdp	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
360933965Sjdp
361033965Sjdp	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
361133965Sjdp}
361233965Sjdp
361333965Sjdp#ifdef IXL_DEBUG
361460484Sobrien/**
361533965Sjdp * ixl_sysctl_qtx_tail_handler
361633965Sjdp * Retrieves I40E_QTX_TAIL value from hardware
361733965Sjdp * for a sysctl.
361833965Sjdp */
361933965Sjdpstatic int
362060484Sobrienixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
362133965Sjdp{
362233965Sjdp	struct ixl_queue *que;
362333965Sjdp	int error;
362433965Sjdp	u32 val;
362533965Sjdp
362633965Sjdp	que = ((struct ixl_queue *)oidp->oid_arg1);
362733965Sjdp	if (!que) return 0;
362833965Sjdp
362933965Sjdp	val = rd32(que->vsi->hw, que->txr.tail);
363033965Sjdp	error = sysctl_handle_int(oidp, &val, 0, req);
363133965Sjdp	if (error || !req->newptr)
363260484Sobrien		return error;
363360484Sobrien	return (0);
363460484Sobrien}
363560484Sobrien
363633965Sjdp/**
363760484Sobrien * ixl_sysctl_qrx_tail_handler
363833965Sjdp * Retrieves I40E_QRX_TAIL value from hardware
363933965Sjdp * for a sysctl.
364033965Sjdp */
364133965Sjdpstatic int
364260484Sobrienixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
364333965Sjdp{
364477298Sobrien	struct ixl_queue *que;
364577298Sobrien	int error;
364677298Sobrien	u32 val;
364777298Sobrien
364877298Sobrien	que = ((struct ixl_queue *)oidp->oid_arg1);
364960484Sobrien	if (!que) return 0;
365060484Sobrien
365160484Sobrien	val = rd32(que->vsi->hw, que->rxr.tail);
365238889Sjdp	error = sysctl_handle_int(oidp, &val, 0, req);
365333965Sjdp	if (error || !req->newptr)
365433965Sjdp		return error;
365533965Sjdp	return (0);
365633965Sjdp}
365733965Sjdp#endif
365833965Sjdp
365933965Sjdpstatic void
366033965Sjdpixl_add_hw_stats(struct ixl_pf *pf)
366133965Sjdp{
366233965Sjdp	device_t dev = pf->dev;
366333965Sjdp	struct ixl_vsi *vsi = &pf->vsi;
366433965Sjdp	struct ixl_queue *queues = vsi->queues;
366533965Sjdp	struct i40e_hw_port_stats *pf_stats = &pf->stats;
366633965Sjdp
366733965Sjdp	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
366833965Sjdp	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
366933965Sjdp	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
367033965Sjdp	struct sysctl_oid_list *vsi_list;
367133965Sjdp
367233965Sjdp	struct sysctl_oid *queue_node;
367333965Sjdp	struct sysctl_oid_list *queue_list;
367433965Sjdp
367577298Sobrien	struct tx_ring *txr;
367633965Sjdp	struct rx_ring *rxr;
367733965Sjdp	char queue_namebuf[QUEUE_NAME_LEN];
367833965Sjdp
367933965Sjdp	/* Driver statistics */
368033965Sjdp	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
368133965Sjdp			CTLFLAG_RD, &pf->watchdog_events,
368233965Sjdp			"Watchdog timeouts");
368333965Sjdp	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
368433965Sjdp			CTLFLAG_RD, &pf->admin_irq,
368533965Sjdp			"Admin Queue IRQ Handled");
368633965Sjdp
368733965Sjdp	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
368833965Sjdp	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
368933965Sjdp
369077298Sobrien	/* Queue statistics */
369177298Sobrien	for (int q = 0; q < vsi->num_queues; q++) {
369277298Sobrien		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
369377298Sobrien		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
369460484Sobrien		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
369560484Sobrien		queue_list = SYSCTL_CHILDREN(queue_node);
369660484Sobrien
369760484Sobrien		txr = &(queues[q].txr);
369833965Sjdp		rxr = &(queues[q].rxr);
369933965Sjdp
370033965Sjdp		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
370133965Sjdp				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
370233965Sjdp				"m_defrag() failed");
370333965Sjdp		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
370433965Sjdp				CTLFLAG_RD, &(queues[q].irqs),
370533965Sjdp				"irqs on this queue");
370633965Sjdp		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
370733965Sjdp				CTLFLAG_RD, &(queues[q].tso),
370833965Sjdp				"TSO");
370933965Sjdp		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
371033965Sjdp				CTLFLAG_RD, &(queues[q].tx_dma_setup),
371133965Sjdp				"Driver tx dma failure in xmit");
371233965Sjdp		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
371333965Sjdp				CTLFLAG_RD, &(txr->no_desc),
371433965Sjdp				"Queue No Descriptor Available");
371533965Sjdp		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
371633965Sjdp				CTLFLAG_RD, &(txr->total_packets),
371733965Sjdp				"Queue Packets Transmitted");
371833965Sjdp		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
371933965Sjdp				CTLFLAG_RD, &(txr->tx_bytes),
372033965Sjdp				"Queue Bytes Transmitted");
372133965Sjdp		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
372233965Sjdp				CTLFLAG_RD, &(rxr->rx_packets),
372333965Sjdp				"Queue Packets Received");
372433965Sjdp		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
372533965Sjdp				CTLFLAG_RD, &(rxr->rx_bytes),
372633965Sjdp				"Queue Bytes Received");
372733965Sjdp		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
372833965Sjdp				CTLFLAG_RD, &(rxr->desc_errs),
372933965Sjdp				"Queue Rx Descriptor Errors");
373033965Sjdp		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
373133965Sjdp				CTLFLAG_RD, &(rxr->itr), 0,
373233965Sjdp				"Queue Rx ITR Interval");
373333965Sjdp		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
373433965Sjdp				CTLFLAG_RD, &(txr->itr), 0,
373533965Sjdp				"Queue Tx ITR Interval");
373633965Sjdp		// Not actual latency; just a calculated value to put in a register
373777298Sobrien		// TODO: Put in better descriptions here
373838889Sjdp		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_latency",
373933965Sjdp				CTLFLAG_RD, &(rxr->latency), 0,
374033965Sjdp				"Queue Rx ITRL Average Interval");
374133965Sjdp		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_latency",
374233965Sjdp				CTLFLAG_RD, &(txr->latency), 0,
374338889Sjdp				"Queue Tx ITRL Average Interval");
374460484Sobrien
374560484Sobrien#ifdef IXL_DEBUG
374660484Sobrien		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
374777298Sobrien				CTLFLAG_RD, &(rxr->not_done),
374860484Sobrien				"Queue Rx Descriptors not Done");
374933965Sjdp		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
375033965Sjdp				CTLFLAG_RD, &(rxr->next_refresh), 0,
375133965Sjdp				"Queue Rx Descriptors not Done");
375233965Sjdp		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
375333965Sjdp				CTLFLAG_RD, &(rxr->next_check), 0,
375433965Sjdp				"Queue Rx Descriptors not Done");
375533965Sjdp		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
375633965Sjdp				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
375733965Sjdp				sizeof(struct ixl_queue),
375877298Sobrien				ixl_sysctl_qtx_tail_handler, "IU",
375977298Sobrien				"Queue Transmit Descriptor Tail");
376077298Sobrien		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
376177298Sobrien				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
376277298Sobrien				sizeof(struct ixl_queue),
376333965Sjdp				ixl_sysctl_qrx_tail_handler, "IU",
376433965Sjdp				"Queue Receive Descriptor Tail");
376533965Sjdp#endif
376633965Sjdp	}
376733965Sjdp
376833965Sjdp	/* MAC stats */
376933965Sjdp	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
377038889Sjdp}
377133965Sjdp
377233965Sjdpstatic void
377333965Sjdpixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
377433965Sjdp	struct sysctl_oid_list *child,
377533965Sjdp	struct i40e_eth_stats *eth_stats)
377633965Sjdp{
377760484Sobrien	struct ixl_sysctl_info ctls[] =
377860484Sobrien	{
377960484Sobrien		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
378060484Sobrien		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
378160484Sobrien			"Unicast Packets Received"},
378260484Sobrien		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
378360484Sobrien			"Multicast Packets Received"},
378460484Sobrien		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
378560484Sobrien			"Broadcast Packets Received"},
378660484Sobrien		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
378733965Sjdp		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
378833965Sjdp		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
378933965Sjdp		{&eth_stats->tx_multicast, "mcast_pkts_txd",
379033965Sjdp			"Multicast Packets Transmitted"},
379133965Sjdp		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
379233965Sjdp			"Broadcast Packets Transmitted"},
379333965Sjdp		// end
379433965Sjdp		{0,0,0}
379533965Sjdp	};
379633965Sjdp
379733965Sjdp	struct ixl_sysctl_info *entry = ctls;
379833965Sjdp	while (entry->stat != NULL)
379933965Sjdp	{
380033965Sjdp		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
380133965Sjdp				CTLFLAG_RD, entry->stat,
380233965Sjdp				entry->description);
380333965Sjdp		entry++;
380433965Sjdp	}
380533965Sjdp}
380633965Sjdp
380733965Sjdpstatic void
380833965Sjdpixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
380933965Sjdp	struct sysctl_oid_list *child,
381033965Sjdp	struct i40e_hw_port_stats *stats)
381133965Sjdp{
381233965Sjdp	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
381333965Sjdp				    CTLFLAG_RD, NULL, "Mac Statistics");
381433965Sjdp	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
381533965Sjdp
381633965Sjdp	struct i40e_eth_stats *eth_stats = &stats->eth;
381760484Sobrien	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
381877298Sobrien
381960484Sobrien	struct ixl_sysctl_info ctls[] =
382060484Sobrien	{
382160484Sobrien		{&stats->crc_errors, "crc_errors", "CRC Errors"},
382260484Sobrien		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
382360484Sobrien		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
382460484Sobrien		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
382560484Sobrien		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
382633965Sjdp		/* Packet Reception Stats */
382733965Sjdp		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
382833965Sjdp		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
382933965Sjdp		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
383033965Sjdp		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
383133965Sjdp		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
383233965Sjdp		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
383333965Sjdp		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
383433965Sjdp		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
383533965Sjdp		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
383633965Sjdp		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
383733965Sjdp		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
383833965Sjdp		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
383933965Sjdp		/* Packet Transmission Stats */
384033965Sjdp		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
384133965Sjdp		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
384233965Sjdp		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
384333965Sjdp		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
384433965Sjdp		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
384533965Sjdp		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
384633965Sjdp		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
384733965Sjdp		/* Flow control */
384833965Sjdp		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
384933965Sjdp		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
385033965Sjdp		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
385133965Sjdp		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
385233965Sjdp		/* End */
385333965Sjdp		{0,0,0}
385433965Sjdp	};
385533965Sjdp
385633965Sjdp	struct ixl_sysctl_info *entry = ctls;
385733965Sjdp	while (entry->stat != NULL)
385833965Sjdp	{
385933965Sjdp		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
386033965Sjdp				CTLFLAG_RD, entry->stat,
386133965Sjdp				entry->description);
386233965Sjdp		entry++;
386333965Sjdp	}
386433965Sjdp}
386533965Sjdp
386633965Sjdp
/*
** ixl_config_rss - setup RSS
**  - note this is done for the single vsi
**
** Programs the RSS hash key, enables the desired packet-classifier
** types (PCTYPEs) in the HENA registers, and fills the RSS lookup
** table so receive flows are spread across the VSI's queues.
*/
static void
ixl_config_rss(struct ixl_vsi *vsi)
{
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j, que_id;
#ifdef RSS
	u32		rss_hash_config;
	u32		rss_seed[IXL_KEYSZ];
#else
	/* Fixed hash seed used when the kernel RSS framework is absent. */
	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
			    0x183cfd8c, 0xce880440, 0x580cbc3c,
			    0x35897377, 0x328b25e1, 0x4fa98922,
			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif

#ifdef RSS
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *) &rss_seed);
#endif

	/* Fill out hash function seed (one 32-bit HKEY register per word) */
	for (i = 0; i < IXL_KEYSZ; i++)
                i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	/* Translate the kernel's configured hash types into HENA bits. */
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		/* NOTE(review): IPV6_EX is mapped onto the FRAG_IPV6
		 * pctype here — confirm this is the intended mapping. */
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
        if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
                set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	/* Without kernel RSS, enable hashing for every common pctype. */
	set_hena =
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
	/* Merge the new bits with whatever is already enabled; HENA is a
	 * 64-bit value split across two 32-bit registers. */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id &
		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	ixl_flush(hw);
}
395977298Sobrien
396033965Sjdp
396160484Sobrien/*
396260484Sobrien** This routine is run via an vlan config EVENT,
396377298Sobrien** it enables us to use the HW Filter table since
396460484Sobrien** we can get the vlan id. This just creates the
396577298Sobrien** entry in the soft version of the VFTA, init will
396660484Sobrien** repopulate the real table.
396777298Sobrien*/
396833965Sjdpstatic void
396960484Sobrienixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
397077298Sobrien{
397160484Sobrien	struct ixl_vsi	*vsi = ifp->if_softc;
397277298Sobrien	struct i40e_hw	*hw = vsi->hw;
397360484Sobrien	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
397433965Sjdp
397533965Sjdp	if (ifp->if_softc !=  arg)   /* Not our event */
397677298Sobrien		return;
397733965Sjdp
397833965Sjdp	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
397977298Sobrien		return;
398077298Sobrien
398160484Sobrien	IXL_PF_LOCK(pf);
398260484Sobrien	++vsi->num_vlans;
398360484Sobrien	ixl_add_filter(vsi, hw->mac.addr, vtag);
398460484Sobrien	IXL_PF_UNLOCK(pf);
398577298Sobrien}
398660484Sobrien
398777298Sobrien/*
398833965Sjdp** This routine is run via an vlan
398960484Sobrien** unconfig EVENT, remove our entry
399077298Sobrien** in the soft vfta.
399160484Sobrien*/
399277298Sobrienstatic void
399360484Sobrienixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
399433965Sjdp{
399533965Sjdp	struct ixl_vsi	*vsi = ifp->if_softc;
399633965Sjdp	struct i40e_hw	*hw = vsi->hw;
399733965Sjdp	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
399833965Sjdp
399933965Sjdp	if (ifp->if_softc !=  arg)
400033965Sjdp		return;
400133965Sjdp
400233965Sjdp	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
400333965Sjdp		return;
400433965Sjdp
400560484Sobrien	IXL_PF_LOCK(pf);
400633965Sjdp	--vsi->num_vlans;
400733965Sjdp	ixl_del_filter(vsi, hw->mac.addr, vtag);
400833965Sjdp	IXL_PF_UNLOCK(pf);
400933965Sjdp}
401033965Sjdp
401133965Sjdp/*
401233965Sjdp** This routine updates vlan filters, called by init
401333965Sjdp** it scans the filter table and then updates the hw
401433965Sjdp** after a soft reset.
401533965Sjdp*/
401633965Sjdpstatic void
401733965Sjdpixl_setup_vlan_filters(struct ixl_vsi *vsi)
401833965Sjdp{
401933965Sjdp	struct ixl_mac_filter	*f;
402033965Sjdp	int			cnt = 0, flags;
402133965Sjdp
402233965Sjdp	if (vsi->num_vlans == 0)
402333965Sjdp		return;
402433965Sjdp	/*
402533965Sjdp	** Scan the filter list for vlan entries,
402633965Sjdp	** mark them for addition and then call
402733965Sjdp	** for the AQ update.
402833965Sjdp	*/
402933965Sjdp	SLIST_FOREACH(f, &vsi->ftl, next) {
403033965Sjdp		if (f->flags & IXL_FILTER_VLAN) {
403133965Sjdp			f->flags |=
403233965Sjdp			    (IXL_FILTER_ADD |
403333965Sjdp			    IXL_FILTER_USED);
403433965Sjdp			cnt++;
403533965Sjdp		}
403633965Sjdp	}
403733965Sjdp	if (cnt == 0) {
403833965Sjdp		printf("setup vlan: no filters found!\n");
403933965Sjdp		return;
404033965Sjdp	}
404133965Sjdp	flags = IXL_FILTER_VLAN;
404233965Sjdp	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
404333965Sjdp	ixl_add_hw_filters(vsi, flags, cnt);
404433965Sjdp	return;
404533965Sjdp}
404633965Sjdp
/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's filter list & seid to be set before calling.
*/
static void
ixl_init_filters(struct ixl_vsi *vsi)
{
	/* Add broadcast address (matched on any vlan) */
	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
}
406533965Sjdp
406633965Sjdp/*
406733965Sjdp** This routine adds mulicast filters
406833965Sjdp*/
406933965Sjdpstatic void
407033965Sjdpixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
407177298Sobrien{
407233965Sjdp	struct ixl_mac_filter *f;
407333965Sjdp
407433965Sjdp	/* Does one already exist */
407533965Sjdp	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
407633965Sjdp	if (f != NULL)
407733965Sjdp		return;
407833965Sjdp
407933965Sjdp	f = ixl_get_filter(vsi);
408033965Sjdp	if (f == NULL) {
408133965Sjdp		printf("WARNING: no filter available!!\n");
408233965Sjdp		return;
408333965Sjdp	}
408433965Sjdp	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
408533965Sjdp	f->vlan = IXL_VLAN_ANY;
408633965Sjdp	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
408733965Sjdp	    | IXL_FILTER_MC);
408833965Sjdp
408933965Sjdp	return;
409033965Sjdp}
409133965Sjdp
409233965Sjdpstatic void
409333965Sjdpixl_reconfigure_filters(struct ixl_vsi *vsi)
409433965Sjdp{
409533965Sjdp
409633965Sjdp	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
409733965Sjdp}
409833965Sjdp
/*
** This routine adds macvlan filters
**
** Adds (macaddr, vlan) to the soft filter list and immediately
** pushes it to the hardware via one AQ call.  A vlan of
** IXL_VLAN_ANY means "match this MAC on any vlan".
*/
static void
ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f, *tmp;
	struct ixl_pf		*pf;
	device_t		dev;

	DEBUGOUT("ixl_add_filter: begin");

	pf = vsi->back;
	dev = pf->dev;

	/* Does one already exist */
	f = ixl_find_filter(vsi, macaddr, vlan);
	if (f != NULL)
		return;
	/*
	** Is this the first vlan being registered, if so we
	** need to remove the ANY filter that indicates we are
	** not in a vlan, and replace that with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			/* Recurses back into this function with vlan 0. */
			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
			ixl_add_filter(vsi, macaddr, 0);
		}
	}

	f = ixl_get_filter(vsi);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->vlan = vlan;
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	/* NOTE(review): num_macs is only bumped for IXL_VLAN_ANY filters
	 * here, yet ixl_del_filter decrements it unconditionally — confirm
	 * that asymmetry is intentional. */
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	/* Push this single new entry to the hardware now. */
	ixl_add_hw_filters(vsi, f->flags, 1);
	return;
}
414733965Sjdp
/*
** Remove the (macaddr, vlan) filter from software and hardware.
** When the last vlan goes away, the vlan-0 filter is swapped back
** for an "any vlan" filter (mirror image of the swap performed in
** ixl_add_filter).
*/
static void
ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	f = ixl_find_filter(vsi, macaddr, vlan);
	if (f == NULL)
		return;

	/* Mark for deletion and sync the hardware immediately. */
	f->flags |= IXL_FILTER_DEL;
	ixl_del_hw_filters(vsi, 1);
	vsi->num_macs--;

	/* Check if this is the last vlan removal */
	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
		/* Switch back to a non-vlan filter (recurses once) */
		ixl_del_filter(vsi, macaddr, 0);
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
	}
	return;
}
416933965Sjdp
417033965Sjdp/*
417133965Sjdp** Find the filter with both matching mac addr and vlan id
417233965Sjdp*/
417333965Sjdpstatic struct ixl_mac_filter *
417433965Sjdpixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
417533965Sjdp{
417633965Sjdp	struct ixl_mac_filter	*f;
417733965Sjdp	bool			match = FALSE;
417833965Sjdp
417977298Sobrien	SLIST_FOREACH(f, &vsi->ftl, next) {
418033965Sjdp		if (!cmp_etheraddr(f->macaddr, macaddr))
418133965Sjdp			continue;
418233965Sjdp		if (f->vlan == vlan) {
418333965Sjdp			match = TRUE;
418433965Sjdp			break;
418533965Sjdp		}
418633965Sjdp	}
418733965Sjdp
418833965Sjdp	if (!match)
418933965Sjdp		f = NULL;
419038889Sjdp	return (f);
419138889Sjdp}
419233965Sjdp
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
**
** 'cnt' bounds how many entries are sent; only entries whose
** flags EXACTLY equal 'flags' are picked up.
*/
static void
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter	*f;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	int			err, j = 0;

	pf = vsi->back;
	dev = pf->dev;
	hw = &pf->hw;
	IXL_PF_LOCK_ASSERT(pf);

	/* Scratch array for the AQ command; M_NOWAIT since the PF
	 * lock is held. */
	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	/*
	** Scan the filter list, each time we find one
	** we add it to the admin queue array and turn off
	** the add bit.
	** NOTE: this is an exact flags comparison (==), not a mask
	** test, so only entries in precisely the requested state match.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if (f->flags == flags) {
			b = &a[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
			if (f->vlan == IXL_VLAN_ANY) {
				b->vlan_tag = 0;
				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				b->vlan_tag = f->vlan;
				b->flags = 0;
			}
			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			/* Clear ADD so the entry isn't re-sent next time. */
			f->flags &= ~IXL_FILTER_ADD;
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
		if (err)
			device_printf(dev, "aq_add_macvlan err %d, "
			    "aq_error %d\n", err, hw->aq.asq_last_status);
		else
			vsi->hw_filters_add += j;
	}
	free(a, M_DEVBUF);
	return;
}
425433965Sjdp
/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
**
** Entries flagged IXL_FILTER_DEL are unlinked from the soft list
** and freed as the AQ array is built, so they are gone from
** software even if the hardware removal partially fails.
*/
static void
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	struct ixl_mac_filter	*f, *f_temp;
	int			err, j = 0;

	DEBUGOUT("ixl_del_hw_filters: begin\n");

	pf = vsi->back;
	hw = &pf->hw;
	dev = pf->dev;

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		printf("del hw filter failed to get memory\n");
		return;
	}

	/* _SAFE variant: entries are removed from the list while walking. */
	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			e = &d[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			/* delete entry from vsi list */
			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
			free(f, M_DEVBUF);
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
		/* NOTE: returns ENOENT every time but seems to work fine,
		   so we'll ignore that specific error. */
		// TODO: Does this still occur on current firmwares?
		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
			/* Count per-element successes so the stats stay
			 * accurate even on a partial failure. */
			int sc = 0;
			for (int i = 0; i < j; i++)
				sc += (!d[i].error_code);
			vsi->hw_filters_del += sc;
			device_printf(dev,
			    "Failed to remove %d/%d filters, aq error %d\n",
			    j - sc, j, hw->aq.asq_last_status);
		} else
			vsi->hw_filters_del += j;
	}
	free(d, M_DEVBUF);

	DEBUGOUT("ixl_del_hw_filters: end\n");
	return;
}
431833965Sjdp
431933965Sjdpstatic int
432060484Sobrienixl_enable_rings(struct ixl_vsi *vsi)
432133965Sjdp{
432260484Sobrien	struct ixl_pf	*pf = vsi->back;
432333965Sjdp	struct i40e_hw	*hw = &pf->hw;
432438889Sjdp	int		index, error;
432538889Sjdp	u32		reg;
432638889Sjdp
432738889Sjdp	error = 0;
432838889Sjdp	for (int i = 0; i < vsi->num_queues; i++) {
432933965Sjdp		index = vsi->first_queue + i;
433033965Sjdp		i40e_pre_tx_queue_cfg(hw, index, TRUE);
433138889Sjdp
433233965Sjdp		reg = rd32(hw, I40E_QTX_ENA(index));
433333965Sjdp		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
433433965Sjdp		    I40E_QTX_ENA_QENA_STAT_MASK;
433533965Sjdp		wr32(hw, I40E_QTX_ENA(index), reg);
433633965Sjdp		/* Verify the enable took */
433733965Sjdp		for (int j = 0; j < 10; j++) {
433860484Sobrien			reg = rd32(hw, I40E_QTX_ENA(index));
433960484Sobrien			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
434060484Sobrien				break;
434160484Sobrien			i40e_msec_delay(10);
434260484Sobrien		}
434360484Sobrien		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
434460484Sobrien			device_printf(pf->dev, "TX queue %d disabled!\n",
434560484Sobrien			    index);
434660484Sobrien			error = ETIMEDOUT;
434760484Sobrien		}
434877298Sobrien
434960484Sobrien		reg = rd32(hw, I40E_QRX_ENA(index));
435060484Sobrien		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
435160484Sobrien		    I40E_QRX_ENA_QENA_STAT_MASK;
435260484Sobrien		wr32(hw, I40E_QRX_ENA(index), reg);
435360484Sobrien		/* Verify the enable took */
435460484Sobrien		for (int j = 0; j < 10; j++) {
435560484Sobrien			reg = rd32(hw, I40E_QRX_ENA(index));
435660484Sobrien			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
435760484Sobrien				break;
435860484Sobrien			i40e_msec_delay(10);
435960484Sobrien		}
436060484Sobrien		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
436160484Sobrien			device_printf(pf->dev, "RX queue %d disabled!\n",
436260484Sobrien			    index);
436360484Sobrien			error = ETIMEDOUT;
436460484Sobrien		}
436560484Sobrien	}
436660484Sobrien
436760484Sobrien	return (error);
436833965Sjdp}
436933965Sjdp
437033965Sjdpstatic int
437133965Sjdpixl_disable_rings(struct ixl_vsi *vsi)
437233965Sjdp{
437333965Sjdp	struct ixl_pf	*pf = vsi->back;
437433965Sjdp	struct i40e_hw	*hw = &pf->hw;
437533965Sjdp	int		index, error;
437633965Sjdp	u32		reg;
437733965Sjdp
437833965Sjdp	error = 0;
437960484Sobrien	for (int i = 0; i < vsi->num_queues; i++) {
438060484Sobrien		index = vsi->first_queue + i;
438160484Sobrien
438260484Sobrien		i40e_pre_tx_queue_cfg(hw, index, FALSE);
438360484Sobrien		i40e_usec_delay(500);
438433965Sjdp
438533965Sjdp		reg = rd32(hw, I40E_QTX_ENA(index));
438660484Sobrien		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
438760484Sobrien		wr32(hw, I40E_QTX_ENA(index), reg);
438860484Sobrien		/* Verify the disable took */
438933965Sjdp		for (int j = 0; j < 10; j++) {
439033965Sjdp			reg = rd32(hw, I40E_QTX_ENA(index));
439133965Sjdp			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
439233965Sjdp				break;
439333965Sjdp			i40e_msec_delay(10);
439433965Sjdp		}
439577298Sobrien		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
439638889Sjdp			device_printf(pf->dev, "TX queue %d still enabled!\n",
439738889Sjdp			    index);
439838889Sjdp			error = ETIMEDOUT;
439938889Sjdp		}
440033965Sjdp
440160484Sobrien		reg = rd32(hw, I40E_QRX_ENA(index));
440233965Sjdp		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
440333965Sjdp		wr32(hw, I40E_QRX_ENA(index), reg);
440433965Sjdp		/* Verify the disable took */
440533965Sjdp		for (int j = 0; j < 10; j++) {
440633965Sjdp			reg = rd32(hw, I40E_QRX_ENA(index));
440733965Sjdp			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
440833965Sjdp				break;
440938889Sjdp			i40e_msec_delay(10);
441038889Sjdp		}
441138889Sjdp		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
441238889Sjdp			device_printf(pf->dev, "RX queue %d still enabled!\n",
441338889Sjdp			    index);
441438889Sjdp			error = ETIMEDOUT;
441538889Sjdp		}
441638889Sjdp	}
441738889Sjdp
441838889Sjdp	return (error);
441933965Sjdp}
442033965Sjdp
442133965Sjdp/**
442233965Sjdp * ixl_handle_mdd_event
442333965Sjdp *
442433965Sjdp * Called from interrupt handler to identify possibly malicious vfs
442533965Sjdp * (But also detects events from the PF, as well)
442633965Sjdp **/
442733965Sjdpstatic void
442833965Sjdpixl_handle_mdd_event(struct ixl_pf *pf)
442933965Sjdp{
443033965Sjdp	struct i40e_hw *hw = &pf->hw;
443133965Sjdp	device_t dev = pf->dev;
443277298Sobrien	bool mdd_detected = false;
443377298Sobrien	bool pf_mdd_detected = false;
443433965Sjdp	u32 reg;
443533965Sjdp
443633965Sjdp	/* find what triggered the MDD event */
443760484Sobrien	reg = rd32(hw, I40E_GL_MDET_TX);
443833965Sjdp	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
443933965Sjdp		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
444033965Sjdp				I40E_GL_MDET_TX_PF_NUM_SHIFT;
444133965Sjdp		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
444233965Sjdp				I40E_GL_MDET_TX_EVENT_SHIFT;
444333965Sjdp		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
444433965Sjdp				I40E_GL_MDET_TX_QUEUE_SHIFT;
444533965Sjdp		device_printf(dev,
444633965Sjdp			 "Malicious Driver Detection event 0x%02x"
444733965Sjdp			 " on TX queue %d pf number 0x%02x\n",
444833965Sjdp			 event, queue, pf_num);
444933965Sjdp		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
445033965Sjdp		mdd_detected = true;
445133965Sjdp	}
445233965Sjdp	reg = rd32(hw, I40E_GL_MDET_RX);
445333965Sjdp	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
445433965Sjdp		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
445533965Sjdp				I40E_GL_MDET_RX_FUNCTION_SHIFT;
445633965Sjdp		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
445733965Sjdp				I40E_GL_MDET_RX_EVENT_SHIFT;
445833965Sjdp		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
445933965Sjdp				I40E_GL_MDET_RX_QUEUE_SHIFT;
446033965Sjdp		device_printf(dev,
446133965Sjdp			 "Malicious Driver Detection event 0x%02x"
446233965Sjdp			 " on RX queue %d of function 0x%02x\n",
446333965Sjdp			 event, queue, func);
446433965Sjdp		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
446533965Sjdp		mdd_detected = true;
446633965Sjdp	}
446733965Sjdp
446833965Sjdp	if (mdd_detected) {
446933965Sjdp		reg = rd32(hw, I40E_PF_MDET_TX);
447033965Sjdp		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
447133965Sjdp			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
447233965Sjdp			device_printf(dev,
447333965Sjdp				 "MDD TX event is for this function 0x%08x",
447433965Sjdp				 reg);
447533965Sjdp			pf_mdd_detected = true;
447633965Sjdp		}
447733965Sjdp		reg = rd32(hw, I40E_PF_MDET_RX);
447833965Sjdp		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
447933965Sjdp			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
448033965Sjdp			device_printf(dev,
448133965Sjdp				 "MDD RX event is for this function 0x%08x",
448233965Sjdp				 reg);
448333965Sjdp			pf_mdd_detected = true;
448460484Sobrien		}
448533965Sjdp	}
448633965Sjdp
448733965Sjdp	/* re-enable mdd interrupt cause */
448833965Sjdp	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
448933965Sjdp	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
449033965Sjdp	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
449133965Sjdp	ixl_flush(hw);
449233965Sjdp}
449333965Sjdp
449460484Sobrienstatic void
449533965Sjdpixl_enable_intr(struct ixl_vsi *vsi)
449633965Sjdp{
449733965Sjdp	struct i40e_hw		*hw = vsi->hw;
449833965Sjdp	struct ixl_queue	*que = vsi->queues;
449933965Sjdp
450033965Sjdp	if (ixl_enable_msix) {
450133965Sjdp		for (int i = 0; i < vsi->num_queues; i++, que++)
450233965Sjdp			ixl_enable_queue(hw, que->me);
450333965Sjdp	} else
450433965Sjdp		ixl_enable_legacy(hw);
450533965Sjdp}
450633965Sjdp
450733965Sjdpstatic void
450833965Sjdpixl_disable_rings_intr(struct ixl_vsi *vsi)
450933965Sjdp{
451033965Sjdp	struct i40e_hw		*hw = vsi->hw;
451133965Sjdp	struct ixl_queue	*que = vsi->queues;
451233965Sjdp
451333965Sjdp	for (int i = 0; i < vsi->num_queues; i++, que++)
451477298Sobrien		ixl_disable_queue(hw, que->me);
451533965Sjdp}
451638889Sjdp
451738889Sjdpstatic void
451838889Sjdpixl_disable_intr(struct ixl_vsi *vsi)
451938889Sjdp{
452060484Sobrien	struct i40e_hw		*hw = vsi->hw;
452160484Sobrien
452260484Sobrien	if (ixl_enable_msix)
452360484Sobrien		ixl_disable_adminq(hw);
452460484Sobrien	else
452560484Sobrien		ixl_disable_legacy(hw);
452660484Sobrien}
452760484Sobrien
452838889Sjdpstatic void
452933965Sjdpixl_enable_adminq(struct i40e_hw *hw)
453033965Sjdp{
453160484Sobrien	u32		reg;
453233965Sjdp
453333965Sjdp	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
453438889Sjdp	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
453533965Sjdp	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
453633965Sjdp	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
453777298Sobrien	ixl_flush(hw);
453877298Sobrien}
453977298Sobrien
454077298Sobrienstatic void
454177298Sobrienixl_disable_adminq(struct i40e_hw *hw)
454233965Sjdp{
454360484Sobrien	u32		reg;
454460484Sobrien
454560484Sobrien	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
454660484Sobrien	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
454760484Sobrien	ixl_flush(hw);
454860484Sobrien}
454960484Sobrien
455033965Sjdpstatic void
455133965Sjdpixl_enable_queue(struct i40e_hw *hw, int id)
455233965Sjdp{
455333965Sjdp	u32		reg;
455433965Sjdp
455533965Sjdp	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
455633965Sjdp	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
455733965Sjdp	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
455833965Sjdp	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
455933965Sjdp}
456077298Sobrien
456177298Sobrienstatic void
456260484Sobrienixl_disable_queue(struct i40e_hw *hw, int id)
456360484Sobrien{
456460484Sobrien	u32		reg;
456533965Sjdp
456633965Sjdp	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
456733965Sjdp	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
456860484Sobrien}
456933965Sjdp
457033965Sjdpstatic void
457133965Sjdpixl_enable_legacy(struct i40e_hw *hw)
457233965Sjdp{
457333965Sjdp	u32		reg;
457433965Sjdp	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
457533965Sjdp	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
457633965Sjdp	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
457733965Sjdp	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
457833965Sjdp}
457933965Sjdp
458033965Sjdpstatic void
458133965Sjdpixl_disable_legacy(struct i40e_hw *hw)
458233965Sjdp{
458333965Sjdp	u32		reg;
458433965Sjdp
458533965Sjdp	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
458633965Sjdp	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
458733965Sjdp}
458833965Sjdp
458933965Sjdpstatic void
459033965Sjdpixl_update_stats_counters(struct ixl_pf *pf)
459133965Sjdp{
459233965Sjdp	struct i40e_hw	*hw = &pf->hw;
459333965Sjdp	struct ixl_vsi	*vsi = &pf->vsi;
459433965Sjdp	struct ixl_vf	*vf;
459533965Sjdp
459633965Sjdp	struct i40e_hw_port_stats *nsd = &pf->stats;
459733965Sjdp	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
459833965Sjdp
459933965Sjdp	/* Update hw stats */
460033965Sjdp	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
460133965Sjdp			   pf->stat_offsets_loaded,
460233965Sjdp			   &osd->crc_errors, &nsd->crc_errors);
460333965Sjdp	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
460433965Sjdp			   pf->stat_offsets_loaded,
460533965Sjdp			   &osd->illegal_bytes, &nsd->illegal_bytes);
460660484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
460760484Sobrien			   I40E_GLPRT_GORCL(hw->port),
460860484Sobrien			   pf->stat_offsets_loaded,
460933965Sjdp			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
461060484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
461160484Sobrien			   I40E_GLPRT_GOTCL(hw->port),
461260484Sobrien			   pf->stat_offsets_loaded,
461360484Sobrien			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
461460484Sobrien	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
461560484Sobrien			   pf->stat_offsets_loaded,
461660484Sobrien			   &osd->eth.rx_discards,
461760484Sobrien			   &nsd->eth.rx_discards);
461860484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
461960484Sobrien			   I40E_GLPRT_UPRCL(hw->port),
462060484Sobrien			   pf->stat_offsets_loaded,
462133965Sjdp			   &osd->eth.rx_unicast,
462260484Sobrien			   &nsd->eth.rx_unicast);
462333965Sjdp	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
462460484Sobrien			   I40E_GLPRT_UPTCL(hw->port),
462560484Sobrien			   pf->stat_offsets_loaded,
462660484Sobrien			   &osd->eth.tx_unicast,
462760484Sobrien			   &nsd->eth.tx_unicast);
462860484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
462960484Sobrien			   I40E_GLPRT_MPRCL(hw->port),
463060484Sobrien			   pf->stat_offsets_loaded,
463133965Sjdp			   &osd->eth.rx_multicast,
463260484Sobrien			   &nsd->eth.rx_multicast);
463333965Sjdp	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
463460484Sobrien			   I40E_GLPRT_MPTCL(hw->port),
463560484Sobrien			   pf->stat_offsets_loaded,
463660484Sobrien			   &osd->eth.tx_multicast,
463760484Sobrien			   &nsd->eth.tx_multicast);
463860484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
463960484Sobrien			   I40E_GLPRT_BPRCL(hw->port),
464060484Sobrien			   pf->stat_offsets_loaded,
464160484Sobrien			   &osd->eth.rx_broadcast,
464233965Sjdp			   &nsd->eth.rx_broadcast);
464360484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
464460484Sobrien			   I40E_GLPRT_BPTCL(hw->port),
464560484Sobrien			   pf->stat_offsets_loaded,
464633965Sjdp			   &osd->eth.tx_broadcast,
464760484Sobrien			   &nsd->eth.tx_broadcast);
464860484Sobrien
464960484Sobrien	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
465060484Sobrien			   pf->stat_offsets_loaded,
465160484Sobrien			   &osd->tx_dropped_link_down,
465260484Sobrien			   &nsd->tx_dropped_link_down);
465360484Sobrien	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
465460484Sobrien			   pf->stat_offsets_loaded,
465560484Sobrien			   &osd->mac_local_faults,
465660484Sobrien			   &nsd->mac_local_faults);
465760484Sobrien	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
465833965Sjdp			   pf->stat_offsets_loaded,
465960484Sobrien			   &osd->mac_remote_faults,
466060484Sobrien			   &nsd->mac_remote_faults);
466160484Sobrien	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
466233965Sjdp			   pf->stat_offsets_loaded,
466360484Sobrien			   &osd->rx_length_errors,
466460484Sobrien			   &nsd->rx_length_errors);
466560484Sobrien
466660484Sobrien	/* Flow control (LFC) stats */
466733965Sjdp	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
466860484Sobrien			   pf->stat_offsets_loaded,
466960484Sobrien			   &osd->link_xon_rx, &nsd->link_xon_rx);
467060484Sobrien	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
467160484Sobrien			   pf->stat_offsets_loaded,
467260484Sobrien			   &osd->link_xon_tx, &nsd->link_xon_tx);
467360484Sobrien	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
467460484Sobrien			   pf->stat_offsets_loaded,
467560484Sobrien			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
467633965Sjdp	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
467760484Sobrien			   pf->stat_offsets_loaded,
467860484Sobrien			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
467933965Sjdp
468060484Sobrien	/* Packet size stats rx */
468160484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
468260484Sobrien			   I40E_GLPRT_PRC64L(hw->port),
468360484Sobrien			   pf->stat_offsets_loaded,
468460484Sobrien			   &osd->rx_size_64, &nsd->rx_size_64);
468560484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
468660484Sobrien			   I40E_GLPRT_PRC127L(hw->port),
468760484Sobrien			   pf->stat_offsets_loaded,
468860484Sobrien			   &osd->rx_size_127, &nsd->rx_size_127);
468960484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
469060484Sobrien			   I40E_GLPRT_PRC255L(hw->port),
469133965Sjdp			   pf->stat_offsets_loaded,
469260484Sobrien			   &osd->rx_size_255, &nsd->rx_size_255);
469333965Sjdp	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
469460484Sobrien			   I40E_GLPRT_PRC511L(hw->port),
469560484Sobrien			   pf->stat_offsets_loaded,
469660484Sobrien			   &osd->rx_size_511, &nsd->rx_size_511);
469760484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
469860484Sobrien			   I40E_GLPRT_PRC1023L(hw->port),
469960484Sobrien			   pf->stat_offsets_loaded,
470060484Sobrien			   &osd->rx_size_1023, &nsd->rx_size_1023);
470160484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
470260484Sobrien			   I40E_GLPRT_PRC1522L(hw->port),
470360484Sobrien			   pf->stat_offsets_loaded,
470460484Sobrien			   &osd->rx_size_1522, &nsd->rx_size_1522);
470560484Sobrien	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
470660484Sobrien			   I40E_GLPRT_PRC9522L(hw->port),
470733965Sjdp			   pf->stat_offsets_loaded,
470833965Sjdp			   &osd->rx_size_big, &nsd->rx_size_big);
470933965Sjdp
471033965Sjdp	/* Packet size stats tx */
471133965Sjdp	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
471233965Sjdp			   I40E_GLPRT_PTC64L(hw->port),
471333965Sjdp			   pf->stat_offsets_loaded,
471433965Sjdp			   &osd->tx_size_64, &nsd->tx_size_64);
471533965Sjdp	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
471633965Sjdp			   I40E_GLPRT_PTC127L(hw->port),
471733965Sjdp			   pf->stat_offsets_loaded,
471833965Sjdp			   &osd->tx_size_127, &nsd->tx_size_127);
471933965Sjdp	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
472033965Sjdp			   I40E_GLPRT_PTC255L(hw->port),
472133965Sjdp			   pf->stat_offsets_loaded,
472233965Sjdp			   &osd->tx_size_255, &nsd->tx_size_255);
472333965Sjdp	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
472433965Sjdp			   I40E_GLPRT_PTC511L(hw->port),
472533965Sjdp			   pf->stat_offsets_loaded,
472633965Sjdp			   &osd->tx_size_511, &nsd->tx_size_511);
472733965Sjdp	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
472833965Sjdp			   I40E_GLPRT_PTC1023L(hw->port),
472933965Sjdp			   pf->stat_offsets_loaded,
473033965Sjdp			   &osd->tx_size_1023, &nsd->tx_size_1023);
473133965Sjdp	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
473233965Sjdp			   I40E_GLPRT_PTC1522L(hw->port),
473333965Sjdp			   pf->stat_offsets_loaded,
473433965Sjdp			   &osd->tx_size_1522, &nsd->tx_size_1522);
473533965Sjdp	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
473633965Sjdp			   I40E_GLPRT_PTC9522L(hw->port),
473733965Sjdp			   pf->stat_offsets_loaded,
473833965Sjdp			   &osd->tx_size_big, &nsd->tx_size_big);
473933965Sjdp
474033965Sjdp	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
474133965Sjdp			   pf->stat_offsets_loaded,
474233965Sjdp			   &osd->rx_undersize, &nsd->rx_undersize);
474333965Sjdp	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
474433965Sjdp			   pf->stat_offsets_loaded,
474533965Sjdp			   &osd->rx_fragments, &nsd->rx_fragments);
474633965Sjdp	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
474733965Sjdp			   pf->stat_offsets_loaded,
474833965Sjdp			   &osd->rx_oversize, &nsd->rx_oversize);
474933965Sjdp	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
475033965Sjdp			   pf->stat_offsets_loaded,
475133965Sjdp			   &osd->rx_jabber, &nsd->rx_jabber);
475233965Sjdp	pf->stat_offsets_loaded = true;
475333965Sjdp	/* End hw stats */
475433965Sjdp
475533965Sjdp	/* Update vsi stats */
475633965Sjdp	ixl_update_vsi_stats(vsi);
475733965Sjdp
475833965Sjdp	for (int i = 0; i < pf->num_vfs; i++) {
475933965Sjdp		vf = &pf->vfs[i];
476033965Sjdp		if (vf->vf_flags & VF_FLAG_ENABLED)
476133965Sjdp			ixl_update_eth_stats(&pf->vfs[i].vsi);
476233965Sjdp	}
476333965Sjdp}
476433965Sjdp
/*
** Tear down and rebuild the HW structures (admin queue, MSIX vectors,
** LAN HMC) after a core/EMP reset, restarting the interface if it was
** running beforehand.
**
** Errors from individual teardown/setup steps are logged but do not
** abort the sequence; the function always returns 0.
*/
static int
ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	bool is_up = false;
	int error = 0;

	/* Remember whether the interface was up so it can be restarted. */
	is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);

	/* Teardown */
	if (is_up)
		ixl_stop(pf);
	error = i40e_shutdown_lan_hmc(hw);
	if (error)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", error);
	ixl_disable_adminq(hw);
	ixl_teardown_adminq_msix(pf);
	error = i40e_shutdown_adminq(hw);
	if (error)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", error);

	/* Setup */
	/* A firmware API version mismatch is reported by the caller path,
	 * so it is not treated as an init failure here. */
	error = i40e_init_adminq(hw);
	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
		    error);
	}
	error = ixl_setup_adminq_msix(pf);
	if (error) {
		device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
		    error);
	}
	ixl_configure_intr0_msix(pf);
	ixl_enable_adminq(hw);
	/* Re-create the LAN HMC using the queue counts reported by the HW. */
	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
	}
	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
	}
	if (is_up)
		ixl_init(pf);

	return (0);
}
481733965Sjdp
481833965Sjdpstatic void
481933965Sjdpixl_handle_empr_reset(struct ixl_pf *pf)
482033965Sjdp{
482133965Sjdp	struct i40e_hw *hw = &pf->hw;
482233965Sjdp	device_t dev = pf->dev;
482333965Sjdp	int count = 0;
482433965Sjdp	u32 reg;
482533965Sjdp
482633965Sjdp	/* Typically finishes within 3-4 seconds */
482733965Sjdp	while (count++ < 100) {
482833965Sjdp		reg = rd32(hw, I40E_GLGEN_RSTAT)
482933965Sjdp		    & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
483033965Sjdp		if (reg)
483133965Sjdp			i40e_msec_delay(100);
483233965Sjdp		else
483333965Sjdp			break;
483433965Sjdp	}
483533965Sjdp#ifdef IXL_DEBUG
483633965Sjdp	// Reset-related
483733965Sjdp	device_printf(dev, "EMPR reset wait count: %d\n", count);
483833965Sjdp#endif
483933965Sjdp
484033965Sjdp	device_printf(dev, "Rebuilding driver state...\n");
484133965Sjdp	ixl_rebuild_hw_structs_after_reset(pf);
484233965Sjdp	device_printf(dev, "Rebuilding driver state done.\n");
484333965Sjdp
484433965Sjdp	atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
484533965Sjdp}
484633965Sjdp
/*
** Tasklet handler for MSIX Adminq interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixl_do_adminq(void *context, int pending)
{
	struct ixl_pf			*pf = context;
	struct i40e_hw			*hw = &pf->hw;
	struct i40e_arq_event_info	event;
	i40e_status			ret;
	device_t			dev = pf->dev;
	u32				loop = 0;
	u16				opcode, result;

	/* An EMP reset takes priority over normal AQ processing. */
	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
		/* Flag cleared at end of this function */
		ixl_handle_empr_reset(pf);
		return;
	}

	/* Admin Queue handling */
	/* M_NOWAIT: allocating from a taskqueue context; bail out on
	 * failure rather than sleeping. */
	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return;
	}

	IXL_PF_LOCK(pf);
	/* clean and process any events */
	/* NOTE(review): 'result' appears to be the count of messages still
	 * pending after each clean — confirm against i40e_clean_arq_element. */
	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
#ifdef IXL_DEBUG
		device_printf(dev, "%s: Admin Queue event: %#06x\n", __func__,
		    opcode);
#endif
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		case i40e_aqc_opc_event_lan_overflow:
		default:
			/* Unhandled opcodes are silently dropped. */
			break;
		}

	} while (result && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_DEVBUF);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (result > 0)
		taskqueue_enqueue(pf->tq, &pf->adminq);
	else
		ixl_enable_adminq(hw);

	IXL_PF_UNLOCK(pf);
}
491833965Sjdp
491933965Sjdp/**
492033965Sjdp * Update VSI-specific ethernet statistics counters.
492133965Sjdp **/
492233965Sjdpvoid
492333965Sjdpixl_update_eth_stats(struct ixl_vsi *vsi)
492433965Sjdp{
492533965Sjdp	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
492633965Sjdp	struct i40e_hw *hw = &pf->hw;
492733965Sjdp	struct i40e_eth_stats *es;
492833965Sjdp	struct i40e_eth_stats *oes;
492933965Sjdp	struct i40e_hw_port_stats *nsd;
493033965Sjdp	u16 stat_idx = vsi->info.stat_counter_idx;
493133965Sjdp
493233965Sjdp	es = &vsi->eth_stats;
493333965Sjdp	oes = &vsi->eth_stats_offsets;
493433965Sjdp	nsd = &pf->stats;
493533965Sjdp
493633965Sjdp	/* Gather up the stats that the hw collects */
493733965Sjdp	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
493833965Sjdp			   vsi->stat_offsets_loaded,
493933965Sjdp			   &oes->tx_errors, &es->tx_errors);
494033965Sjdp	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
494133965Sjdp			   vsi->stat_offsets_loaded,
494233965Sjdp			   &oes->rx_discards, &es->rx_discards);
494333965Sjdp
494433965Sjdp	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
494533965Sjdp			   I40E_GLV_GORCL(stat_idx),
494633965Sjdp			   vsi->stat_offsets_loaded,
494733965Sjdp			   &oes->rx_bytes, &es->rx_bytes);
494833965Sjdp	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
494933965Sjdp			   I40E_GLV_UPRCL(stat_idx),
495033965Sjdp			   vsi->stat_offsets_loaded,
495133965Sjdp			   &oes->rx_unicast, &es->rx_unicast);
495260484Sobrien	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
495360484Sobrien			   I40E_GLV_MPRCL(stat_idx),
495460484Sobrien			   vsi->stat_offsets_loaded,
495560484Sobrien			   &oes->rx_multicast, &es->rx_multicast);
495660484Sobrien	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
495733965Sjdp			   I40E_GLV_BPRCL(stat_idx),
495833965Sjdp			   vsi->stat_offsets_loaded,
495960484Sobrien			   &oes->rx_broadcast, &es->rx_broadcast);
496033965Sjdp
496133965Sjdp	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
496233965Sjdp			   I40E_GLV_GOTCL(stat_idx),
496333965Sjdp			   vsi->stat_offsets_loaded,
496433965Sjdp			   &oes->tx_bytes, &es->tx_bytes);
496533965Sjdp	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
496633965Sjdp			   I40E_GLV_UPTCL(stat_idx),
496733965Sjdp			   vsi->stat_offsets_loaded,
496833965Sjdp			   &oes->tx_unicast, &es->tx_unicast);
496933965Sjdp	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
497033965Sjdp			   I40E_GLV_MPTCL(stat_idx),
497133965Sjdp			   vsi->stat_offsets_loaded,
497233965Sjdp			   &oes->tx_multicast, &es->tx_multicast);
497333965Sjdp	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
497433965Sjdp			   I40E_GLV_BPTCL(stat_idx),
497533965Sjdp			   vsi->stat_offsets_loaded,
497633965Sjdp			   &oes->tx_broadcast, &es->tx_broadcast);
497760484Sobrien	vsi->stat_offsets_loaded = true;
497860484Sobrien}
497960484Sobrien
498060484Sobrienstatic void
498160484Sobrienixl_update_vsi_stats(struct ixl_vsi *vsi)
498260484Sobrien{
498360484Sobrien	struct ixl_pf		*pf;
498433965Sjdp	struct ifnet		*ifp;
498533965Sjdp	struct i40e_eth_stats	*es;
498633965Sjdp	u64			tx_discards;
498733965Sjdp
498833965Sjdp	struct i40e_hw_port_stats *nsd;
498933965Sjdp
499033965Sjdp	pf = vsi->back;
499133965Sjdp	ifp = vsi->ifp;
499233965Sjdp	es = &vsi->eth_stats;
499333965Sjdp	nsd = &pf->stats;
499433965Sjdp
499533965Sjdp	ixl_update_eth_stats(vsi);
499633965Sjdp
499733965Sjdp	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
499833965Sjdp	for (int i = 0; i < vsi->num_queues; i++)
499933965Sjdp		tx_discards += vsi->queues[i].txr.br->br_drops;
500033965Sjdp
500133965Sjdp	/* Update ifnet stats */
500233965Sjdp	IXL_SET_IPACKETS(vsi, es->rx_unicast +
500333965Sjdp	                   es->rx_multicast +
500433965Sjdp			   es->rx_broadcast);
500533965Sjdp	IXL_SET_OPACKETS(vsi, es->tx_unicast +
500633965Sjdp	                   es->tx_multicast +
500733965Sjdp			   es->tx_broadcast);
500833965Sjdp	IXL_SET_IBYTES(vsi, es->rx_bytes);
500933965Sjdp	IXL_SET_OBYTES(vsi, es->tx_bytes);
501033965Sjdp	IXL_SET_IMCASTS(vsi, es->rx_multicast);
501138889Sjdp	IXL_SET_OMCASTS(vsi, es->tx_multicast);
501233965Sjdp
501333965Sjdp	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
501433965Sjdp	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
501533965Sjdp	    nsd->rx_jabber);
501633965Sjdp	IXL_SET_OERRORS(vsi, es->tx_errors);
501733965Sjdp	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
501833965Sjdp	IXL_SET_OQDROPS(vsi, tx_discards);
501933965Sjdp	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
502033965Sjdp	IXL_SET_COLLISIONS(vsi, 0);
502133965Sjdp}
502233965Sjdp
502333965Sjdp/**
502433965Sjdp * Reset all of the stats for the given pf
502533965Sjdp **/
502638889Sjdpvoid ixl_pf_reset_stats(struct ixl_pf *pf)
502760484Sobrien{
502860484Sobrien	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
502960484Sobrien	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
503060484Sobrien	pf->stat_offsets_loaded = false;
503138889Sjdp}
503260484Sobrien
503338889Sjdp/**
503460484Sobrien * Resets all stats of the given vsi
503538889Sjdp **/
503638889Sjdpvoid ixl_vsi_reset_stats(struct ixl_vsi *vsi)
503738889Sjdp{
503838889Sjdp	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
503938889Sjdp	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
504038889Sjdp	vsi->stat_offsets_loaded = false;
504160484Sobrien}
504260484Sobrien
504360484Sobrien/**
504460484Sobrien * Read and update a 48 bit stat from the hw
504538889Sjdp *
504660484Sobrien * Since the device stats are not reset at PFReset, they likely will not
504760484Sobrien * be zeroed when the driver starts.  We'll save the first values read
504860484Sobrien * and use them as offsets to be subtracted from the raw values in order
504938889Sjdp * to report stats that count from zero.
505038889Sjdp **/
505138889Sjdpstatic void
505238889Sjdpixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
505338889Sjdp	bool offset_loaded, u64 *offset, u64 *stat)
505438889Sjdp{
505538889Sjdp	u64 new_data;
505638889Sjdp
505738889Sjdp#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
505838889Sjdp	new_data = rd64(hw, loreg);
505938889Sjdp#else
506038889Sjdp	/*
506138889Sjdp	 * Use two rd32's instead of one rd64; FreeBSD versions before
506238889Sjdp	 * 10 don't support 8 byte bus reads/writes.
506338889Sjdp	 */
506438889Sjdp	new_data = rd32(hw, loreg);
506538889Sjdp	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
506638889Sjdp#endif
506738889Sjdp
506838889Sjdp	if (!offset_loaded)
506938889Sjdp		*offset = new_data;
507038889Sjdp	if (new_data >= *offset)
507138889Sjdp		*stat = new_data - *offset;
507238889Sjdp	else
507338889Sjdp		*stat = (new_data + ((u64)1 << 48)) - *offset;
507438889Sjdp	*stat &= 0xFFFFFFFFFFFFULL;
507538889Sjdp}
507638889Sjdp
507738889Sjdp/**
507838889Sjdp * Read and update a 32 bit stat from the hw
507938889Sjdp **/
508038889Sjdpstatic void
508138889Sjdpixl_stat_update32(struct i40e_hw *hw, u32 reg,
508238889Sjdp	bool offset_loaded, u64 *offset, u64 *stat)
508338889Sjdp{
508438889Sjdp	u32 new_data;
508538889Sjdp
508638889Sjdp	new_data = rd32(hw, reg);
508738889Sjdp	if (!offset_loaded)
508838889Sjdp		*offset = new_data;
508938889Sjdp	if (new_data >= *offset)
509038889Sjdp		*stat = (u32)(new_data - *offset);
509138889Sjdp	else
509238889Sjdp		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
509338889Sjdp}
509438889Sjdp
/*
** Register this device's sysctl tree: flow control, advertised and
** current speed, firmware version, and (conditionally) debug knobs.
*/
static void
ixl_add_device_sysctls(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");

	/* Interrupt-throttling knobs, currently compiled out. */
#if 0
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLFLAG_RW,
	    &ixl_rx_itr, 0, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLFLAG_RW,
	    &ixl_tx_itr, 0, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
#endif

	/* Diagnostics only built when IXL_DEBUG_SYSCTL is defined. */
#ifdef IXL_DEBUG_SYSCTL
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");

	/* Shared-code debug message level */
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug_mask", CTLFLAG_RW,
	    &pf->hw.debug_mask, 0, "Debug Message Level");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");

#ifdef PCI_IOV
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
	    0, "PF/VF Virtual Channel debug level");
#endif
#endif
}
518838889Sjdp
518933965Sjdp/*
519033965Sjdp** Set flow control using sysctl:
519133965Sjdp** 	0 - off
519233965Sjdp**	1 - rx pause
519333965Sjdp**	2 - tx pause
519433965Sjdp**	3 - full
519538889Sjdp*/
519633965Sjdpstatic int
519738889Sjdpixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
519833965Sjdp{
519960484Sobrien	/*
520060484Sobrien	 * TODO: ensure tx CRC by hardware should be enabled
520160484Sobrien	 * if tx flow control is enabled.
520260484Sobrien	 * ^ N/A for 40G ports
520360484Sobrien	 */
520460484Sobrien	struct ixl_pf *pf = (struct ixl_pf *)arg1;
520560484Sobrien	struct i40e_hw *hw = &pf->hw;
520660484Sobrien	device_t dev = pf->dev;
520760484Sobrien	int requested_fc, error = 0;
520860484Sobrien	enum i40e_status_code aq_error = 0;
520960484Sobrien	u8 fc_aq_err = 0;
521060484Sobrien
521160484Sobrien	/* Get request */
521260484Sobrien	requested_fc = pf->fc;
521360484Sobrien	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
521460484Sobrien	if ((error) || (req->newptr == NULL))
521560484Sobrien		return (error);
521660484Sobrien	if (requested_fc < 0 || requested_fc > 3) {
521760484Sobrien		device_printf(dev,
521860484Sobrien		    "Invalid fc mode; valid modes are 0 through 3\n");
521960484Sobrien		return (EINVAL);
522060484Sobrien	}
522160484Sobrien
522260484Sobrien	/* Set fc ability for port */
522360484Sobrien	hw->fc.requested_mode = requested_fc;
522460484Sobrien	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
522560484Sobrien	if (aq_error) {
522660484Sobrien		device_printf(dev,
522760484Sobrien		    "%s: Error setting new fc mode %d; fc_err %#x\n",
522860484Sobrien		    __func__, aq_error, fc_aq_err);
522960484Sobrien		return (EIO);
523060484Sobrien	}
523160484Sobrien	pf->fc = requested_fc;
523260484Sobrien
523360484Sobrien	/* Get new link state */
523460484Sobrien	i40e_msec_delay(250);
523560484Sobrien	hw->phy.get_link_info = TRUE;
523660484Sobrien	i40e_get_link_status(hw, &pf->link_up);
523760484Sobrien
523860484Sobrien	return (0);
523960484Sobrien}
524060484Sobrien
524160484Sobrienstatic int
524260484Sobrienixl_current_speed(SYSCTL_HANDLER_ARGS)
524360484Sobrien{
524460484Sobrien	struct ixl_pf *pf = (struct ixl_pf *)arg1;
524560484Sobrien	struct i40e_hw *hw = &pf->hw;
524660484Sobrien	int error = 0, index = 0;
524760484Sobrien
524860484Sobrien	char *speeds[] = {
524960484Sobrien		"Unknown",
525060484Sobrien		"100M",
525160484Sobrien		"1G",
525260484Sobrien		"10G",
525360484Sobrien		"40G",
525460484Sobrien		"20G"
525560484Sobrien	};
525660484Sobrien
525760484Sobrien	ixl_update_link_status(pf);
525860484Sobrien
525960484Sobrien	switch (hw->phy.link_info.link_speed) {
526060484Sobrien	case I40E_LINK_SPEED_100MB:
526160484Sobrien		index = 1;
526260484Sobrien		break;
526360484Sobrien	case I40E_LINK_SPEED_1GB:
526460484Sobrien		index = 2;
526560484Sobrien		break;
526660484Sobrien	case I40E_LINK_SPEED_10GB:
526760484Sobrien		index = 3;
526860484Sobrien		break;
526960484Sobrien	case I40E_LINK_SPEED_40GB:
527060484Sobrien		index = 4;
527160484Sobrien		break;
527260484Sobrien	case I40E_LINK_SPEED_20GB:
527360484Sobrien		index = 5;
527460484Sobrien		break;
527560484Sobrien	case I40E_LINK_SPEED_UNKNOWN:
527660484Sobrien	default:
527760484Sobrien		index = 0;
527860484Sobrien		break;
527960484Sobrien	}
528060484Sobrien
528160484Sobrien	error = sysctl_handle_string(oidp, speeds[index],
528260484Sobrien	    strlen(speeds[index]), req);
528360484Sobrien	return (error);
528460484Sobrien}
528560484Sobrien
528660484Sobrienstatic int
528760484Sobrienixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
528860484Sobrien{
528960484Sobrien	struct i40e_hw *hw = &pf->hw;
529060484Sobrien	device_t dev = pf->dev;
529160484Sobrien	struct i40e_aq_get_phy_abilities_resp abilities;
529260484Sobrien	struct i40e_aq_set_phy_config config;
529360484Sobrien	enum i40e_status_code aq_error = 0;
529460484Sobrien
529560484Sobrien	/* Get current capability information */
529660484Sobrien	aq_error = i40e_aq_get_phy_capabilities(hw,
529760484Sobrien	    FALSE, FALSE, &abilities, NULL);
529860484Sobrien	if (aq_error) {
529960484Sobrien		device_printf(dev,
530060484Sobrien		    "%s: Error getting phy capabilities %d,"
5301		    " aq error: %d\n", __func__, aq_error,
5302		    hw->aq.asq_last_status);
5303		return (EAGAIN);
5304	}
5305
5306	/* Prepare new config */
5307	bzero(&config, sizeof(config));
5308	config.phy_type = abilities.phy_type;
5309	config.abilities = abilities.abilities
5310	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
5311	config.eee_capability = abilities.eee_capability;
5312	config.eeer = abilities.eeer_val;
5313	config.low_power_ctrl = abilities.d3_lpan;
5314	/* Translate into aq cmd link_speed */
5315	if (speeds & 0x10)
5316		config.link_speed |= I40E_LINK_SPEED_40GB;
5317	if (speeds & 0x8)
5318		config.link_speed |= I40E_LINK_SPEED_20GB;
5319	if (speeds & 0x4)
5320		config.link_speed |= I40E_LINK_SPEED_10GB;
5321	if (speeds & 0x2)
5322		config.link_speed |= I40E_LINK_SPEED_1GB;
5323	if (speeds & 0x1)
5324		config.link_speed |= I40E_LINK_SPEED_100MB;
5325
5326	/* Do aq command & restart link */
5327	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
5328	if (aq_error) {
5329		device_printf(dev,
5330		    "%s: Error setting new phy config %d,"
5331		    " aq error: %d\n", __func__, aq_error,
5332		    hw->aq.asq_last_status);
5333		return (EAGAIN);
5334	}
5335
5336	/*
5337	** This seems a bit heavy handed, but we
5338	** need to get a reinit on some devices
5339	*/
5340	IXL_PF_LOCK(pf);
5341	ixl_stop_locked(pf);
5342	ixl_init_locked(pf);
5343	IXL_PF_UNLOCK(pf);
5344
5345	return (0);
5346}
5347
/*
** Control link advertise speed:
**	Flags:
**	 0x1 - advertise 100 Mb
**	 0x2 - advertise 1G
**	 0x4 - advertise 10G
**	 0x8 - advertise 20G
**	0x10 - advertise 40G
**
**	Set to 0 to disable link
*/
static int
ixl_set_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_ls = 0;
	int error = 0;

	/* Read in new mode */
	requested_ls = pf->advertised_speed;
	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Check for sane value */
	/*
	 * NOTE(review): this bound rejects any value above 0x10, including
	 * combinations such as 0x12 (1G|40G). The per-device checks below
	 * would reject those anyway on every listed adapter; confirm if a
	 * device ever supports 40G alongside lower speeds.
	 */
	if (requested_ls > 0x10) {
		device_printf(dev, "Invalid advertised speed; "
		    "valid modes are 0x1 through 0x10\n");
		return (EINVAL);
	}
	/* Then check for validity based on adapter type */
	switch (hw->device_id) {
	case I40E_DEV_ID_10G_BASE_T:
	case I40E_DEV_ID_10G_BASE_T4:
		/* BaseT */
		if (requested_ls & ~(0x7)) {
			device_printf(dev,
			    "Only 100M/1G/10G speeds supported on this device.\n");
			return (EINVAL);
		}
		break;
	case I40E_DEV_ID_20G_KR2:
	case I40E_DEV_ID_20G_KR2_A:
		/* 20G */
		if (requested_ls & ~(0xE)) {
			device_printf(dev,
			    "Only 1G/10G/20G speeds supported on this device.\n");
			return (EINVAL);
		}
		break;
	case I40E_DEV_ID_KX_B:
	case I40E_DEV_ID_QSFP_A:
	case I40E_DEV_ID_QSFP_B:
		/* 40G */
		if (requested_ls & ~(0x10)) {
			device_printf(dev,
			    "Only 40G speeds supported on this device.\n");
			return (EINVAL);
		}
		break;
	default:
		/* 10G (1G) */
		if (requested_ls & ~(0x6)) {
			device_printf(dev,
			    "Only 1/10Gbs speeds are supported on this device.\n");
			return (EINVAL);
		}
		break;
	}

	/* Exit if no change */
	if (pf->advertised_speed == requested_ls)
		return (0);

	error = ixl_set_advertised_speeds(pf, requested_ls);
	if (error)
		return (error);

	/* Only cache the new setting once the hardware accepted it. */
	pf->advertised_speed = requested_ls;
	ixl_update_link_status(pf);
	return (0);
}
5431
5432/*
5433** Get the width and transaction speed of
5434** the bus this adapter is plugged into.
5435*/
5436static u16
5437ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
5438{
5439        u16                     link;
5440        u32                     offset;
5441
5442        /* Get the PCI Express Capabilities offset */
5443        pci_find_cap(dev, PCIY_EXPRESS, &offset);
5444
5445        /* ...and read the Link Status Register */
5446        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
5447
5448        switch (link & I40E_PCI_LINK_WIDTH) {
5449        case I40E_PCI_LINK_WIDTH_1:
5450                hw->bus.width = i40e_bus_width_pcie_x1;
5451                break;
5452        case I40E_PCI_LINK_WIDTH_2:
5453                hw->bus.width = i40e_bus_width_pcie_x2;
5454                break;
5455        case I40E_PCI_LINK_WIDTH_4:
5456                hw->bus.width = i40e_bus_width_pcie_x4;
5457                break;
5458        case I40E_PCI_LINK_WIDTH_8:
5459                hw->bus.width = i40e_bus_width_pcie_x8;
5460                break;
5461        default:
5462                hw->bus.width = i40e_bus_width_unknown;
5463                break;
5464        }
5465
5466        switch (link & I40E_PCI_LINK_SPEED) {
5467        case I40E_PCI_LINK_SPEED_2500:
5468                hw->bus.speed = i40e_bus_speed_2500;
5469                break;
5470        case I40E_PCI_LINK_SPEED_5000:
5471                hw->bus.speed = i40e_bus_speed_5000;
5472                break;
5473        case I40E_PCI_LINK_SPEED_8000:
5474                hw->bus.speed = i40e_bus_speed_8000;
5475                break;
5476        default:
5477                hw->bus.speed = i40e_bus_speed_unknown;
5478                break;
5479        }
5480
5481        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
5482            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
5483            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
5484            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
5485            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
5486            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
5487            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
5488            ("Unknown"));
5489
5490        if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
5491            (hw->bus.speed < i40e_bus_speed_8000)) {
5492                device_printf(dev, "PCI-Express bandwidth available"
5493                    " for this device\n     may be insufficient for"
5494                    " optimal performance.\n");
5495                device_printf(dev, "For expected performance a x8 "
5496                    "PCIE Gen3 slot is required.\n");
5497        }
5498
5499        return (link);
5500}
5501
5502static int
5503ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
5504{
5505	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
5506	struct i40e_hw	*hw = &pf->hw;
5507	struct sbuf	*sbuf;
5508
5509	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5510	ixl_nvm_version_str(hw, sbuf);
5511	sbuf_finish(sbuf);
5512	sbuf_delete(sbuf);
5513
5514	return 0;
5515}
5516
5517#ifdef IXL_DEBUG
5518static void
5519ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
5520{
5521	if ((nvma->command == I40E_NVM_READ) &&
5522	    ((nvma->config & 0xFF) == 0xF) &&
5523	    (((nvma->config & 0xF00) >> 8) == 0xF) &&
5524	    (nvma->offset == 0) &&
5525	    (nvma->data_size == 1)) {
5526		// device_printf(dev, "- Get Driver Status Command\n");
5527	}
5528	else if (nvma->command == I40E_NVM_READ) {
5529
5530	}
5531	else {
5532		switch (nvma->command) {
5533		case 0xB:
5534			device_printf(dev, "- command: I40E_NVM_READ\n");
5535			break;
5536		case 0xC:
5537			device_printf(dev, "- command: I40E_NVM_WRITE\n");
5538			break;
5539		default:
5540			device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
5541			break;
5542		}
5543
5544		device_printf(dev, "- config (ptr)  : 0x%02x\n", nvma->config & 0xFF);
5545		device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
5546		device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
5547		device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
5548	}
5549}
5550#endif
5551
/*
** Handle an NVM update request handed down from userland via ifdrv
** (e.g. from an NVM update tool). Validates the request, waits out
** any in-progress EMP reset, then forwards the command to the shared
** i40e NVM update handler under the PF lock.
**
** Returns 0 or an errno-style value from the shared handler; -EPERM
** is remapped to -EACCES (see comment near the bottom).
*/
static int
ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_nvm_access *nvma;
	device_t dev = pf->dev;
	enum i40e_status_code status = 0;
	int perrno;

	DEBUGFUNC("ixl_handle_nvmupd_cmd");

	/* Sanity checks */
	if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
	    ifd->ifd_data == NULL) {
		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
		    __func__);
		device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
		    __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
		device_printf(dev, "%s: data pointer: %p\n", __func__,
		    ifd->ifd_data);
		return (EINVAL);
	}

	nvma = (struct i40e_nvm_access *)ifd->ifd_data;

#ifdef IXL_DEBUG
	ixl_print_nvm_cmd(dev, nvma);
#endif

	/*
	** If an EMP reset is in progress, poll for up to ~10 seconds
	** (100 iterations x 100 ms) for it to finish before issuing
	** the command.
	*/
	if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
		int count = 0;
		while (count++ < 100) {
			i40e_msec_delay(100);
			if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
				break;
		}
	}

	if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
		IXL_PF_LOCK(pf);
		status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
		IXL_PF_UNLOCK(pf);
	} else {
		/* Still resetting after the wait above; refuse the command */
		perrno = -EBUSY;
	}

	if (status)
		device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
		    status, perrno);

	/*
	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
	 * to run this ioctl again. So use -EACCES for -EPERM instead.
	 */
	if (perrno == -EPERM)
		return (-EACCES);
	else
		return (perrno);
}
5611
5612#ifdef IXL_DEBUG_SYSCTL
/*
** Sysctl handler (read-only): query the current link status from
** firmware with the Get Link Info admin queue command and report the
** raw fields as a formatted string.
*/
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status link_status;
	char buf[512];	/* ample headroom for the fixed format below */

	enum i40e_status_code aq_error = 0;

	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
	if (aq_error) {
		printf("i40e_aq_get_link_info() error %d\n", aq_error);
		return (EPERM);
	}

	sprintf(buf, "\n"
	    "PHY Type : %#04x\n"
	    "Speed    : %#04x\n"
	    "Link info: %#04x\n"
	    "AN info  : %#04x\n"
	    "Ext info : %#04x\n"
	    "Max Frame: %d\n"
	    "Pacing   : %#04x\n"
	    "CRC En?  : %d",
	    link_status.phy_type, link_status.link_speed,
	    link_status.link_info, link_status.an_info,
	    link_status.ext_info, link_status.max_frame_size,
	    link_status.pacing, link_status.crc_enable);

	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}
5645
/*
** Sysctl handler (read-only): query the PHY capabilities from firmware
** and report the raw fields as a formatted string.
*/
static int
ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
	struct i40e_hw		*hw = &pf->hw;
	char			buf[512];	/* ample for the fixed format */
	enum i40e_status_code	aq_error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;

	aq_error = i40e_aq_get_phy_capabilities(hw,
	    TRUE, FALSE, &abilities, NULL);
	if (aq_error) {
		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
		return (EPERM);
	}

	sprintf(buf, "\n"
	    "PHY Type : %#010x\n"
	    "Speed    : %#04x\n"
	    "Abilities: %#04x\n"
	    "EEE cap  : %#06x\n"
	    "EEER reg : %#010x\n"
	    "D3 Lpan  : %#04x",
	    abilities.phy_type, abilities.link_speed,
	    abilities.abilities, abilities.eee_capability,
	    abilities.eeer_val, abilities.d3_lpan);

	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}
5676
5677static int
5678ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
5679{
5680	struct ixl_pf *pf = (struct ixl_pf *)arg1;
5681	struct ixl_vsi *vsi = &pf->vsi;
5682	struct ixl_mac_filter *f;
5683	char *buf, *buf_i;
5684
5685	int error = 0;
5686	int ftl_len = 0;
5687	int ftl_counter = 0;
5688	int buf_len = 0;
5689	int entry_len = 42;
5690
5691	SLIST_FOREACH(f, &vsi->ftl, next) {
5692		ftl_len++;
5693	}
5694
5695	if (ftl_len < 1) {
5696		sysctl_handle_string(oidp, "(none)", 6, req);
5697		return (0);
5698	}
5699
5700	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
5701	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
5702
5703	sprintf(buf_i++, "\n");
5704	SLIST_FOREACH(f, &vsi->ftl, next) {
5705		sprintf(buf_i,
5706		    MAC_FORMAT ", vlan %4d, flags %#06x",
5707		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
5708		buf_i += entry_len;
5709		/* don't print '\n' for last entry */
5710		if (++ftl_counter != ftl_len) {
5711			sprintf(buf_i, "\n");
5712			buf_i++;
5713		}
5714	}
5715
5716	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
5717	if (error)
5718		printf("sysctl error: %d\n", error);
5719	free(buf, M_DEVBUF);
5720	return error;
5721}
5722
5723#define IXL_SW_RES_SIZE 0x14
5724static int
5725ixl_res_alloc_cmp(const void *a, const void *b)
5726{
5727	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
5728	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
5729	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
5730
5731	return ((int)one->resource_type - (int)two->resource_type);
5732}
5733
5734/*
5735 * Longest string length: 25
5736 */
5737static char *
5738ixl_switch_res_type_string(u8 type)
5739{
5740	static char * ixl_switch_res_type_strings[0x14] = {
5741		"VEB",
5742		"VSI",
5743		"Perfect Match MAC address",
5744		"S-tag",
5745		"(Reserved)",
5746		"Multicast hash entry",
5747		"Unicast hash entry",
5748		"VLAN",
5749		"VSI List entry",
5750		"(Reserved)",
5751		"VLAN Statistic Pool",
5752		"Mirror Rule",
5753		"Queue Set",
5754		"Inner VLAN Forward filter",
5755		"(Reserved)",
5756		"Inner MAC",
5757		"IP",
5758		"GRE/VN1 Key",
5759		"VN2 Key",
5760		"Tunneling Port"
5761	};
5762
5763	if (type < 0x14)
5764		return ixl_switch_res_type_strings[type];
5765	else
5766		return "(Reserved)";
5767}
5768
/*
** Sysctl handler (read-only): fetch per-resource-type switch
** allocation counts from firmware and print them as a table,
** sorted by resource type.
*/
static int
ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	u8 num_entries;
	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];

	/* Drain-style sbuf: output goes directly to the sysctl request */
	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(resp, sizeof(resp));
	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
				resp,
				IXL_SW_RES_SIZE,
				NULL);
	if (error) {
		device_printf(dev,
		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		sbuf_delete(buf);
		return error;
	}

	/* Sort entries by type for display */
	qsort(resp, num_entries,
	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
	    &ixl_res_alloc_cmp);

	sbuf_cat(buf, "\n");
	sbuf_printf(buf, "# of entries: %d\n", num_entries);
	sbuf_printf(buf,
	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
	    "                          | (this)     | (all) | (this) | (all)       \n");
	for (int i = 0; i < num_entries; i++) {
		sbuf_printf(buf,
		    "%25s | %10d   %5d   %6d   %12d",
		    ixl_switch_res_type_string(resp[i].resource_type),
		    resp[i].guaranteed,
		    resp[i].total,
		    resp[i].used,
		    resp[i].total_unalloced);
		/* no trailing newline after the final row */
		if (i < num_entries - 1)
			sbuf_cat(buf, "\n");
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return error;
}
5829
5830/*
5831** Caller must init and delete sbuf; this function will clear and
5832** finish it for caller.
5833**
5834** XXX: Cannot use the SEID for this, since there is no longer a
5835** fixed mapping between SEID and element type.
5836*/
5837static char *
5838ixl_switch_element_string(struct sbuf *s,
5839    struct i40e_aqc_switch_config_element_resp *element)
5840{
5841	sbuf_clear(s);
5842
5843	switch (element->element_type) {
5844	case I40E_AQ_SW_ELEM_TYPE_MAC:
5845		sbuf_printf(s, "MAC %3d", element->element_info);
5846		break;
5847	case I40E_AQ_SW_ELEM_TYPE_PF:
5848		sbuf_printf(s, "PF  %3d", element->element_info);
5849		break;
5850	case I40E_AQ_SW_ELEM_TYPE_VF:
5851		sbuf_printf(s, "VF  %3d", element->element_info);
5852		break;
5853	case I40E_AQ_SW_ELEM_TYPE_EMP:
5854		sbuf_cat(s, "EMP");
5855		break;
5856	case I40E_AQ_SW_ELEM_TYPE_BMC:
5857		sbuf_cat(s, "BMC");
5858		break;
5859	case I40E_AQ_SW_ELEM_TYPE_PV:
5860		sbuf_cat(s, "PV");
5861		break;
5862	case I40E_AQ_SW_ELEM_TYPE_VEB:
5863		sbuf_cat(s, "VEB");
5864		break;
5865	case I40E_AQ_SW_ELEM_TYPE_PA:
5866		sbuf_cat(s, "PA");
5867		break;
5868	case I40E_AQ_SW_ELEM_TYPE_VSI:
5869		sbuf_printf(s, "VSI %3d", element->element_info);
5870		break;
5871	default:
5872		sbuf_cat(s, "?");
5873		break;
5874	}
5875
5876	sbuf_finish(s);
5877	return sbuf_data(s);
5878}
5879
/*
** Sysctl handler (read-only): fetch the switch configuration from
** firmware and print one row per switch element (SEID, name, uplink,
** downlink, connection type).
*/
static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	struct sbuf *nmbuf;	/* scratch sbuf for element name formatting */
	int error = 0;
	u16 next = 0;		/* continuation SEID if one AQ buf isn't enough */
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	struct i40e_aqc_get_switch_config_resp *sw_config;
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	error = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (error) {
		device_printf(dev,
		    "%s: aq_get_switch_config() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		sbuf_delete(buf);
		return error;
	}
	/* Only the first batch of elements is reported; see TODO */
	if (next)
		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
		    __func__, next);

	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	// Assuming <= 255 elements in switch
	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
	/* Exclude:
	** Revision -- all elements are revision 1 for now
	*/
	sbuf_printf(buf,
	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
	    "                |          |          | (uplink)\n");
	for (int i = 0; i < sw_config->header.num_reported; i++) {
		// "%4d (%8s) | %8s   %8s   %#8x",
		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    &sw_config->element[i]));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
		sbuf_cat(buf, "   ");
		sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
		sbuf_cat(buf, "   ");
		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
		/* no trailing newline after the final row */
		if (i < sw_config->header.num_reported - 1)
			sbuf_cat(buf, "\n");
	}
	sbuf_delete(nmbuf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (error);
}
5956
5957static int
5958ixl_debug_info(SYSCTL_HANDLER_ARGS)
5959{
5960	struct ixl_pf	*pf;
5961	int		error, input = 0;
5962
5963	error = sysctl_handle_int(oidp, &input, 0, req);
5964
5965	if (error || !req->newptr)
5966		return (error);
5967
5968	if (input == 1) {
5969		pf = (struct ixl_pf *)arg1;
5970		ixl_print_debug_info(pf);
5971	}
5972
5973	return (error);
5974}
5975
5976static void
5977ixl_print_debug_info(struct ixl_pf *pf)
5978{
5979	struct i40e_hw		*hw = &pf->hw;
5980	struct ixl_vsi		*vsi = &pf->vsi;
5981	struct ixl_queue	*que = vsi->queues;
5982	struct rx_ring		*rxr = &que->rxr;
5983	struct tx_ring		*txr = &que->txr;
5984	u32			reg;
5985
5986
5987	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
5988	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
5989	printf("RX next check = %x\n", rxr->next_check);
5990	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
5991	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
5992	printf("TX desc avail = %x\n", txr->avail);
5993
5994	reg = rd32(hw, I40E_GLV_GORCL(0xc));
5995	 printf("RX Bytes = %x\n", reg);
5996	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
5997	 printf("Port RX Bytes = %x\n", reg);
5998	reg = rd32(hw, I40E_GLV_RDPC(0xc));
5999	 printf("RX discard = %x\n", reg);
6000	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
6001	 printf("Port RX discard = %x\n", reg);
6002
6003	reg = rd32(hw, I40E_GLV_TEPC(0xc));
6004	 printf("TX errors = %x\n", reg);
6005	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
6006	 printf("TX Bytes = %x\n", reg);
6007
6008	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
6009	 printf("RX undersize = %x\n", reg);
6010	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
6011	 printf("RX fragments = %x\n", reg);
6012	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
6013	 printf("RX oversize = %x\n", reg);
6014	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
6015	 printf("RX length error = %x\n", reg);
6016	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
6017	 printf("mac remote fault = %x\n", reg);
6018	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
6019	 printf("mac local fault = %x\n", reg);
6020}
6021
6022#endif /* IXL_DEBUG_SYSCTL */
6023
6024#ifdef PCI_IOV
/*
** Create and configure a VSI for the given VF: build the VSI context
** (switch, security, VLAN, and queue-map sections), add it through
** the admin queue, then read back its parameters and remove any BW
** limit. Returns 0 or an errno derived from the last AQ error.
*/
static int
ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	struct i40e_vsi_context vsi_ctx;
	int i;
	uint16_t first_queue;
	enum i40e_status_code code;

	hw = &pf->hw;
	vsi = &pf->vsi;

	vsi_ctx.pf_num = hw->pf_id;
	vsi_ctx.uplink_seid = pf->veb_seid;
	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));

	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	vsi_ctx.info.switch_id = htole16(0);

	/* Optionally enable MAC anti-spoof checking for this VF */
	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	vsi_ctx.info.sec_flags = 0;
	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

	/* TODO: If a port VLAN is set, then this needs to be changed */
	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	/*
	** Non-contiguous queue mapping: the VF's queues start after the
	** PF's own queues, IXLV_MAX_QUEUES per VF; unused map slots are
	** filled with the "no queue" mask value.
	*/
	vsi_ctx.info.valid_sections |=
	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
	for (i = 0; i < IXLV_MAX_QUEUES; i++)
		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);

	/* Single TC starting at queue offset 0 */
	vsi_ctx.info.tc_mapping[0] = htole16(
	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));

	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	vf->vsi.seid = vsi_ctx.seid;
	vf->vsi.vsi_num = vsi_ctx.vsi_number;
	vf->vsi.first_queue = first_queue;
	vf->vsi.num_queues = IXLV_MAX_QUEUES;

	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));

	/* Run the VF's VSI without a bandwidth limit */
	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	}

	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
	return (0);
}
6094
6095static int
6096ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
6097{
6098	struct i40e_hw *hw;
6099	int error;
6100
6101	hw = &pf->hw;
6102
6103	error = ixl_vf_alloc_vsi(pf, vf);
6104	if (error != 0)
6105		return (error);
6106
6107	vf->vsi.hw_filters_add = 0;
6108	vf->vsi.hw_filters_del = 0;
6109	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
6110	ixl_reconfigure_filters(&vf->vsi);
6111
6112	return (0);
6113}
6114
/*
** Program one slot of the VSI's queue-index table with 'val'
** (a global queue number, or the mask value meaning "unused").
*/
static void
ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
    uint32_t val)
{
	uint32_t qtable;
	int index, shift;

	/*
	 * Two queues are mapped in a single register, so we have to do some
	 * gymnastics to convert the queue number into a register index and
	 * shift.
	 */
	index = qnum / 2;
	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;

	/* Read-modify-write: only replace this queue's field */
	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
	qtable |= val << shift;
	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
}
6135
/*
** Program the hardware queue mappings for a VF: enable table-based
** (non-contiguous) mapping for its VSI, map each VF-relative queue
** number to its global queue, and mark the remaining VSI queue table
** slots unused.
*/
static void
ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t qtable;
	int i;

	hw = &pf->hw;

	/*
	 * Contiguous mappings aren't actually supported by the hardware,
	 * so we have to use non-contiguous mappings.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Enable TX/RX queue mapping for this VF */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* Map VF-relative queue i to its global queue number */
	for (i = 0; i < vf->vsi.num_queues; i++) {
		qtable = (vf->vsi.first_queue + i) <<
		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;

		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
	}

	/* Map queues allocated to VF to its VSI. */
	for (i = 0; i < vf->vsi.num_queues; i++)
		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);

	/* Set rest of VSI queues as unused. */
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);

	ixl_flush(hw);
}
6173
6174static void
6175ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
6176{
6177	struct i40e_hw *hw;
6178
6179	hw = &pf->hw;
6180
6181	if (vsi->seid == 0)
6182		return;
6183
6184	i40e_aq_delete_element(hw, vsi->seid, NULL);
6185}
6186
/*
** Disable a VF queue interrupt by writing only the CLEARPBA bit to
** its dynamic control register, then flush the write.
*/
static void
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
{

	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	ixl_flush(hw);
}
6194
/*
** Detach a VF interrupt vector from its queue list by writing the
** FIRSTQ type and index fields fully set (all-ones) to the given
** linked-list register, then flush the write.
*/
static void
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
{

	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	ixl_flush(hw);
}
6203
6204static void
6205ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
6206{
6207	struct i40e_hw *hw;
6208	uint32_t vfint_reg, vpint_reg;
6209	int i;
6210
6211	hw = &pf->hw;
6212
6213	ixl_vf_vsi_release(pf, &vf->vsi);
6214
6215	/* Index 0 has a special register. */
6216	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
6217
6218	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
6219		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
6220		ixl_vf_disable_queue_intr(hw, vfint_reg);
6221	}
6222
6223	/* Index 0 has a special register. */
6224	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
6225
6226	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
6227		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
6228		ixl_vf_unregister_intr(hw, vpint_reg);
6229	}
6230
6231	vf->vsi.num_queues = 0;
6232}
6233
/*
** Poll the PF's PCI config-indirect access registers (CIAA/CIAD)
** until the VF reports no pending PCIe transactions. Returns 0 on
** success or ETIMEDOUT after IXL_VF_RESET_TIMEOUT attempts.
*/
static int
ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	int i;
	uint16_t global_vf_num;
	uint32_t ciad;

	hw = &pf->hw;
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;

	/* Point the indirect-access window at this VF's device status */
	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		ciad = rd32(hw, I40E_PF_PCI_CIAD);
		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
			return (0);
		DELAY(1);
	}

	return (ETIMEDOUT);
}
6256
6257static void
6258ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
6259{
6260	struct i40e_hw *hw;
6261	uint32_t vfrtrig;
6262
6263	hw = &pf->hw;
6264
6265	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
6266	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
6267	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
6268	ixl_flush(hw);
6269
6270	ixl_reinit_vf(pf, vf);
6271}
6272
/*
** Bring a VF back up after (or during) a reset: wait for its PCIe
** activity to drain and the reset to complete, deassert the reset
** trigger, tear down and rebuild its VSI and queue mappings, then
** mark it active.
*/
static void
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrstat, vfrtrig;
	int i, error;

	hw = &pf->hw;

	error = ixl_flush_pcie(pf, vf);
	if (error != 0)
		device_printf(pf->dev,
		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
		    vf->vf_num);

	/* Poll for the hardware to report the VF reset as done */
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		DELAY(10);

		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i == IXL_VF_RESET_TIMEOUT)
		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);

	/* Tell the VF driver the reset has completed */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);

	/* Deassert the software-reset trigger set by ixl_reset_vf() */
	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

	if (vf->vsi.seid != 0)
		ixl_disable_rings(&vf->vsi);

	/* Rebuild the VF's VSI and queue mappings from scratch */
	ixl_vf_release_resources(pf, vf);
	ixl_vf_setup_vsi(pf, vf);
	ixl_vf_map_queues(pf, vf);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
	ixl_flush(hw);
}
6315
/*
** Return a printable name for a virtchnl opcode, for debug logging.
** Unrecognized opcodes map to "UNKNOWN".
*/
static const char *
ixl_vc_opcode_str(uint16_t op)
{

	switch (op) {
	case I40E_VIRTCHNL_OP_VERSION:
		return ("VERSION");
	case I40E_VIRTCHNL_OP_RESET_VF:
		return ("RESET_VF");
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		return ("GET_VF_RESOURCES");
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		return ("CONFIG_TX_QUEUE");
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		return ("CONFIG_RX_QUEUE");
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		return ("CONFIG_VSI_QUEUES");
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		return ("CONFIG_IRQ_MAP");
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		return ("ENABLE_QUEUES");
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		return ("DISABLE_QUEUES");
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		return ("ADD_ETHER_ADDRESS");
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		return ("DEL_ETHER_ADDRESS");
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		return ("ADD_VLAN");
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		return ("DEL_VLAN");
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		return ("CONFIG_PROMISCUOUS_MODE");
	case I40E_VIRTCHNL_OP_GET_STATS:
		return ("GET_STATS");
	case I40E_VIRTCHNL_OP_FCOE:
		return ("FCOE");
	case I40E_VIRTCHNL_OP_EVENT:
		return ("EVENT");
	default:
		return ("UNKNOWN");
	}
}
6359
6360static int
6361ixl_vc_opcode_level(uint16_t opcode)
6362{
6363	switch (opcode) {
6364	case I40E_VIRTCHNL_OP_GET_STATS:
6365		return (10);
6366	default:
6367		return (5);
6368	}
6369}
6370
6371static void
6372ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
6373    enum i40e_status_code status, void *msg, uint16_t len)
6374{
6375	struct i40e_hw *hw;
6376	int global_vf_id;
6377
6378	hw = &pf->hw;
6379	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
6380
6381	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
6382	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
6383	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
6384
6385	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
6386}
6387
/*
** Send a success reply (no payload) for 'op' to the given VF.
*/
static void
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
{

	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
}
6394
/*
** Log and send an error reply (no payload) for 'op' to the given VF.
** 'file' and 'line' identify the call site — presumably supplied by
** the i40e_send_vf_nack() wrapper macro used throughout this file.
*/
static void
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, const char *file, int line)
{

	I40E_VC_DEBUG(pf, 1,
	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
}
6405
/*
** Handle a virtchnl VERSION request: record the VF's minor version
** and reply with the version this PF driver speaks.
*/
static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_virtchnl_version_info reply;

	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
		    I40E_ERR_PARAM);
		return;
	}

	/* Only the minor version is tracked per-VF */
	vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;

	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
	    sizeof(reply));
}
6425
/*
** Handle a virtchnl RESET_VF request: the message carries no payload,
** and the protocol defines no reply.
*/
static void
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{

	if (msg_size != 0) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
		    I40E_ERR_PARAM);
		return;
	}

	ixl_reset_vf(pf, vf);

	/* No response to a reset message. */
}
6441
/*
** Handle a virtchnl GET_VF_RESOURCES request: describe the VF's VSI,
** queue count, interrupt vectors, offload capabilities, and default
** MAC address back to the VF.
*/
static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_virtchnl_vf_resource reply;

	/* Version 0 VFs send no payload; version 1 VFs send a 4-byte
	** capability-request bitmap. */
	if ((vf->version == 0 && msg_size != 0) ||
	    (vf->version == 1 && msg_size != 4)) {
		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
		    " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
		    vf->version);
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
		    I40E_ERR_PARAM);
		return;
	}

	bzero(&reply, sizeof(reply));

	/* Old VFs can't request capabilities, so offer a fixed set;
	** newer VFs get the flags they asked for echoed back. */
	if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
		reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
					 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
					 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
	else
		reply.vf_offload_flags = *(u32 *)msg;

	reply.num_vsis = 1;
	reply.num_queue_pairs = vf->vsi.num_queues;
	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);

	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
	    I40E_SUCCESS, &reply, sizeof(reply));
}
6478
/*
** Program one of a VF's TX queues from a virtchnl queue description:
** clear and rewrite the HMC TX queue context, then point the queue's
** QTX_CTL register at this VF. Returns 0 or EINVAL on HMC failure.
*/
static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_txq txq;
	uint16_t global_queue_num, global_vf_num;
	enum i40e_status_code status;
	uint32_t qtx_ctl;

	hw = &pf->hw;
	/* The VF supplies VF-relative queue IDs; convert to global */
	global_queue_num = vf->vsi.first_queue + info->queue_id;
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	bzero(&txq, sizeof(txq));

	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;

	txq.head_wb_ena = info->headwb_enabled;
	txq.head_wb_addr = info->dma_headwb_addr;
	txq.qlen = info->ring_len;
	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
	txq.rdylist_act = 0;

	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Associate the queue with this VF in the TX control register */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
	ixl_flush(hw);

	return (0);
}
6518
/*
 * Program the HMC RX queue context for one of a VF's queues as described
 * by the VF, after validating the buffer and packet size limits.
 *
 * Returns 0 on success or EINVAL on invalid parameters or if the firmware
 * rejects the clear/set of the queue context.
 */
static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_rxq rxq;
	uint16_t global_queue_num;
	enum i40e_status_code status;

	hw = &pf->hw;
	/* VF queue ids are relative; convert to the PF-global numbering. */
	global_queue_num = vf->vsi.first_queue + info->queue_id;
	bzero(&rxq, sizeof(rxq));

	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
		return (EINVAL);

	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
	    info->max_pkt_size < ETHER_MIN_LEN)
		return (EINVAL);

	if (info->splithdr_enabled) {
		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
			return (EINVAL);

		/* Only the defined header-split positions may be selected. */
		rxq.hsplit_0 = info->rx_split_pos &
		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* dtype 2 selects header-split descriptors. */
		rxq.dtype = 2;
	}

	/* The context must be cleared before it can be programmed. */
	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	/* Ring base address is stored in IXL_RX_CTX_BASE_UNITS units. */
	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
	rxq.qlen = info->ring_len;

	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rxq.dsize = 1;
	rxq.crcstrip = 1;
	rxq.l2tsel = 1;

	rxq.rxmax = info->max_pkt_size;
	/* Enable TPH (TLP processing hints) for descriptors, data, head wb. */
	rxq.tphrdesc_ena = 1;
	rxq.tphwdesc_ena = 1;
	rxq.tphdata_ena = 1;
	rxq.tphhead_ena = 1;
	rxq.lrxqthresh = 2;
	rxq.prefena = 1;

	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	return (0);
}
6580
/*
 * Handle a CONFIG_VSI_QUEUES virtchnl request: validate the message and
 * configure each requested TX/RX queue pair for the VF's VSI.
 */
static void
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_virtchnl_vsi_queue_config_info *info;
	struct i40e_virtchnl_queue_pair_info *pair;
	int i;

	/* The fixed header must be present before we can read it. */
	if (msg_size < sizeof(*info)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	info = msg;
	if (info->num_queue_pairs == 0) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* The payload must exactly hold the claimed number of pairs. */
	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* The VF may only configure its own VSI. */
	if (info->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < info->num_queue_pairs; i++) {
		pair = &info->qpair[i];

		/*
		 * Each pair must target the VF's VSI, use matching TX/RX
		 * queue ids, and stay within the VF's queue allocation.
		 */
		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
		    pair->txq.queue_id != pair->rxq.queue_id ||
		    pair->txq.queue_id >= vf->vsi.num_queues) {

			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}
	}

	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
}
6642
/*
 * Write the queue-interrupt control register for one queue, linking it into
 * the interrupt linked list: the queue written here points at the previously
 * written (*last_type, *last_queue) entry, which is then updated to refer to
 * this queue for the next call.
 *
 * NOTE(review): the I40E_QINT_RQCTL_* shift macros are used for both RX and
 * TX registers; this assumes the RQCTL/TQCTL field layouts match — confirm
 * against the register reference.
 */
static void
ixl_vf_set_qctl(struct ixl_pf *pf,
    const struct i40e_virtchnl_vector_map *vector,
    enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue)
{
	uint32_t offset, qctl;
	uint16_t itr_indx;

	if (cur_type == I40E_QUEUE_TYPE_RX) {
		offset = I40E_QINT_RQCTL(cur_queue);
		itr_indx = vector->rxitr_idx;
	} else {
		offset = I40E_QINT_TQCTL(cur_queue);
		itr_indx = vector->txitr_idx;
	}

	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));

	wr32(&pf->hw, offset, qctl);

	/* Remember this queue as the link target for the next entry. */
	*last_type = cur_type;
	*last_queue = cur_queue;
}
6671
/*
 * Build the interrupt linked list for one MSI-X vector of a VF: link every
 * queue in the vector's rxq_map/txq_map into a chain and point the vector's
 * LNKLST register at the head of that chain.
 */
static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
    const struct i40e_virtchnl_vector_map *vector)
{
	struct i40e_hw *hw;
	u_int qindex;
	enum i40e_queue_type type, last_type;
	uint32_t lnklst_reg;
	uint16_t rxq_map, txq_map, cur_queue, last_queue;

	hw = &pf->hw;

	rxq_map = vector->rxq_map;
	txq_map = vector->txq_map;

	/* Seed the chain with the end-of-list marker. */
	last_queue = IXL_END_OF_INTR_LNKLST;
	last_type = I40E_QUEUE_TYPE_RX;

	/*
	 * The datasheet says to optimize performance, RX queues and TX queues
	 * should be interleaved in the interrupt linked list, so we process
	 * both at once here.
	 */
	while ((rxq_map != 0) || (txq_map != 0)) {
		if (txq_map != 0) {
			/* Lowest set bit = next VF-relative queue index. */
			qindex = ffs(txq_map) - 1;
			type = I40E_QUEUE_TYPE_TX;
			cur_queue = vf->vsi.first_queue + qindex;
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			txq_map &= ~(1 << qindex);
		}

		if (rxq_map != 0) {
			qindex = ffs(rxq_map) - 1;
			type = I40E_QUEUE_TYPE_RX;
			cur_queue = vf->vsi.first_queue + qindex;
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			rxq_map &= ~(1 << qindex);
		}
	}

	/* Vector 0 has its own register; others are indexed by vector id. */
	if (vector->vector_id == 0)
		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
	else
		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
		    vf->vf_num);
	/* Point the list head at the last queue written above. */
	wr32(hw, lnklst_reg,
	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));

	ixl_flush(hw);
}
6726
/*
 * Handle a CONFIG_IRQ_MAP virtchnl request: validate each vector map and
 * program the interrupt linked lists for the VF's MSI-X vectors.
 */
static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_virtchnl_irq_map_info *map;
	struct i40e_virtchnl_vector_map *vector;
	struct i40e_hw *hw;
	int i, largest_txq, largest_rxq;

	hw = &pf->hw;

	/* The fixed header must be present before we can read it. */
	if (msg_size < sizeof(*map)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	map = msg;
	if (map->num_vectors == 0) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	/* The payload must exactly hold the claimed number of vectors. */
	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < map->num_vectors; i++) {
		vector = &map->vecmap[i];

		/* Vector must exist and must target the VF's own VSI. */
		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
		    vector->vsi_id != vf->vsi.vsi_num) {
			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
			return;
		}

		/* All mapped RX queues must be within the VF's allocation. */
		if (vector->rxq_map != 0) {
			largest_rxq = fls(vector->rxq_map) - 1;
			if (largest_rxq >= vf->vsi.num_queues) {
				i40e_send_vf_nack(pf, vf,
				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		/* Same bound check for the mapped TX queues. */
		if (vector->txq_map != 0) {
			largest_txq = fls(vector->txq_map) - 1;
			if (largest_txq >= vf->vsi.num_queues) {
				i40e_send_vf_nack(pf, vf,
				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		/* ITR indices are limited to the hardware-defined range. */
		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
			    I40E_ERR_PARAM);
			return;
		}

		ixl_vf_config_vector(pf, vf, vector);
	}

	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
}
6800
6801static void
6802ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6803    uint16_t msg_size)
6804{
6805	struct i40e_virtchnl_queue_select *select;
6806	int error;
6807
6808	if (msg_size != sizeof(*select)) {
6809		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6810		    I40E_ERR_PARAM);
6811		return;
6812	}
6813
6814	select = msg;
6815	if (select->vsi_id != vf->vsi.vsi_num ||
6816	    select->rx_queues == 0 || select->tx_queues == 0) {
6817		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6818		    I40E_ERR_PARAM);
6819		return;
6820	}
6821
6822	error = ixl_enable_rings(&vf->vsi);
6823	if (error) {
6824		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
6825		    I40E_ERR_TIMEOUT);
6826		return;
6827	}
6828
6829	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
6830}
6831
6832static void
6833ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
6834    void *msg, uint16_t msg_size)
6835{
6836	struct i40e_virtchnl_queue_select *select;
6837	int error;
6838
6839	if (msg_size != sizeof(*select)) {
6840		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6841		    I40E_ERR_PARAM);
6842		return;
6843	}
6844
6845	select = msg;
6846	if (select->vsi_id != vf->vsi.vsi_num ||
6847	    select->rx_queues == 0 || select->tx_queues == 0) {
6848		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6849		    I40E_ERR_PARAM);
6850		return;
6851	}
6852
6853	error = ixl_disable_rings(&vf->vsi);
6854	if (error) {
6855		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
6856		    I40E_ERR_TIMEOUT);
6857		return;
6858	}
6859
6860	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
6861}
6862
6863static boolean_t
6864ixl_zero_mac(const uint8_t *addr)
6865{
6866	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
6867
6868	return (cmp_etheraddr(addr, zero));
6869}
6870
/* Return TRUE if addr is the Ethernet broadcast address. */
static boolean_t
ixl_bcast_mac(const uint8_t *addr)
{

	return (cmp_etheraddr(addr, ixl_bcast_addr));
}
6877
6878static int
6879ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
6880{
6881
6882	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
6883		return (EINVAL);
6884
6885	/*
6886	 * If the VF is not allowed to change its MAC address, don't let it
6887	 * set a MAC filter for an address that is not a multicast address and
6888	 * is not its assigned MAC.
6889	 */
6890	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
6891	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
6892		return (EPERM);
6893
6894	return (0);
6895}
6896
/*
 * Handle an ADD_ETHER_ADDRESS virtchnl request: validate every address in
 * the list, then install a MAC filter for each one on the VF's VSI.
 */
static void
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_virtchnl_ether_addr_list *addr_list;
	struct i40e_virtchnl_ether_addr *addr;
	struct ixl_vsi *vsi;
	int i;
	size_t expected_size;

	vsi = &vf->vsi;

	/* The fixed header must be present before we can read it. */
	if (msg_size < sizeof(*addr_list)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
		    I40E_ERR_PARAM);
		return;
	}

	addr_list = msg;
	expected_size = sizeof(*addr_list) +
	    addr_list->num_elements * sizeof(*addr);

	if (addr_list->num_elements == 0 ||
	    addr_list->vsi_id != vsi->vsi_num ||
	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
		    I40E_ERR_PARAM);
		return;
	}

	/* Validate all addresses first so the request is all-or-nothing. */
	for (i = 0; i < addr_list->num_elements; i++) {
		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
			i40e_send_vf_nack(pf, vf,
			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
			return;
		}
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
	}

	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
}
6942
6943static void
6944ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
6945    uint16_t msg_size)
6946{
6947	struct i40e_virtchnl_ether_addr_list *addr_list;
6948	struct i40e_virtchnl_ether_addr *addr;
6949	size_t expected_size;
6950	int i;
6951
6952	if (msg_size < sizeof(*addr_list)) {
6953		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6954		    I40E_ERR_PARAM);
6955		return;
6956	}
6957
6958	addr_list = msg;
6959	expected_size = sizeof(*addr_list) +
6960	    addr_list->num_elements * sizeof(*addr);
6961
6962	if (addr_list->num_elements == 0 ||
6963	    addr_list->vsi_id != vf->vsi.vsi_num ||
6964	    msg_size != expected_size) {
6965		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
6966		    I40E_ERR_PARAM);
6967		return;
6968	}
6969
6970	for (i = 0; i < addr_list->num_elements; i++) {
6971		addr = &addr_list->list[i];
6972		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
6973			i40e_send_vf_nack(pf, vf,
6974			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
6975			return;
6976		}
6977	}
6978
6979	for (i = 0; i < addr_list->num_elements; i++) {
6980		addr = &addr_list->list[i];
6981		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
6982	}
6983
6984	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
6985}
6986
/*
 * Enable VLAN tag stripping on the VF's VSI via an admin queue VSI-params
 * update.  Returns the admin queue status code.
 */
static enum i40e_status_code
ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_vsi_context vsi_ctx;

	vsi_ctx.seid = vf->vsi.seid;

	/* Only the VLAN section is marked valid; the rest stays zeroed. */
	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
}
7000
7001static void
7002ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
7003    uint16_t msg_size)
7004{
7005	struct i40e_virtchnl_vlan_filter_list *filter_list;
7006	enum i40e_status_code code;
7007	size_t expected_size;
7008	int i;
7009
7010	if (msg_size < sizeof(*filter_list)) {
7011		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
7012		    I40E_ERR_PARAM);
7013		return;
7014	}
7015
7016	filter_list = msg;
7017	expected_size = sizeof(*filter_list) +
7018	    filter_list->num_elements * sizeof(uint16_t);
7019	if (filter_list->num_elements == 0 ||
7020	    filter_list->vsi_id != vf->vsi.vsi_num ||
7021	    msg_size != expected_size) {
7022		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
7023		    I40E_ERR_PARAM);
7024		return;
7025	}
7026
7027	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
7028		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
7029		    I40E_ERR_PARAM);
7030		return;
7031	}
7032
7033	for (i = 0; i < filter_list->num_elements; i++) {
7034		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
7035			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
7036			    I40E_ERR_PARAM);
7037			return;
7038		}
7039	}
7040
7041	code = ixl_vf_enable_vlan_strip(pf, vf);
7042	if (code != I40E_SUCCESS) {
7043		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
7044		    I40E_ERR_PARAM);
7045	}
7046
7047	for (i = 0; i < filter_list->num_elements; i++)
7048		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
7049
7050	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
7051}
7052
7053static void
7054ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
7055    uint16_t msg_size)
7056{
7057	struct i40e_virtchnl_vlan_filter_list *filter_list;
7058	int i;
7059	size_t expected_size;
7060
7061	if (msg_size < sizeof(*filter_list)) {
7062		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
7063		    I40E_ERR_PARAM);
7064		return;
7065	}
7066
7067	filter_list = msg;
7068	expected_size = sizeof(*filter_list) +
7069	    filter_list->num_elements * sizeof(uint16_t);
7070	if (filter_list->num_elements == 0 ||
7071	    filter_list->vsi_id != vf->vsi.vsi_num ||
7072	    msg_size != expected_size) {
7073		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
7074		    I40E_ERR_PARAM);
7075		return;
7076	}
7077
7078	for (i = 0; i < filter_list->num_elements; i++) {
7079		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
7080			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
7081			    I40E_ERR_PARAM);
7082			return;
7083		}
7084	}
7085
7086	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
7087		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
7088		    I40E_ERR_PARAM);
7089		return;
7090	}
7091
7092	for (i = 0; i < filter_list->num_elements; i++)
7093		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
7094
7095	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
7096}
7097
/*
 * Handle a CONFIG_PROMISCUOUS_MODE virtchnl request: set unicast and
 * multicast promiscuous mode on the VF's VSI as requested, provided the VF
 * has been granted the promiscuous capability.
 */
static void
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
	struct i40e_virtchnl_promisc_info *info;
	enum i40e_status_code code;

	if (msg_size != sizeof(*info)) {
		i40e_send_vf_nack(pf, vf,
		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	/* Promiscuous mode requires administrator permission. */
	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
		i40e_send_vf_nack(pf, vf,
		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	info = msg;
	if (info->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf,
		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	/* On failure the admin queue status is relayed to the VF. */
	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf,
		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
		return;
	}

	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf,
		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
		return;
	}

	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
}
7142
/*
 * Handle a GET_STATS virtchnl request: refresh the VSI's ethernet stats
 * and send them back to the VF.
 */
static void
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_virtchnl_queue_select *queue;

	if (msg_size != sizeof(*queue)) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
		    I40E_ERR_PARAM);
		return;
	}

	queue = msg;
	/* The VF may only query its own VSI. */
	if (queue->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
		    I40E_ERR_PARAM);
		return;
	}

	ixl_update_eth_stats(&vf->vsi);

	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
}
7167
/*
 * Dispatch an admin-queue event carrying a virtchnl message from a VF to
 * the appropriate opcode handler.  Each handler is responsible for sending
 * the ACK/NACK reply.
 */
static void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
	struct ixl_vf *vf;
	void *msg;
	uint16_t vf_num, msg_size;
	uint32_t opcode;

	/* retval carries the global VF id; convert to our local index. */
	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
	opcode = le32toh(event->desc.cookie_high);

	if (vf_num >= pf->num_vfs) {
		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
		return;
	}

	vf = &pf->vfs[vf_num];
	msg = event->msg_buf;
	msg_size = event->msg_len;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
	    "Got msg %s(%d) from VF-%d of size %d\n",
	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);

	switch (opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ixl_vf_version_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		ixl_vf_reset_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
		break;

	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
	default:
		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
		break;
	}
}
7241
7242/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
static void
ixl_handle_vflr(void *arg, int pending)
{
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	uint16_t global_vf_num;
	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
	int i;

	pf = arg;
	hw = &pf->hw;

	IXL_PF_LOCK(pf);
	/* Scan the VFLR status registers for VFs that have been reset. */
	for (i = 0; i < pf->num_vfs; i++) {
		global_vf_num = hw->func_caps.vf_base_id + i;

		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
		if (vflrstat & vflrstat_mask) {
			/* Acknowledge the FLR bit (write-1-to-clear). */
			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
			    vflrstat_mask);

			ixl_reinit_vf(pf, &pf->vfs[i]);
		}
	}

	/* Re-arm the VFLR cause in the PF interrupt enable register. */
	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
	ixl_flush(hw);

	IXL_PF_UNLOCK(pf);
}
7277
7278static int
7279ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
7280{
7281
7282	switch (err) {
7283	case I40E_AQ_RC_EPERM:
7284		return (EPERM);
7285	case I40E_AQ_RC_ENOENT:
7286		return (ENOENT);
7287	case I40E_AQ_RC_ESRCH:
7288		return (ESRCH);
7289	case I40E_AQ_RC_EINTR:
7290		return (EINTR);
7291	case I40E_AQ_RC_EIO:
7292		return (EIO);
7293	case I40E_AQ_RC_ENXIO:
7294		return (ENXIO);
7295	case I40E_AQ_RC_E2BIG:
7296		return (E2BIG);
7297	case I40E_AQ_RC_EAGAIN:
7298		return (EAGAIN);
7299	case I40E_AQ_RC_ENOMEM:
7300		return (ENOMEM);
7301	case I40E_AQ_RC_EACCES:
7302		return (EACCES);
7303	case I40E_AQ_RC_EFAULT:
7304		return (EFAULT);
7305	case I40E_AQ_RC_EBUSY:
7306		return (EBUSY);
7307	case I40E_AQ_RC_EEXIST:
7308		return (EEXIST);
7309	case I40E_AQ_RC_EINVAL:
7310		return (EINVAL);
7311	case I40E_AQ_RC_ENOTTY:
7312		return (ENOTTY);
7313	case I40E_AQ_RC_ENOSPC:
7314		return (ENOSPC);
7315	case I40E_AQ_RC_ENOSYS:
7316		return (ENOSYS);
7317	case I40E_AQ_RC_ERANGE:
7318		return (ERANGE);
7319	case I40E_AQ_RC_EFLUSHED:
7320		return (EINVAL);	/* No exact equivalent in errno.h */
7321	case I40E_AQ_RC_BAD_ADDR:
7322		return (EFAULT);
7323	case I40E_AQ_RC_EMODE:
7324		return (EPERM);
7325	case I40E_AQ_RC_EFBIG:
7326		return (EFBIG);
7327	default:
7328		return (EINVAL);
7329	}
7330}
7331
/*
 * SR-IOV initialization entry point: allocate per-VF state and create the
 * VEB (virtual ethernet bridge) that connects the VF VSIs to the PF VSI.
 *
 * Returns 0 on success or an errno value; on failure all allocated state
 * is released.
 */
static int
ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *pf_vsi;
	enum i40e_status_code ret;
	int i, error;

	pf = device_get_softc(dev);
	hw = &pf->hw;
	pf_vsi = &pf->vsi;

	IXL_PF_LOCK(pf);
	/* M_NOWAIT because the PF lock is held. */
	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
	    M_ZERO);

	if (pf->vfs == NULL) {
		error = ENOMEM;
		goto fail;
	}

	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_init(&pf->vfs[i].ctx);

	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
	    1, FALSE, &pf->veb_seid, FALSE, NULL);
	if (ret != I40E_SUCCESS) {
		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
		device_printf(dev, "add_veb failed; code=%d error=%d", ret,
		    error);
		goto fail;
	}

	// TODO: [Configure MSI-X here]
	ixl_enable_adminq(hw);

	pf->num_vfs = num_vfs;
	IXL_PF_UNLOCK(pf);
	return (0);

fail:
	free(pf->vfs, M_IXL);
	pf->vfs = NULL;
	IXL_PF_UNLOCK(pf);
	return (error);
}
7379
/*
 * SR-IOV teardown entry point: delete the VF VSIs and the VEB, then free
 * all per-VF state.
 */
static void
ixl_iov_uninit(device_t dev)
{
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	struct ifnet *ifp;
	struct ixl_vf *vfs;
	int i, num_vfs;

	pf = device_get_softc(dev);
	hw = &pf->hw;
	vsi = &pf->vsi;
	ifp = vsi->ifp;

	IXL_PF_LOCK(pf);
	/* Delete each VF's VSI element (seid 0 means never created). */
	for (i = 0; i < pf->num_vfs; i++) {
		if (pf->vfs[i].vsi.seid != 0)
			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
	}

	if (pf->veb_seid != 0) {
		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
		pf->veb_seid = 0;
	}

	/*
	 * NOTE(review): interrupts are disabled only when the interface is
	 * NOT running — confirm this condition is intended and not inverted.
	 */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		ixl_disable_intr(vsi);

	vfs = pf->vfs;
	num_vfs = pf->num_vfs;

	pf->vfs = NULL;
	pf->num_vfs = 0;
	IXL_PF_UNLOCK(pf);

	/* Do this after the unlock as sysctl_ctx_free might sleep. */
	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_free(&vfs[i].ctx);
	free(vfs, M_IXL);
}
7421
/*
 * Per-VF creation entry point: set up the VF's VSI, apply the capability
 * flags from the administrator-supplied nvlist, reset the VF into a clean
 * state, and register its sysctl tree.
 *
 * Returns 0 on success or an errno value from VSI setup.
 */
static int
ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	char sysctl_name[QUEUE_NAME_LEN];
	struct ixl_pf *pf;
	struct ixl_vf *vf;
	const void *mac;
	size_t size;
	int error;

	pf = device_get_softc(dev);
	vf = &pf->vfs[vfnum];

	IXL_PF_LOCK(pf);
	vf->vf_num = vfnum;

	vf->vsi.back = pf;
	vf->vf_flags = VF_FLAG_ENABLED;
	SLIST_INIT(&vf->vsi.ftl);

	error = ixl_vf_setup_vsi(pf, vf);
	if (error != 0)
		goto out;

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf->mac, ETHER_ADDR_LEN);

		if (nvlist_get_bool(params, "allow-set-mac"))
			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;

	if (nvlist_get_bool(params, "mac-anti-spoof"))
		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;

	if (nvlist_get_bool(params, "allow-promisc"))
		vf->vf_flags |= VF_FLAG_PROMISC_CAP;

	/* TODO: Get VLAN that PF has set for the VF */

	vf->vf_flags |= VF_FLAG_VLAN_CAP;

	ixl_reset_vf(pf, vf);
out:
	IXL_PF_UNLOCK(pf);
	/* Sysctl registration happens outside the lock. */
	if (error == 0) {
		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
	}

	return (error);
}
7479#endif /* PCI_IOV */
7480