/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 283882 2015-06-01 17:35:29Z jfv $*/

3555682Smarkm
3655682Smarkm#ifndef IXGBE_STANDALONE_BUILD
3755682Smarkm#include "opt_inet.h"
3855682Smarkm#include "opt_inet6.h"
3955682Smarkm#include "opt_rss.h"
4055682Smarkm#endif
41178825Sdfr
4255682Smarkm#include "ixgbe.h"
43142403Snectar
44142403Snectar#ifdef	RSS
45142403Snectar#include <net/rss_config.h>
46142403Snectar#include <netinet/in_rss.h>
47142403Snectar#endif
48142403Snectar
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
/* Runtime debug-statistics switch; non-static so other driver files
 * (e.g. the tx/rx code) can reference it. */
int             ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
/* Reported in the probe string and via sysctl; keep in sync with releases. */
char ixgbe_driver_version[] = "2.8.3";
58142403Snectar
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgbe_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

/* Subvendor/subdevice of 0 acts as a wildcard in ixgbe_probe();
 * all entries here use string index 0 (the single branding string). */
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
104142403Snectar
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

/* Indexed by the last field of ixgbe_vendor_info_array entries. */
static char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
112233294Sstas
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* newbus device interface entry points */
static int      ixgbe_probe(device_t);
static int      ixgbe_attach(device_t);
static int      ixgbe_detach(device_t);
static int      ixgbe_shutdown(device_t);
static int	ixgbe_suspend(device_t);
static int	ixgbe_resume(device_t);
/* ifnet interface */
static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixgbe_init(void *);
static void	ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
#if __FreeBSD_version >= 1100036
static uint64_t	ixgbe_get_counter(struct ifnet *, ift_counter);
#endif
static void	ixgbe_add_media_types(struct adapter *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
/* hardware/resource bring-up helpers */
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *);
static void	ixgbe_get_slot_info(struct ixgbe_hw *);
static int      ixgbe_allocate_msix(struct adapter *);
static int      ixgbe_allocate_legacy(struct adapter *);
static int	ixgbe_setup_msix(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_eee_support(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
static void	ixgbe_rearm_queues(struct adapter *, u64);

static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void     ixgbe_add_hw_stats(struct adapter *);

/* Sysctl handlers */
static int	ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
static int	ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_probe(struct adapter *);
static void	ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void	ixgbe_legacy_irq(void *);

/* The MSI/X Interrupt handlers */
static void	ixgbe_msix_que(void *);
static void	ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void	ixgbe_handle_que(void *, int);
static void	ixgbe_handle_link(void *, int);
static void	ixgbe_handle_msf(void *, int);
static void	ixgbe_handle_mod(void *, int);
static void	ixgbe_handle_phy(void *, int);

#ifdef IXGBE_FDIR
static void	ixgbe_reinit_fdir(void *, int);
#endif
206127808Snectar
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
	DEVMETHOD_END
};

/* Softc size tells newbus how much per-instance state to allocate. */
static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

/* Module load-order dependencies */
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
231127808Snectar
/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
		   "IXGBE driver parameters");

/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
** is varied over time based on the
** traffic for that interrupt vector
*/
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

/* Upper bound on per-vector interrupt rate (interrupts/second). */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time,"
    "-1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time,"
    "-1 means unlimited");

/*
** Smart speed setting, default to on
** this only works as a compile option
** right now as its during attach, set
** this to 'ixgbe_smart_speed_off' to
** disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overriden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
** Number of TX descriptors per ring,
** setting higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
** Defining this on will allow the use
** of unsupported SFP+ modules, note that
** doing so you are on your own :)
*/
static int allow_unsupported_sfp = FALSE;
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool, this variable controls
** how much it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif
331127808Snectar
332127808Snectar#ifdef DEV_NETMAP
333127808Snectar/*
334178825Sdfr * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
335127808Snectar * be a reference on how to implement netmap support in a driver.
336178825Sdfr * Additional comments are in ixgbe_netmap.h .
337178825Sdfr *
338102644Snectar * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
339102644Snectar * that extend the standard driver.
340102644Snectar */
341178825Sdfr#include <dev/netmap/ixgbe_netmap.h>
342127808Snectar#endif /* DEV_NETMAP */
343127808Snectar
344127808Snectar/*********************************************************************
345127808Snectar *  Device identification routine
346127808Snectar *
347127808Snectar *  ixgbe_probe determines if the driver should be loaded on
348178825Sdfr *  adapter based on PCI vendor/device id of the adapter.
349127808Snectar *
350127808Snectar *  return BUS_PROBE_DEFAULT on success, positive on failure
35172445Sassar *********************************************************************/
352127808Snectar
353127808Snectarstatic int
354178825Sdfrixgbe_probe(device_t dev)
355127808Snectar{
356127808Snectar	ixgbe_vendor_info_t *ent;
357142403Snectar
358127808Snectar	u16	pci_vendor_id = 0;
359178825Sdfr	u16	pci_device_id = 0;
360127808Snectar	u16	pci_subvendor_id = 0;
361127808Snectar	u16	pci_subdevice_id = 0;
362178825Sdfr	char	adapter_name[256];
363127808Snectar
364127808Snectar	INIT_DEBUGOUT("ixgbe_probe: begin");
365178825Sdfr
366233294Sstas	pci_vendor_id = pci_get_vendor(dev);
367127808Snectar	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
368127808Snectar		return (ENXIO);
369233294Sstas
370178825Sdfr	pci_device_id = pci_get_device(dev);
371178825Sdfr	pci_subvendor_id = pci_get_subvendor(dev);
372233294Sstas	pci_subdevice_id = pci_get_subdevice(dev);
373233294Sstas
374233294Sstas	ent = ixgbe_vendor_info_array;
375102644Snectar	while (ent->vendor_id != 0) {
37690926Snectar		if ((pci_vendor_id == ent->vendor_id) &&
37772445Sassar		    (pci_device_id == ent->device_id) &&
37855682Smarkm
379233294Sstas		    ((pci_subvendor_id == ent->subvendor_id) ||
38055682Smarkm		     (ent->subvendor_id == 0)) &&
38155682Smarkm
38255682Smarkm		    ((pci_subdevice_id == ent->subdevice_id) ||
38355682Smarkm		     (ent->subdevice_id == 0))) {
38455682Smarkm			sprintf(adapter_name, "%s, Version - %s",
38555682Smarkm				ixgbe_strings[ent->index],
386233294Sstas				ixgbe_driver_version);
38755682Smarkm			device_set_desc_copy(dev, adapter_name);
388120945Snectar			++ixgbe_total_ports;
38990926Snectar			return (BUS_PROBE_DEFAULT);
39072445Sassar		}
39155682Smarkm		ent++;
39290926Snectar	}
393233294Sstas	return (ENXIO);
39490926Snectar}
39572445Sassar
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u16		csum;
	u32		ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout (runs with the core mutex held) */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/*
	 * Do descriptor calc and sanity checks: the ring byte size must
	 * be a multiple of DBA_ALIGN and the count within hardware limits;
	 * otherwise fall back to the compiled-in default.
	 */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	/* Same alignment/limit sanity check for the RX ring size. */
	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;	/* missing module is not fatal at attach */
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,"Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		/* Pre-production hardware: warn but continue attaching. */
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\n If you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev,"Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev,"No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Detect and set physical type */
	ixgbe_setup_optics(adapter);

	/* Interrupt allocation: MSI-X if available, else legacy/MSI. */
	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
	else
		error = ixgbe_allocate_legacy(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

        /* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(hw);


	/* Set an initial default flow control value */
	adapter->fc = ixgbe_fc_full;

	/* Check for certain supported features */
	ixgbe_check_wol_support(adapter);
	ixgbe_check_eee_support(adapter);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	/* NOTE(review): mta may still be NULL on the err_out path;
	 * kernel free(9) accepts NULL, so this is safe. */
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	return (error);
}
590127808Snectar
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free the per-queue taskqueues before freeing rings. */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
#ifndef IXGBE_LEGACY_TX
			taskqueue_drain(que->tq, &txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
		taskqueue_drain(adapter->tq, &adapter->phy_task);
#ifdef IXGBE_FDIR
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the network stack, then tear down resources. */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}
67155682Smarkm
67290926Snectar/*********************************************************************
67390926Snectar *
67455682Smarkm *  Shutdown entry point
67590926Snectar *
676127808Snectar **********************************************************************/
67790926Snectar
67890926Snectarstatic int
67955682Smarkmixgbe_shutdown(device_t dev)
68055682Smarkm{
68155682Smarkm	struct adapter *adapter = device_get_softc(dev);
68255682Smarkm	int error = 0;
68355682Smarkm
684178825Sdfr	INIT_DEBUGOUT("ixgbe_shutdown: begin");
685233294Sstas
68655682Smarkm	IXGBE_CORE_LOCK(adapter);
68755682Smarkm	error = ixgbe_setup_low_power_mode(adapter);
68890926Snectar	IXGBE_CORE_UNLOCK(adapter);
68990926Snectar
69090926Snectar	return (error);
69155682Smarkm}
69290926Snectar
69355682Smarkm/**
69490926Snectar * Methods for going from:
695233294Sstas * D0 -> D3: ixgbe_suspend
696127808Snectar * D3 -> D0: ixgbe_resume
69790926Snectar */
/*
 * Suspend entry point (D0 -> D3 transition).
 *
 * Puts the adapter into low-power mode, then saves PCI config state
 * and drops the device to D3.  Note the power-down proceeds even if
 * ixgbe_setup_low_power_mode() failed; its error is still returned
 * to the caller.
 */
static int
ixgbe_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_suspend: begin");

	IXGBE_CORE_LOCK(adapter);

	error = ixgbe_setup_low_power_mode(adapter);

	/* Save state and power down */
	pci_save_state(dev);
	pci_set_powerstate(dev, PCI_POWERSTATE_D3);

	IXGBE_CORE_UNLOCK(adapter);

	return (error);
}
718233294Sstas
71990926Snectarstatic int
72090926Snectarixgbe_resume(device_t dev)
72190926Snectar{
722178825Sdfr	struct adapter *adapter = device_get_softc(dev);
723178825Sdfr	struct ifnet *ifp = adapter->ifp;
724233294Sstas	struct ixgbe_hw *hw = &adapter->hw;
725233294Sstas	u32 wus;
72690926Snectar
72790926Snectar	INIT_DEBUGOUT("ixgbe_resume: begin");
728233294Sstas
729233294Sstas	IXGBE_CORE_LOCK(adapter);
730178825Sdfr
73190926Snectar	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
732178825Sdfr	pci_restore_state(dev);
733178825Sdfr
734233294Sstas	/* Read & clear WUS register */
735233294Sstas	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
736178825Sdfr	if (wus)
737178825Sdfr		device_printf(dev, "Woken up by (WUS): %#010x\n",
738233294Sstas		    IXGBE_READ_REG(hw, IXGBE_WUS));
739233294Sstas	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
74090926Snectar	/* And clear WUFC until next low-power transition */
74190926Snectar	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
74255682Smarkm
743233294Sstas	/*
744127808Snectar	 * Required after D3->D0 transition;
74590926Snectar	 * will re-advertise all previous advertised speeds
74655682Smarkm	 */
74790926Snectar	if (ifp->if_flags & IFF_UP)
74855682Smarkm		ixgbe_init_locked(adapter);
74990926Snectar
75090926Snectar	IXGBE_CORE_UNLOCK(adapter);
75190926Snectar
752127808Snectar	INIT_DEBUGOUT("ixgbe_resume: end");
753127808Snectar	return (0);
754127808Snectar}
755127808Snectar
756127808Snectar
757127808Snectar/*********************************************************************
758127808Snectar *  Ioctl entry point
759127808Snectar *
760178825Sdfr *  ixgbe_ioctl is called when the user wants to configure the
761178825Sdfr *  interface.
762178825Sdfr *
763178825Sdfr *  return 0 on success, positive on failure
764178825Sdfr **********************************************************************/
765233294Sstas
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	/* SIOCSIFADDR passes an ifaddr, not an ifreq, in `data`. */
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

        case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
#endif
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			/* max_frame_size includes the L2 header overhead */
			adapter->max_frame_size =
				ifp->if_mtu + IXGBE_MTU_HDR;
			/* MTU change requires a full re-init */
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only reprogram promiscuous /
				 * allmulti state if those bits changed.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
                                }
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		/* Remember flags to detect changes on the next call */
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			/* Quiesce interrupts while the filter table changes */
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* `mask` holds only the capability bits being toggled */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		/* Capability changes take effect via a re-init */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM (0xA0) and diagnostics (0xA2) pages */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		/* Reject user-supplied lengths larger than the buffer */
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
90955682Smarkm
91055682Smarkm/*********************************************************************
91155682Smarkm *  Init entry point
91255682Smarkm *
91355682Smarkm *  This routine is used in two ways. It is used by the stack as
91455682Smarkm *  init entry point in network interface structure. It is also used
91555682Smarkm *  by the driver as a hw/sw initialization routine to get to a
91655682Smarkm *  consistent state.
91755682Smarkm *
91855682Smarkm *  return 0 on success, positive on failure
91955682Smarkm **********************************************************************/
92072445Sassar#define IXGBE_MHADD_MFS_SHIFT 16
921178825Sdfr
92255682Smarkmstatic void
923178825Sdfrixgbe_init_locked(struct adapter *adapter)
924178825Sdfr{
925178825Sdfr	struct ifnet   *ifp = adapter->ifp;
926120945Snectar	device_t 	dev = adapter->dev;
927178825Sdfr	struct ixgbe_hw *hw = &adapter->hw;
92855682Smarkm	u32		k, txdctl, mhadd, gpie;
92955682Smarkm	u32		rxdctl, rxctrl;
93055682Smarkm
93155682Smarkm	mtx_assert(&adapter->core_mtx, MA_OWNED);
93255682Smarkm	INIT_DEBUGOUT("ixgbe_init_locked: begin");
933178825Sdfr	hw->adapter_stopped = FALSE;
934178825Sdfr	ixgbe_stop_adapter(hw);
935178825Sdfr        callout_stop(&adapter->timer);
936178825Sdfr
937178825Sdfr        /* reprogram the RAR[0] in case user changed it. */
938178825Sdfr        ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
939178825Sdfr
940178825Sdfr	/* Get the latest mac address, User can use a LAA */
941233294Sstas	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
942178825Sdfr	      IXGBE_ETH_LENGTH_OF_ADDRESS);
943178825Sdfr	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
944178825Sdfr	hw->addr_ctrl.rar_used_count = 1;
945178825Sdfr
946178825Sdfr	/* Set the various hardware offload abilities */
947178825Sdfr	ifp->if_hwassist = 0;
948178825Sdfr	if (ifp->if_capenable & IFCAP_TSO)
949178825Sdfr		ifp->if_hwassist |= CSUM_TSO;
950178825Sdfr	if (ifp->if_capenable & IFCAP_TXCSUM) {
951178825Sdfr		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
952178825Sdfr#if __FreeBSD_version >= 800000
953178825Sdfr		if (hw->mac.type != ixgbe_mac_82598EB)
954233294Sstas			ifp->if_hwassist |= CSUM_SCTP;
95555682Smarkm#endif
95655682Smarkm	}
95755682Smarkm
958	/* Prepare transmit descriptors and buffers */
959	if (ixgbe_setup_transmit_structures(adapter)) {
960		device_printf(dev, "Could not setup transmit structures\n");
961		ixgbe_stop(adapter);
962		return;
963	}
964
965	ixgbe_init_hw(hw);
966	ixgbe_initialize_transmit_units(adapter);
967
968	/* Setup Multicast table */
969	ixgbe_set_multi(adapter);
970
971	/*
972	** Determine the correct mbuf pool
973	** for doing jumbo frames
974	*/
975	if (adapter->max_frame_size <= 2048)
976		adapter->rx_mbuf_sz = MCLBYTES;
977	else if (adapter->max_frame_size <= 4096)
978		adapter->rx_mbuf_sz = MJUMPAGESIZE;
979	else if (adapter->max_frame_size <= 9216)
980		adapter->rx_mbuf_sz = MJUM9BYTES;
981	else
982		adapter->rx_mbuf_sz = MJUM16BYTES;
983
984	/* Prepare receive descriptors and buffers */
985	if (ixgbe_setup_receive_structures(adapter)) {
986		device_printf(dev, "Could not setup receive structures\n");
987		ixgbe_stop(adapter);
988		return;
989	}
990
991	/* Configure RX settings */
992	ixgbe_initialize_receive_units(adapter);
993
994	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
995
996	/* Enable Fan Failure Interrupt */
997	gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
998
999	/* Add for Module detection */
1000	if (hw->mac.type == ixgbe_mac_82599EB)
1001		gpie |= IXGBE_SDP2_GPIEN;
1002
1003	/*
1004	 * Thermal Failure Detection (X540)
1005	 * Link Detection (X552)
1006	 */
1007	if (hw->mac.type == ixgbe_mac_X540 ||
1008	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1009	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1010		gpie |= IXGBE_SDP0_GPIEN_X540;
1011
1012	if (adapter->msix > 1) {
1013		/* Enable Enhanced MSIX mode */
1014		gpie |= IXGBE_GPIE_MSIX_MODE;
1015		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1016		    IXGBE_GPIE_OCD;
1017	}
1018	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1019
1020	/* Set MTU size */
1021	if (ifp->if_mtu > ETHERMTU) {
1022		/* aka IXGBE_MAXFRS on 82599 and newer */
1023		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1024		mhadd &= ~IXGBE_MHADD_MFS_MASK;
1025		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1026		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1027	}
1028
1029	/* Now enable all the queues */
1030	for (int i = 0; i < adapter->num_queues; i++) {
1031		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1032		txdctl |= IXGBE_TXDCTL_ENABLE;
1033		/* Set WTHRESH to 8, burst writeback */
1034		txdctl |= (8 << 16);
1035		/*
1036		 * When the internal queue falls below PTHRESH (32),
1037		 * start prefetching as long as there are at least
1038		 * HTHRESH (1) buffers ready. The values are taken
1039		 * from the Intel linux driver 3.8.21.
1040		 * Prefetching enables tx line rate even with 1 queue.
1041		 */
1042		txdctl |= (32 << 0) | (1 << 8);
1043		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1044	}
1045
1046	for (int i = 0; i < adapter->num_queues; i++) {
1047		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1048		if (hw->mac.type == ixgbe_mac_82598EB) {
1049			/*
1050			** PTHRESH = 21
1051			** HTHRESH = 4
1052			** WTHRESH = 8
1053			*/
1054			rxdctl &= ~0x3FFFFF;
1055			rxdctl |= 0x080420;
1056		}
1057		rxdctl |= IXGBE_RXDCTL_ENABLE;
1058		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1059		for (k = 0; k < 10; k++) {
1060			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1061			    IXGBE_RXDCTL_ENABLE)
1062				break;
1063			else
1064				msec_delay(1);
1065		}
1066		wmb();
1067#ifdef DEV_NETMAP
1068		/*
1069		 * In netmap mode, we must preserve the buffers made
1070		 * available to userspace before the if_init()
1071		 * (this is true by default on the TX side, because
1072		 * init makes all buffers available to userspace).
1073		 *
1074		 * netmap_reset() and the device specific routines
1075		 * (e.g. ixgbe_setup_receive_rings()) map these
1076		 * buffers at the end of the NIC ring, so here we
1077		 * must set the RDT (tail) register to make sure
1078		 * they are not overwritten.
1079		 *
1080		 * In this driver the NIC ring starts at RDH = 0,
1081		 * RDT points to the last slot available for reception (?),
1082		 * so RDT = num_rx_desc - 1 means the whole ring is available.
1083		 */
1084		if (ifp->if_capenable & IFCAP_NETMAP) {
1085			struct netmap_adapter *na = NA(adapter->ifp);
1086			struct netmap_kring *kring = &na->rx_rings[i];
1087			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1088
1089			IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1090		} else
1091#endif /* DEV_NETMAP */
1092		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1093	}
1094
1095	/* Enable Receive engine */
1096	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1097	if (hw->mac.type == ixgbe_mac_82598EB)
1098		rxctrl |= IXGBE_RXCTRL_DMBYPS;
1099	rxctrl |= IXGBE_RXCTRL_RXEN;
1100	ixgbe_enable_rx_dma(hw, rxctrl);
1101
1102	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1103
1104	/* Set up MSI/X routing */
1105	if (ixgbe_enable_msix)  {
1106		ixgbe_configure_ivars(adapter);
1107		/* Set up auto-mask */
1108		if (hw->mac.type == ixgbe_mac_82598EB)
1109			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1110		else {
1111			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1112			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1113		}
1114	} else {  /* Simple settings for Legacy/MSI */
1115                ixgbe_set_ivar(adapter, 0, 0, 0);
1116                ixgbe_set_ivar(adapter, 0, 0, 1);
1117		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1118	}
1119
1120#ifdef IXGBE_FDIR
1121	/* Init Flow director */
1122	if (hw->mac.type != ixgbe_mac_82598EB) {
1123		u32 hdrm = 32 << fdir_pballoc;
1124
1125		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1126		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1127	}
1128#endif
1129
1130	/*
1131	** Check on any SFP devices that
1132	** need to be kick-started
1133	*/
1134	if (hw->phy.type == ixgbe_phy_none) {
1135		int err = hw->phy.ops.identify(hw);
1136		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1137                	device_printf(dev,
1138			    "Unsupported SFP+ module type was detected.\n");
1139			return;
1140        	}
1141	}
1142
1143	/* Set moderation on the Link interrupt */
1144	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1145
1146	/* Configure Energy Efficient Ethernet for supported devices */
1147	if (adapter->eee_support)
1148		ixgbe_setup_eee(hw, adapter->eee_enabled);
1149
1150	/* Config/Enable Link */
1151	ixgbe_config_link(adapter);
1152
1153	/* Hardware Packet Buffer & Flow Control setup */
1154	ixgbe_config_delay_values(adapter);
1155
1156	/* Initialize the FC settings */
1157	ixgbe_start_hw(hw);
1158
1159	/* Set up VLAN support and filter */
1160	ixgbe_setup_vlan_hw_support(adapter);
1161
1162	/* Setup DMA Coalescing */
1163	ixgbe_config_dmac(adapter);
1164
1165	/* And now turn on interrupts */
1166	ixgbe_enable_intr(adapter);
1167
1168	/* Now inform the stack we're ready */
1169	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1170
1171	return;
1172}
1173
/*
 * If_init entry point for the stack: acquire the core lock and run
 * the locked initializer.
 */
static void
ixgbe_init(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_init_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
1184
1185static void
1186ixgbe_config_delay_values(struct adapter *adapter)
1187{
1188	struct ixgbe_hw *hw = &adapter->hw;
1189	u32 rxpb, frame, size, tmp;
1190
1191	frame = adapter->max_frame_size;
1192
1193	/* Calculate High Water */
1194	switch (hw->mac.type) {
1195	case ixgbe_mac_X540:
1196	case ixgbe_mac_X550:
1197	case ixgbe_mac_X550EM_x:
1198		tmp = IXGBE_DV_X540(frame, frame);
1199		break;
1200	default:
1201		tmp = IXGBE_DV(frame, frame);
1202		break;
1203	}
1204	size = IXGBE_BT2KB(tmp);
1205	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1206	hw->fc.high_water[0] = rxpb - size;
1207
1208	/* Now calculate Low Water */
1209	switch (hw->mac.type) {
1210	case ixgbe_mac_X540:
1211	case ixgbe_mac_X550:
1212	case ixgbe_mac_X550EM_x:
1213		tmp = IXGBE_LOW_DV_X540(frame);
1214		break;
1215	default:
1216		tmp = IXGBE_LOW_DV(frame);
1217		break;
1218	}
1219	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1220
1221	hw->fc.requested_mode = adapter->fc;
1222	hw->fc.pause_time = IXGBE_FC_PAUSE;
1223	hw->fc.send_xon = TRUE;
1224}
1225
1226/*
1227**
1228** MSIX Interrupt Handlers and Tasklets
1229**
1230*/
1231
1232static inline void
1233ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1234{
1235	struct ixgbe_hw *hw = &adapter->hw;
1236	u64	queue = (u64)(1 << vector);
1237	u32	mask;
1238
1239	if (hw->mac.type == ixgbe_mac_82598EB) {
1240                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1241                IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1242	} else {
1243                mask = (queue & 0xFFFFFFFF);
1244                if (mask)
1245                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1246                mask = (queue >> 32);
1247                if (mask)
1248                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1249	}
1250}
1251
1252static inline void
1253ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1254{
1255	struct ixgbe_hw *hw = &adapter->hw;
1256	u64	queue = (u64)(1 << vector);
1257	u32	mask;
1258
1259	if (hw->mac.type == ixgbe_mac_82598EB) {
1260                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1261                IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1262	} else {
1263                mask = (queue & 0xFFFFFFFF);
1264                if (mask)
1265                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1266                mask = (queue >> 32);
1267                if (mask)
1268                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1269	}
1270}
1271
/*
 * Deferred (taskqueue) handler for a queue interrupt: drains RX,
 * reaps completed TX descriptors, restarts any queued transmission,
 * then re-enables the interrupt source.
 */
static void
ixgbe_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifndef IXGBE_LEGACY_TX
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
	else
		/* Legacy/MSI mode: no per-queue mask, re-enable everything */
		ixgbe_enable_intr(adapter);
	return;
}
1302
1303
1304/*********************************************************************
1305 *
1306 *  Legacy Interrupt Service routine
1307 *
1308 **********************************************************************/
1309
/*
 * Legacy (INTx/MSI) interrupt handler: services RX/TX on the single
 * queue, then dispatches link, PHY and fan-failure causes read from
 * EICR to their taskqueues.
 */
static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct 		tx_ring *txr = adapter->tx_rings;
	bool		more;
	u32       	reg_eicr;


	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	/* No cause bits set: shared/spurious interrupt, just re-enable */
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	/* More RX work pending: defer to the taskqueue instead of re-enabling */
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);
	return;
}
1366
1367
1368/*********************************************************************
1369 *
1370 *  MSIX Queue Interrupt Service routine
1371 *
1372 **********************************************************************/
1373void
1374ixgbe_msix_que(void *arg)
1375{
1376	struct ix_queue	*que = arg;
1377	struct adapter  *adapter = que->adapter;
1378	struct ifnet    *ifp = adapter->ifp;
1379	struct tx_ring	*txr = que->txr;
1380	struct rx_ring	*rxr = que->rxr;
1381	bool		more;
1382	u32		newitr = 0;
1383
1384	/* Protect against spurious interrupts */
1385	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1386		return;
1387
1388	ixgbe_disable_queue(adapter, que->msix);
1389	++que->irqs;
1390
1391	more = ixgbe_rxeof(que);
1392
1393	IXGBE_TX_LOCK(txr);
1394	ixgbe_txeof(txr);
1395#ifdef IXGBE_LEGACY_TX
1396	if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1397		ixgbe_start_locked(txr, ifp);
1398#else
1399	if (!drbr_empty(ifp, txr->br))
1400		ixgbe_mq_start_locked(ifp, txr);
1401#endif
1402	IXGBE_TX_UNLOCK(txr);
1403
1404	/* Do AIM now? */
1405
1406	if (ixgbe_enable_aim == FALSE)
1407		goto no_calc;
1408	/*
1409	** Do Adaptive Interrupt Moderation:
1410        **  - Write out last calculated setting
1411	**  - Calculate based on average size over
1412	**    the last interval.
1413	*/
1414        if (que->eitr_setting)
1415                IXGBE_WRITE_REG(&adapter->hw,
1416                    IXGBE_EITR(que->msix), que->eitr_setting);
1417
1418        que->eitr_setting = 0;
1419
1420        /* Idle, do nothing */
1421        if ((txr->bytes == 0) && (rxr->bytes == 0))
1422                goto no_calc;
1423
1424	if ((txr->bytes) && (txr->packets))
1425               	newitr = txr->bytes/txr->packets;
1426	if ((rxr->bytes) && (rxr->packets))
1427		newitr = max(newitr,
1428		    (rxr->bytes / rxr->packets));
1429	newitr += 24; /* account for hardware frame, crc */
1430
1431	/* set an upper boundary */
1432	newitr = min(newitr, 3000);
1433
1434	/* Be nice to the mid range */
1435	if ((newitr > 300) && (newitr < 1200))
1436		newitr = (newitr / 3);
1437	else
1438		newitr = (newitr / 2);
1439
1440        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1441                newitr |= newitr << 16;
1442        else
1443                newitr |= IXGBE_EITR_CNT_WDIS;
1444
1445        /* save for next interrupt */
1446        que->eitr_setting = newitr;
1447
1448        /* Reset state */
1449        txr->bytes = 0;
1450        txr->packets = 0;
1451        rxr->bytes = 0;
1452        rxr->packets = 0;
1453
1454no_calc:
1455	if (more)
1456		taskqueue_enqueue(que->tq, &que->que_task);
1457	else
1458		ixgbe_enable_queue(adapter, que->msix);
1459	return;
1460}
1461
1462
/*
 * MSI-X "other"/link vector handler: decodes non-queue causes from
 * EICR (link change, flow director, ECC, over-temp, SFP module
 * insertion/failure, fan failure, external PHY) and dispatches each
 * to its taskqueue, then re-enables the OTHER interrupt.
 */
static void
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg_eicr, mod_mask;

	++adapter->link_irq;

	/* First get the cause */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* Causes below do not exist on 82598 */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
#ifdef IXGBE_FDIR
		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
                	/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		} else
#endif
		if (reg_eicr & IXGBE_EICR_ECC) {
                	device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (reg_eicr & IXGBE_EICR_TS) {
			device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
			    "PHY IS SHUT DOWN!!\n");
			device_printf(adapter->dev, "System shutdown required!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
		}
	}

	/* Pluggable optics-related interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
	else
		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

	if (ixgbe_is_sfp(hw)) {
		/* SDP1: multi-speed fiber (MSF); mod_mask: module insertion */
		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		} else if (reg_eicr & mod_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
	}

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
	}

	/* Re-arm the OTHER (link) interrupt */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return;
}
1543
1544/*********************************************************************
1545 *
1546 *  Media Ioctl callback
1547 *
1548 *  This routine is called whenever the user queries the status of
1549 *  the interface using ifconfig.
1550 *
1551 **********************************************************************/
/*
 * SIOCGIFMEDIA callback: refresh link state, then translate the
 * supported physical layer + negotiated link speed into the ifmedia
 * active/status words reported to userland (ifconfig).
 */
static void
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report valid-but-inactive and stop */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = ixgbe_get_supported_physical_layer(hw);

	/* Copper / twisted pair */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		}
	/* Direct-attach copper (twinax) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	** XXX: These need to use the proper media types once
	** they're added.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
#endif

	IXGBE_CORE_UNLOCK(adapter);

	return;
}
1677
1678/*********************************************************************
1679 *
1680 *  Media Ioctl callback
1681 *
1682 *  This routine is called when the user changes speed/duplex using
1683 *  media/mediopt option with ifconfig.
1684 *
1685 **********************************************************************/
static int
ixgbe_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	/* Only Ethernet media is meaningful for this driver */
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane (KR/KX) links are not user-selectable */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	** We don't actually need to check against the supported
	** media types of the adapter; ifmedia will take care of
	** that for us.
	*/
	/*
	** Build the advertised-speed mask.  The case fallthroughs
	** below are intentional: each faster media type also
	** accumulates the slower speeds it can negotiate down to
	** (e.g. IFM_AUTO ends up with 100M | 1G | 10G).
	*/
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
		case IFM_10G_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			/* FALLTHROUGH */
		case IFM_10G_LRM:
		case IFM_10G_SR: /* KR, too */
		case IFM_10G_LR:
		case IFM_10G_CX4: /* KX4 */
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			/* FALLTHROUGH */
		case IFM_10G_TWINAX:
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_1000_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			/* FALLTHROUGH */
		case IFM_1000_LX:
		case IFM_1000_SX:
		case IFM_1000_CX: /* KX */
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_100_TX:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			break;
		default:
			goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	** Record the advertised speeds in the sysctl-visible encoding.
	** NOTE(review): the shift amounts assume specific
	** IXGBE_LINK_SPEED_* bit positions -- confirm against
	** the shared-code header if those constants ever change.
	*/
	adapter->advertise =
		((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
		((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
		((speed & IXGBE_LINK_SPEED_100_FULL) << 0);

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	return (EINVAL);
}
1746
/*
** Program the FCTRL unicast (UPE) / multicast (MPE) promiscuous
** bits to match the interface's IFF_PROMISC / IFF_ALLMULTI flags
** and the current multicast membership count.
*/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	u_int32_t       reg_rctl;
	struct ifnet   *ifp = adapter->ifp;
	int		mcnt = 0;

	/* Start from the current register value, clearing unicast promisc */
	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	reg_rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count AF_LINK memberships, capped at the HW table size */
		struct	ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
		IF_ADDR_LOCK(ifp);
#else
		if_maddr_rlock(ifp);
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
#else
		if_maddr_runlock(ifp);
#endif
	}
	/* Multicast promisc only needed if the HW table would overflow */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg_rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);

	/* Second write only when a promiscuous mode must be turned on */
	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGBE_FCTRL_MPE;
		reg_rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
	}
	return;
}
1792
1793
1794/*********************************************************************
1795 *  Multicast Update
1796 *
1797 *  This routine is called whenever multicast address list is updated.
1798 *
1799 **********************************************************************/
1800#define IXGBE_RAR_ENTRIES 16
1801
1802static void
1803ixgbe_set_multi(struct adapter *adapter)
1804{
1805	u32	fctrl;
1806	u8	*mta;
1807	u8	*update_ptr;
1808	struct	ifmultiaddr *ifma;
1809	int	mcnt = 0;
1810	struct ifnet   *ifp = adapter->ifp;
1811
1812	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1813
1814	mta = adapter->mta;
1815	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1816	    MAX_NUM_MULTICAST_ADDRESSES);
1817
1818#if __FreeBSD_version < 800000
1819	IF_ADDR_LOCK(ifp);
1820#else
1821	if_maddr_rlock(ifp);
1822#endif
1823	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1824		if (ifma->ifma_addr->sa_family != AF_LINK)
1825			continue;
1826		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1827			break;
1828		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1829		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1830		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1831		mcnt++;
1832	}
1833#if __FreeBSD_version < 800000
1834	IF_ADDR_UNLOCK(ifp);
1835#else
1836	if_maddr_runlock(ifp);
1837#endif
1838
1839	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1840	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1841	if (ifp->if_flags & IFF_PROMISC)
1842		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1843	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1844	    ifp->if_flags & IFF_ALLMULTI) {
1845		fctrl |= IXGBE_FCTRL_MPE;
1846		fctrl &= ~IXGBE_FCTRL_UPE;
1847	} else
1848		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1849
1850	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1851
1852	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1853		update_ptr = mta;
1854		ixgbe_update_mc_addr_list(&adapter->hw,
1855		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1856	}
1857
1858	return;
1859}
1860
1861/*
1862 * This is an iterator function now needed by the multicast
1863 * shared code. It simply feeds the shared code routine the
1864 * addresses in the array of ixgbe_set_multi() one by one.
1865 */
1866static u8 *
1867ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1868{
1869	u8 *addr = *update_ptr;
1870	u8 *newptr;
1871	*vmdq = 0;
1872
1873	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1874	*update_ptr = newptr;
1875	return addr;
1876}
1877
1878
1879/*********************************************************************
1880 *  Timer routine
1881 *
1882 *  This routine checks for link status,updates statistics,
1883 *  and runs the watchdog check.
1884 *
1885 **********************************************************************/
1886
static void
ixgbe_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	int		hung = 0;

	/* Runs once per second under the core lock (see callout_reset) */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	** Check the TX queues status
	**	- mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	/*
	** NOTE(review): this loop reads both que->busy and
	** que->txr->busy -- confirm both counters are intended
	** (they are incremented in different code paths).
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                                adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);
	}

out:
	/* Re-arm ourselves for one second from now */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the whole interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixgbe_init_locked(adapter);
}
1956
1957/*
1958** Note: this routine updates the OS on the link state
1959**	the real check of the hardware only happens with
1960**	a link interrupt.
1961*/
1962static void
1963ixgbe_update_link_status(struct adapter *adapter)
1964{
1965	struct ifnet	*ifp = adapter->ifp;
1966	device_t dev = adapter->dev;
1967
1968	if (adapter->link_up){
1969		if (adapter->link_active == FALSE) {
1970			if (bootverbose)
1971				device_printf(dev,"Link is up %d Gbps %s \n",
1972				    ((adapter->link_speed == 128)? 10:1),
1973				    "Full Duplex");
1974			adapter->link_active = TRUE;
1975			/* Update any Flow Control changes */
1976			ixgbe_fc_enable(&adapter->hw);
1977			/* Update DMA coalescing config */
1978			ixgbe_config_dmac(adapter);
1979			if_link_state_change(ifp, LINK_STATE_UP);
1980		}
1981	} else { /* Link down */
1982		if (adapter->link_active == TRUE) {
1983			if (bootverbose)
1984				device_printf(dev,"Link is Down\n");
1985			if_link_state_change(ifp, LINK_STATE_DOWN);
1986			adapter->link_active = FALSE;
1987		}
1988	}
1989
1990	return;
1991}
1992
1993
1994/*********************************************************************
1995 *
1996 *  This routine disables all traffic on the adapter by issuing a
1997 *  global reset on the MAC and deallocates TX/RX buffers.
1998 *
1999 **********************************************************************/
2000
static void
ixgbe_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	/* Caller must hold the core lock */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Quiesce interrupts and the watchdog timer first */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	ixgbe_reset_hw(hw);
	/* Clear the stopped flag so ixgbe_stop_adapter() runs fully;
	 * presumably the shared code short-circuits when it is TRUE
	 * -- confirm against the shared-code implementation. */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
       	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
}
2035
2036
2037/*********************************************************************
2038 *
2039 *  Determine hardware revision.
2040 *
2041 **********************************************************************/
2042static void
2043ixgbe_identify_hardware(struct adapter *adapter)
2044{
2045	device_t        dev = adapter->dev;
2046	struct ixgbe_hw *hw = &adapter->hw;
2047
2048	/* Save off the information about this board */
2049	hw->vendor_id = pci_get_vendor(dev);
2050	hw->device_id = pci_get_device(dev);
2051	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2052	hw->subsystem_vendor_id =
2053	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2054	hw->subsystem_device_id =
2055	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2056
2057	/*
2058	** Make sure BUSMASTER is set
2059	*/
2060	pci_enable_busmaster(dev);
2061
2062	/* We need this here to set the num_segs below */
2063	ixgbe_set_mac_type(hw);
2064
2065	/* Pick up the 82599 settings */
2066	if (hw->mac.type != ixgbe_mac_82598EB) {
2067		hw->phy.smart_speed = ixgbe_smart_speed;
2068		adapter->num_segs = IXGBE_82599_SCATTER;
2069	} else
2070		adapter->num_segs = IXGBE_82598_SCATTER;
2071
2072	return;
2073}
2074
2075/*********************************************************************
2076 *
2077 *  Determine optic type
2078 *
2079 **********************************************************************/
2080static void
2081ixgbe_setup_optics(struct adapter *adapter)
2082{
2083	struct ixgbe_hw *hw = &adapter->hw;
2084	int		layer;
2085
2086	layer = ixgbe_get_supported_physical_layer(hw);
2087
2088	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2089		adapter->optics = IFM_10G_T;
2090		return;
2091	}
2092
2093	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2094		adapter->optics = IFM_1000_T;
2095		return;
2096	}
2097
2098	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2099		adapter->optics = IFM_1000_SX;
2100		return;
2101	}
2102
2103	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2104	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2105		adapter->optics = IFM_10G_LR;
2106		return;
2107	}
2108
2109	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2110		adapter->optics = IFM_10G_SR;
2111		return;
2112	}
2113
2114	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2115		adapter->optics = IFM_10G_TWINAX;
2116		return;
2117	}
2118
2119	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2120	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2121		adapter->optics = IFM_10G_CX4;
2122		return;
2123	}
2124
2125	/* If we get here just set the default */
2126	adapter->optics = IFM_ETHER | IFM_AUTO;
2127	return;
2128}
2129
2130/*********************************************************************
2131 *
2132 *  Setup the Legacy or MSI Interrupt handler
2133 *
2134 **********************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct		ix_queue *que = adapter->queues;
#ifndef IXGBE_LEGACY_TX
	struct tx_ring		*txr = adapter->tx_rings;
#endif
	int		error, rid = 0;

	/* MSI RID at 1 */
	if (adapter->msix == 1)
		rid = 1;

	/* We allocate a single interrupt resource */
	adapter->res = bus_alloc_resource_any(dev,
            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
#ifndef IXGBE_LEGACY_TX
	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
#endif
	/* Per-queue deferred processing taskqueue */
	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
            device_get_nameunit(adapter->dev));

	/* Tasklets for Link, SFP and Multispeed Fiber */
	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
#ifdef IXGBE_FDIR
	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
#endif
	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
	    device_get_nameunit(adapter->dev));

	/* Hook the single IRQ to the legacy handler */
	if ((error = bus_setup_intr(dev, adapter->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
            que, &adapter->tag)) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
		    "handler: %d\n", error);
		/* Unwind the taskqueues created above */
		taskqueue_free(que->tq);
		taskqueue_free(adapter->tq);
		que->tq = NULL;
		adapter->tq = NULL;
		return (error);
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
}
2200
2201
2202/*********************************************************************
2203 *
2204 *  Setup MSIX Interrupt resources and handlers
2205 *
2206 **********************************************************************/
2207static int
2208ixgbe_allocate_msix(struct adapter *adapter)
2209{
2210	device_t        dev = adapter->dev;
2211	struct 		ix_queue *que = adapter->queues;
2212	struct  	tx_ring *txr = adapter->tx_rings;
2213	int 		error, rid, vector = 0;
2214	int		cpu_id = 0;
2215#ifdef	RSS
2216	cpuset_t	cpu_mask;
2217#endif
2218
2219#ifdef	RSS
2220	/*
2221	 * If we're doing RSS, the number of queues needs to
2222	 * match the number of RSS buckets that are configured.
2223	 *
2224	 * + If there's more queues than RSS buckets, we'll end
2225	 *   up with queues that get no traffic.
2226	 *
2227	 * + If there's more RSS buckets than queues, we'll end
2228	 *   up having multiple RSS buckets map to the same queue,
2229	 *   so there'll be some contention.
2230	 */
2231	if (adapter->num_queues != rss_getnumbuckets()) {
2232		device_printf(dev,
2233		    "%s: number of queues (%d) != number of RSS buckets (%d)"
2234		    "; performance will be impacted.\n",
2235		    __func__,
2236		    adapter->num_queues,
2237		    rss_getnumbuckets());
2238	}
2239#endif
2240
2241	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2242		rid = vector + 1;
2243		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2244		    RF_SHAREABLE | RF_ACTIVE);
2245		if (que->res == NULL) {
2246			device_printf(dev,"Unable to allocate"
2247		    	    " bus resource: que interrupt [%d]\n", vector);
2248			return (ENXIO);
2249		}
2250		/* Set the handler function */
2251		error = bus_setup_intr(dev, que->res,
2252		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2253		    ixgbe_msix_que, que, &que->tag);
2254		if (error) {
2255			que->res = NULL;
2256			device_printf(dev, "Failed to register QUE handler");
2257			return (error);
2258		}
2259#if __FreeBSD_version >= 800504
2260		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2261#endif
2262		que->msix = vector;
2263		adapter->active_queues |= (u64)(1 << que->msix);
2264#ifdef	RSS
2265		/*
2266		 * The queue ID is used as the RSS layer bucket ID.
2267		 * We look up the queue ID -> RSS CPU ID and select
2268		 * that.
2269		 */
2270		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2271#else
2272		/*
2273		 * Bind the msix vector, and thus the
2274		 * rings to the corresponding cpu.
2275		 *
2276		 * This just happens to match the default RSS round-robin
2277		 * bucket -> queue -> CPU allocation.
2278		 */
2279		if (adapter->num_queues > 1)
2280			cpu_id = i;
2281#endif
2282		if (adapter->num_queues > 1)
2283			bus_bind_intr(dev, que->res, cpu_id);
2284
2285#ifdef	RSS
2286		device_printf(dev,
2287		    "Bound RSS bucket %d to CPU %d\n",
2288		    i, cpu_id);
2289#else
2290		if (bootverbose)
2291			device_printf(dev,
2292			    "Bound queue %d to cpu %d\n",
2293			    i, cpu_id);
2294#endif
2295#ifndef IXGBE_LEGACY_TX
2296		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2297#endif
2298		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2299		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2300		    taskqueue_thread_enqueue, &que->tq);
2301#ifdef	RSS
2302		CPU_SETOF(cpu_id, &cpu_mask);
2303		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2304		    &cpu_mask,
2305		    "%s (bucket %d)",
2306		    device_get_nameunit(adapter->dev),
2307		    cpu_id);
2308#else
2309		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2310		    device_get_nameunit(adapter->dev));
2311#endif
2312	}
2313
2314	/* and Link */
2315	rid = vector + 1;
2316	adapter->res = bus_alloc_resource_any(dev,
2317    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2318	if (!adapter->res) {
2319		device_printf(dev,"Unable to allocate"
2320    	    " bus resource: Link interrupt [%d]\n", rid);
2321		return (ENXIO);
2322	}
2323	/* Set the link handler function */
2324	error = bus_setup_intr(dev, adapter->res,
2325	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2326	    ixgbe_msix_link, adapter, &adapter->tag);
2327	if (error) {
2328		adapter->res = NULL;
2329		device_printf(dev, "Failed to register LINK handler");
2330		return (error);
2331	}
2332#if __FreeBSD_version >= 800504
2333	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2334#endif
2335	adapter->vector = vector;
2336	/* Tasklets for Link, SFP and Multispeed Fiber */
2337	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2338	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2339	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2340	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2341#ifdef IXGBE_FDIR
2342	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2343#endif
2344	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2345	    taskqueue_thread_enqueue, &adapter->tq);
2346	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2347	    device_get_nameunit(adapter->dev));
2348
2349	return (0);
2350}
2351
2352/*
2353 * Setup Either MSI/X or MSI
2354 */
static int
ixgbe_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want, queues, msgs;

	/* Override by tuneable */
	if (ixgbe_enable_msix == 0)
		goto msi;

	/* First try MSI/X */
	msgs = pci_msix_count(dev);
	if (msgs == 0)
		goto msi;
	/* Map the MSIX table BAR; 82598 and 82599 use different BARs */
	rid = PCIR_BAR(MSIX_82598_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
       	if (adapter->msix_mem == NULL) {
		rid += 4;	/* 82599 maps in higher BAR */
		adapter->msix_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	}
       	if (adapter->msix_mem == NULL) {
		/* May not be enabled */
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;

#ifdef	RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/* Explicit tuneable overrides the auto-computed queue count */
	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for Link.
	*/
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
               	device_printf(adapter->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
               	device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", msgs);
		adapter->num_queues = queues;
		return (msgs);
	}
	/*
	** If MSIX alloc failed or provided us with
	** less than needed, free and fall through to MSI
	*/
	pci_release_msi(dev);

msi:
	/* rid is only valid here if msix_mem was mapped above */
       	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
       	msgs = 1;
       	if (pci_alloc_msi(dev, &msgs) == 0) {
               	device_printf(adapter->dev,"Using an MSI interrupt\n");
		return (msgs);
	}
	/* Last resort: shared legacy INTx (return value 0) */
	device_printf(adapter->dev,"Using a Legacy interrupt\n");
	return (0);
}
2439
2440
2441static int
2442ixgbe_allocate_pci_resources(struct adapter *adapter)
2443{
2444	int             rid;
2445	device_t        dev = adapter->dev;
2446
2447	rid = PCIR_BAR(0);
2448	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2449	    &rid, RF_ACTIVE);
2450
2451	if (!(adapter->pci_mem)) {
2452		device_printf(dev,"Unable to allocate bus resource: memory\n");
2453		return (ENXIO);
2454	}
2455
2456	adapter->osdep.mem_bus_space_tag =
2457		rman_get_bustag(adapter->pci_mem);
2458	adapter->osdep.mem_bus_space_handle =
2459		rman_get_bushandle(adapter->pci_mem);
2460	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2461
2462	/* Legacy defaults */
2463	adapter->num_queues = 1;
2464	adapter->hw.back = &adapter->osdep;
2465
2466	/*
2467	** Now setup MSI or MSI/X, should
2468	** return us the number of supported
2469	** vectors. (Will be 1 for MSI)
2470	*/
2471	adapter->msix = ixgbe_setup_msix(adapter);
2472	return (0);
2473}
2474
/*
** Release all interrupt and memory resources acquired in
** ixgbe_allocate_pci_resources() / ixgbe_allocate_msix() /
** ixgbe_allocate_legacy().  Safe to call from partially-failed
** attach (see the adapter->res NULL check below).
*/
static void
ixgbe_free_pci_resources(struct adapter * adapter)
{
	struct 		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	/* MSIX table BAR differs between 82598 and later MACs */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		memrid = PCIR_BAR(MSIX_82598_BAR);
	else
		memrid = PCIR_BAR(MSIX_82599_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Queue IRQ rids were allocated as msix vector + 1 */
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		/* MSI used rid 1, legacy INTx used rid 0 */
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
2539
2540/*********************************************************************
2541 *
2542 *  Setup networking device structure and register an interface.
2543 *
2544 **********************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	/* Basic ifnet setup: name, callbacks, flags */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixgbe_get_counter);
#endif
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
#ifndef IXGBE_LEGACY_TX
	/* Multiqueue transmit path */
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	/* Classic if_start transmit path with a software send queue */
	ifp->if_start = ixgbe_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
	IFQ_SET_READY(&ifp->if_snd);
#endif

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Advertise hardware offload capabilities; all enabled by default */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_HWSTATS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixgbe driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
		    ixgbe_media_status);

	ixgbe_add_media_types(adapter);

	/* Autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
}
2625
/*
** Populate the ifmedia list from the supported physical layers.
** Unsupported-by-FreeBSD layer types (KR/KX4/KX) are mapped to
** the closest existing media define, with a console note.
*/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	int layer;

	layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	/* Passive and active direct-attach copper both map to TWINAX */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);

	/*
	** Other (no matching FreeBSD media type):
	** To workaround this, we'll assign these completely
	** inappropriate media types.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
		/* Someday, someone will care about you... */
		device_printf(dev, "Media supported: 1000baseBX\n");
	}

	/* 82598AT also supports explicit 1G entries */
	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(&adapter->media,
		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		ifmedia_add(&adapter->media,
		    IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	/* Autoselect is always available */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
}
2690
2691static void
2692ixgbe_config_link(struct adapter *adapter)
2693{
2694	struct ixgbe_hw *hw = &adapter->hw;
2695	u32	autoneg, err = 0;
2696	bool	sfp, negotiate;
2697
2698	sfp = ixgbe_is_sfp(hw);
2699
2700	if (sfp) {
2701		if (hw->phy.multispeed_fiber) {
2702			hw->mac.ops.setup_sfp(hw);
2703			ixgbe_enable_tx_laser(hw);
2704			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2705		} else
2706			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2707	} else {
2708		if (hw->mac.ops.check_link)
2709			err = ixgbe_check_link(hw, &adapter->link_speed,
2710			    &adapter->link_up, FALSE);
2711		if (err)
2712			goto out;
2713		autoneg = hw->phy.autoneg_advertised;
2714		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2715                	err  = hw->mac.ops.get_link_capabilities(hw,
2716			    &autoneg, &negotiate);
2717		if (err)
2718			goto out;
2719		if (hw->mac.ops.setup_link)
2720                	err = hw->mac.ops.setup_link(hw,
2721			    autoneg, adapter->link_up);
2722	}
2723out:
2724	return;
2725}
2726
2727
2728/*********************************************************************
2729 *
2730 *  Enable transmit units.
2731 *
2732 **********************************************************************/
2733static void
2734ixgbe_initialize_transmit_units(struct adapter *adapter)
2735{
2736	struct tx_ring	*txr = adapter->tx_rings;
2737	struct ixgbe_hw	*hw = &adapter->hw;
2738
2739	/* Setup the Base and Length of the Tx Descriptor Ring */
2740
2741	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2742		u64	tdba = txr->txdma.dma_paddr;
2743		u32	txctrl = 0;
2744
2745		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2746		       (tdba & 0x00000000ffffffffULL));
2747		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2748		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2749		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2750
2751		/* Setup the HW Tx Head and Tail descriptor pointers */
2752		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2753		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2754
2755		/* Cache the tail address */
2756		txr->tail = IXGBE_TDT(txr->me);
2757
2758		/* Set the processing limit */
2759		txr->process_limit = ixgbe_tx_process_limit;
2760
2761		/* Disable Head Writeback */
2762		switch (hw->mac.type) {
2763		case ixgbe_mac_82598EB:
2764			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2765			break;
2766		case ixgbe_mac_82599EB:
2767		case ixgbe_mac_X540:
2768		default:
2769			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2770			break;
2771                }
2772		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2773		switch (hw->mac.type) {
2774		case ixgbe_mac_82598EB:
2775			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2776			break;
2777		case ixgbe_mac_82599EB:
2778		case ixgbe_mac_X540:
2779		default:
2780			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2781			break;
2782		}
2783
2784	}
2785
2786	if (hw->mac.type != ixgbe_mac_82598EB) {
2787		u32 dmatxctl, rttdcs;
2788		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2789		dmatxctl |= IXGBE_DMATXCTL_TE;
2790		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2791		/* Disable arbiter to set MTQC */
2792		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2793		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2794		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2795		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2796		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2797		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2798	}
2799
2800	return;
2801}
2802
/*
 * Program the RSS redirection table (RETA), hash key (RSSRK) and
 * hash-field selection (MRQC). With the RSS kernel option the key
 * and bucket mapping come from the stack; otherwise a random key
 * and a simple round-robin queue mapping are used.
 */
static void
ixgbe_initialise_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	uint32_t reta;
	int i, j, queue_id, table_size;
	int index_mult;
	uint32_t rss_key[10];
	uint32_t mrqc;
#ifdef	RSS
	uint32_t rss_hash_config;
#endif

	/* Setup RSS */
	reta = 0;

#ifdef	RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_key);
#else
	/* set up random bits */
	arc4rand(&rss_key, sizeof(rss_key), 0);
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 spaces queue indices 0x11 apart in RETA */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		/* X550 family has a 512-entry table (RETA + ERETA) */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues) j = 0;
#ifdef	RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		queue_id = rss_get_indirection_to_bucket(i);
		queue_id = queue_id % adapter->num_queues;
#else
		queue_id = (j * index_mult);
#endif
		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | ( ((uint32_t) queue_id) << 24);
		/* Flush a full 4-entry word every fourth iteration */
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (int i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
#ifdef	RSS
	mrqc = IXGBE_MRQC_RSSEN;
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev,
		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
		    "but not supported\n", __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
#else
	/*
	 * Disable UDP - IP fragments aren't currently being handled
	 * and so we end up with a mix of 2-tuple and 4-tuple
	 * traffic.
	 */
	mrqc = IXGBE_MRQC_RSSEN
	     | IXGBE_MRQC_RSS_FIELD_IPV4
	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
#if 0
	     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
#endif
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
	     | IXGBE_MRQC_RSS_FIELD_IPV6
	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
#if 0
	     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
#endif
	;
#endif /* RSS */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
2925
2926
2927/*********************************************************************
2928 *
2929 *  Setup receive registers and features.
2930 *
2931 **********************************************************************/
2932#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2933
2934#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
2935
/*
 * Configure the receive path: broadcast acceptance, jumbo frame
 * handling, per-queue descriptor rings and SRRCTL, packet-split
 * types, RSS mapping and receive checksum offload.
 */
static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet   *ifp = adapter->ifp;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;


	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	/* 82598 also passes pause/MAC-control frames to the host */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
#ifdef DEV_NETMAP
	/* crcstrip is conditional in netmap (in RDRXCTL too ?) */
	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
#endif /* DEV_NETMAP */
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Buffer size in 1KB units, rounded up, for SRRCTL */
	bufsz = (adapter->rx_mbuf_sz +
	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
			       (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);

		/* Set the processing limit */
		rxr->process_limit = ixgbe_rx_process_limit;

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	/* Packet-split header types (82599 and later only) */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			      IXGBE_PSRTYPE_UDPHDR |
			      IXGBE_PSRTYPE_IPV4HDR |
			      IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialise_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* With neither RSS nor RXCSUM, fall back to IP payload csum */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
3049
3050
3051/*
3052** This routine is run via an vlan config EVENT,
3053** it enables us to use the HW Filter table since
3054** we can get the vlan id. This just creates the
3055** entry in the soft version of the VFTA, init will
3056** repopulate the real table.
3057*/
3058static void
3059ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3060{
3061	struct adapter	*adapter = ifp->if_softc;
3062	u16		index, bit;
3063
3064	if (ifp->if_softc !=  arg)   /* Not our event */
3065		return;
3066
3067	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3068		return;
3069
3070	IXGBE_CORE_LOCK(adapter);
3071	index = (vtag >> 5) & 0x7F;
3072	bit = vtag & 0x1F;
3073	adapter->shadow_vfta[index] |= (1 << bit);
3074	++adapter->num_vlans;
3075	ixgbe_setup_vlan_hw_support(adapter);
3076	IXGBE_CORE_UNLOCK(adapter);
3077}
3078
3079/*
3080** This routine is run via an vlan
3081** unconfig EVENT, remove our entry
3082** in the soft vfta.
3083*/
3084static void
3085ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3086{
3087	struct adapter	*adapter = ifp->if_softc;
3088	u16		index, bit;
3089
3090	if (ifp->if_softc !=  arg)
3091		return;
3092
3093	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3094		return;
3095
3096	IXGBE_CORE_LOCK(adapter);
3097	index = (vtag >> 5) & 0x7F;
3098	bit = vtag & 0x1F;
3099	adapter->shadow_vfta[index] &= ~(1 << bit);
3100	--adapter->num_vlans;
3101	/* Re-init to load the changes */
3102	ixgbe_setup_vlan_hw_support(adapter);
3103	IXGBE_CORE_UNLOCK(adapter);
3104}
3105
3106static void
3107ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3108{
3109	struct ifnet 	*ifp = adapter->ifp;
3110	struct ixgbe_hw *hw = &adapter->hw;
3111	struct rx_ring	*rxr;
3112	u32		ctrl;
3113
3114
3115	/*
3116	** We get here thru init_locked, meaning
3117	** a soft reset, this has already cleared
3118	** the VFTA and other state, so if there
3119	** have been no vlan's registered do nothing.
3120	*/
3121	if (adapter->num_vlans == 0)
3122		return;
3123
3124	/* Setup the queues for vlans */
3125	for (int i = 0; i < adapter->num_queues; i++) {
3126		rxr = &adapter->rx_rings[i];
3127		/* On 82599 the VLAN enable is per/queue in RXDCTL */
3128		if (hw->mac.type != ixgbe_mac_82598EB) {
3129			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3130			ctrl |= IXGBE_RXDCTL_VME;
3131			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3132		}
3133		rxr->vtag_strip = TRUE;
3134	}
3135
3136	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3137		return;
3138	/*
3139	** A soft reset zero's out the VFTA, so
3140	** we need to repopulate it now.
3141	*/
3142	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3143		if (adapter->shadow_vfta[i] != 0)
3144			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3145			    adapter->shadow_vfta[i]);
3146
3147	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3148	/* Enable the Filter Table if enabled */
3149	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3150		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3151		ctrl |= IXGBE_VLNCTRL_VFE;
3152	}
3153	if (hw->mac.type == ixgbe_mac_82598EB)
3154		ctrl |= IXGBE_VLNCTRL_VME;
3155	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3156}
3157
/*
 * Enable interrupts: build the per-MAC EIMS cause mask, set up
 * MSI-X auto-clear, then enable every queue vector individually.
 */
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from everything except the per-queue causes */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	/* Enable Fan Failure detection */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		    mask |= IXGBE_EIMS_GPI_SDP1;

	/* Add MAC-specific miscellaneous causes */
	switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			mask |= IXGBE_EIMS_ECC;
			/* Temperature sensor on some adapters */
			mask |= IXGBE_EIMS_GPI_SDP0;
			/* SFP+ (RX_LOS_N & MOD_ABS_N) */
			mask |= IXGBE_EIMS_GPI_SDP1;
			mask |= IXGBE_EIMS_GPI_SDP2;
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
			break;
		case ixgbe_mac_X540:
			/* Detect if Thermal Sensor is enabled */
			fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
			if (fwsm & IXGBE_FWSM_TS_ENABLED)
				mask |= IXGBE_EIMS_TS;
			mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
			break;
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			/* MAC thermal sensor is automatically enabled */
			mask |= IXGBE_EIMS_TS;
			/* Some devices use SDP0 for important information */
			if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
			    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
				mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
			mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
		/* falls through */
		default:
			break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	** Now enable all queues, this is done separately to
	** allow for handling the extended (beyond 32) MSIX
	** vectors that can be used by 82599
	*/
        for (int i = 0; i < adapter->num_queues; i++, que++)
                ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
3232
3233static void
3234ixgbe_disable_intr(struct adapter *adapter)
3235{
3236	if (adapter->msix_mem)
3237		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3238	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3239		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3240	} else {
3241		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3242		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3243		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3244	}
3245	IXGBE_WRITE_FLUSH(&adapter->hw);
3246	return;
3247}
3248
3249/*
3250** Get the width and transaction speed of
3251** the slot this adapter is plugged into.
3252*/
3253static void
3254ixgbe_get_slot_info(struct ixgbe_hw *hw)
3255{
3256	device_t		dev = ((struct ixgbe_osdep *)hw->back)->dev;
3257	struct ixgbe_mac_info	*mac = &hw->mac;
3258	u16			link;
3259	u32			offset;
3260
3261	/* For most devices simply call the shared code routine */
3262	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3263		ixgbe_get_bus_info(hw);
3264		/* These devices don't use PCI-E */
3265		switch (hw->mac.type) {
3266		case ixgbe_mac_X550EM_x:
3267			return;
3268		default:
3269			goto display;
3270		}
3271	}
3272
3273	/*
3274	** For the Quad port adapter we need to parse back
3275	** up the PCI tree to find the speed of the expansion
3276	** slot into which this adapter is plugged. A bit more work.
3277	*/
3278	dev = device_get_parent(device_get_parent(dev));
3279#ifdef IXGBE_DEBUG
3280	device_printf(dev, "parent pcib = %x,%x,%x\n",
3281	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3282#endif
3283	dev = device_get_parent(device_get_parent(dev));
3284#ifdef IXGBE_DEBUG
3285	device_printf(dev, "slot pcib = %x,%x,%x\n",
3286	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3287#endif
3288	/* Now get the PCI Express Capabilities offset */
3289	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3290	/* ...and read the Link Status Register */
3291	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3292	switch (link & IXGBE_PCI_LINK_WIDTH) {
3293	case IXGBE_PCI_LINK_WIDTH_1:
3294		hw->bus.width = ixgbe_bus_width_pcie_x1;
3295		break;
3296	case IXGBE_PCI_LINK_WIDTH_2:
3297		hw->bus.width = ixgbe_bus_width_pcie_x2;
3298		break;
3299	case IXGBE_PCI_LINK_WIDTH_4:
3300		hw->bus.width = ixgbe_bus_width_pcie_x4;
3301		break;
3302	case IXGBE_PCI_LINK_WIDTH_8:
3303		hw->bus.width = ixgbe_bus_width_pcie_x8;
3304		break;
3305	default:
3306		hw->bus.width = ixgbe_bus_width_unknown;
3307		break;
3308	}
3309
3310	switch (link & IXGBE_PCI_LINK_SPEED) {
3311	case IXGBE_PCI_LINK_SPEED_2500:
3312		hw->bus.speed = ixgbe_bus_speed_2500;
3313		break;
3314	case IXGBE_PCI_LINK_SPEED_5000:
3315		hw->bus.speed = ixgbe_bus_speed_5000;
3316		break;
3317	case IXGBE_PCI_LINK_SPEED_8000:
3318		hw->bus.speed = ixgbe_bus_speed_8000;
3319		break;
3320	default:
3321		hw->bus.speed = ixgbe_bus_speed_unknown;
3322		break;
3323	}
3324
3325	mac->ops.set_lan_id(hw);
3326
3327display:
3328	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3329	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3330	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3331	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3332	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3333	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3334	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3335	    ("Unknown"));
3336
3337	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3338	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3339	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
3340		device_printf(dev, "PCI-Express bandwidth available"
3341		    " for this card\n     is not sufficient for"
3342		    " optimal performance.\n");
3343		device_printf(dev, "For optimal performance a x8 "
3344		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
3345        }
3346	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3347	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3348	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
3349		device_printf(dev, "PCI-Express bandwidth available"
3350		    " for this card\n     is not sufficient for"
3351		    " optimal performance.\n");
3352		device_printf(dev, "For optimal performance a x8 "
3353		    "PCIE Gen3 slot is required.\n");
3354        }
3355
3356	return;
3357}
3358
3359
3360/*
3361** Setup the correct IVAR register for a particular MSIX interrupt
3362**   (yes this is all very magic and confusing :)
3363**  - entry is the register array entry
3364**  - vector is the MSIX vector for this queue
3365**  - type is RX/TX/MISC
3366*/
3367static void
3368ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3369{
3370	struct ixgbe_hw *hw = &adapter->hw;
3371	u32 ivar, index;
3372
3373	vector |= IXGBE_IVAR_ALLOC_VAL;
3374
3375	switch (hw->mac.type) {
3376
3377	case ixgbe_mac_82598EB:
3378		if (type == -1)
3379			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3380		else
3381			entry += (type * 64);
3382		index = (entry >> 2) & 0x1F;
3383		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3384		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3385		ivar |= (vector << (8 * (entry & 0x3)));
3386		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3387		break;
3388
3389	case ixgbe_mac_82599EB:
3390	case ixgbe_mac_X540:
3391	case ixgbe_mac_X550:
3392	case ixgbe_mac_X550EM_x:
3393		if (type == -1) { /* MISC IVAR */
3394			index = (entry & 1) * 8;
3395			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3396			ivar &= ~(0xFF << index);
3397			ivar |= (vector << index);
3398			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3399		} else {	/* RX/TX IVARS */
3400			index = (16 * (entry & 1)) + (8 * type);
3401			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3402			ivar &= ~(0xFF << index);
3403			ivar |= (vector << index);
3404			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3405		}
3406
3407	default:
3408		break;
3409	}
3410}
3411
3412static void
3413ixgbe_configure_ivars(struct adapter *adapter)
3414{
3415	struct  ix_queue *que = adapter->queues;
3416	u32 newitr;
3417
3418	if (ixgbe_max_interrupt_rate > 0)
3419		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3420	else {
3421		/*
3422		** Disable DMA coalescing if interrupt moderation is
3423		** disabled.
3424		*/
3425		adapter->dmac = 0;
3426		newitr = 0;
3427	}
3428
3429        for (int i = 0; i < adapter->num_queues; i++, que++) {
3430		/* First the RX queue entry */
3431                ixgbe_set_ivar(adapter, i, que->msix, 0);
3432		/* ... and the TX */
3433		ixgbe_set_ivar(adapter, i, que->msix, 1);
3434		/* Set an Initial EITR value */
3435                IXGBE_WRITE_REG(&adapter->hw,
3436                    IXGBE_EITR(que->msix), newitr);
3437	}
3438
3439	/* For the Link interrupt */
3440        ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3441}
3442
3443/*
3444** ixgbe_sfp_probe - called in the local timer to
3445** determine if a port had optics inserted.
3446*/
3447static bool ixgbe_sfp_probe(struct adapter *adapter)
3448{
3449	struct ixgbe_hw	*hw = &adapter->hw;
3450	device_t	dev = adapter->dev;
3451	bool		result = FALSE;
3452
3453	if ((hw->phy.type == ixgbe_phy_nl) &&
3454	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3455		s32 ret = hw->phy.ops.identify_sfp(hw);
3456		if (ret)
3457                        goto out;
3458		ret = hw->phy.ops.reset(hw);
3459		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3460			device_printf(dev,"Unsupported SFP+ module detected!");
3461			printf(" Reload driver with supported module.\n");
3462			adapter->sfp_probe = FALSE;
3463                        goto out;
3464		} else
3465			device_printf(dev,"SFP+ module detected!\n");
3466		/* We now have supported optics */
3467		adapter->sfp_probe = FALSE;
3468		/* Set the optics type so system reports correctly */
3469		ixgbe_setup_optics(adapter);
3470		result = TRUE;
3471	}
3472out:
3473	return (result);
3474}
3475
3476/*
3477** Tasklet handler for MSIX Link interrupts
3478**  - do outside interrupt since it might sleep
3479*/
3480static void
3481ixgbe_handle_link(void *context, int pending)
3482{
3483	struct adapter  *adapter = context;
3484
3485	ixgbe_check_link(&adapter->hw,
3486	    &adapter->link_speed, &adapter->link_up, 0);
3487	ixgbe_update_link_status(adapter);
3488}
3489
3490/*
3491** Tasklet for handling SFP module interrupts
3492*/
3493static void
3494ixgbe_handle_mod(void *context, int pending)
3495{
3496	struct adapter  *adapter = context;
3497	struct ixgbe_hw *hw = &adapter->hw;
3498	device_t	dev = adapter->dev;
3499	u32 err;
3500
3501	err = hw->phy.ops.identify_sfp(hw);
3502	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3503		device_printf(dev,
3504		    "Unsupported SFP+ module type was detected.\n");
3505		return;
3506	}
3507	err = hw->mac.ops.setup_sfp(hw);
3508	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3509		device_printf(dev,
3510		    "Setup failure - unsupported SFP+ module type.\n");
3511		return;
3512	}
3513	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3514	return;
3515}
3516
3517
3518/*
3519** Tasklet for handling MSF (multispeed fiber) interrupts
3520*/
3521static void
3522ixgbe_handle_msf(void *context, int pending)
3523{
3524	struct adapter  *adapter = context;
3525	struct ixgbe_hw *hw = &adapter->hw;
3526	u32 autoneg;
3527	bool negotiate;
3528	int err;
3529
3530	err = hw->phy.ops.identify_sfp(hw);
3531	if (!err) {
3532		ixgbe_setup_optics(adapter);
3533		INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3534	}
3535
3536	autoneg = hw->phy.autoneg_advertised;
3537	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3538		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3539	if (hw->mac.ops.setup_link)
3540		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3541
3542	ifmedia_removeall(&adapter->media);
3543	ixgbe_add_media_types(adapter);
3544	return;
3545}
3546
3547/*
3548** Tasklet for handling interrupts from an external PHY
3549*/
3550static void
3551ixgbe_handle_phy(void *context, int pending)
3552{
3553	struct adapter  *adapter = context;
3554	struct ixgbe_hw *hw = &adapter->hw;
3555	int error;
3556
3557	error = hw->phy.ops.handle_lasi(hw);
3558	if (error == IXGBE_ERR_OVERTEMP)
3559		device_printf(adapter->dev,
3560		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3561		    " PHY will downshift to lower power state!\n");
3562	else if (error)
3563		device_printf(adapter->dev,
3564		    "Error handling LASI interrupt: %d\n",
3565		    error);
3566	return;
3567}
3568
3569#ifdef IXGBE_FDIR
3570/*
3571** Tasklet for reinitializing the Flow Director filter table
3572*/
3573static void
3574ixgbe_reinit_fdir(void *context, int pending)
3575{
3576	struct adapter  *adapter = context;
3577	struct ifnet   *ifp = adapter->ifp;
3578
3579	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3580		return;
3581	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3582	adapter->fdir_reinit = 0;
3583	/* re-enable flow director interrupts */
3584	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3585	/* Restart the interface */
3586	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3587	return;
3588}
3589#endif
3590
3591/*********************************************************************
3592 *
3593 *  Configure DMA Coalescing
3594 *
3595 **********************************************************************/
3596static void
3597ixgbe_config_dmac(struct adapter *adapter)
3598{
3599	struct ixgbe_hw *hw = &adapter->hw;
3600	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3601
3602	if (hw->mac.type < ixgbe_mac_X550 ||
3603	    !hw->mac.ops.dmac_config)
3604		return;
3605
3606	if (dcfg->watchdog_timer ^ adapter->dmac ||
3607	    dcfg->link_speed ^ adapter->link_speed) {
3608		dcfg->watchdog_timer = adapter->dmac;
3609		dcfg->fcoe_en = false;
3610		dcfg->link_speed = adapter->link_speed;
3611		dcfg->num_tcs = 1;
3612
3613		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3614		    dcfg->watchdog_timer, dcfg->link_speed);
3615
3616		hw->mac.ops.dmac_config(hw);
3617	}
3618}
3619
3620/*
3621 * Checks whether the adapter supports Energy Efficient Ethernet
3622 * or not, based on device ID.
3623 */
3624static void
3625ixgbe_check_eee_support(struct adapter *adapter)
3626{
3627	struct ixgbe_hw *hw = &adapter->hw;
3628
3629	adapter->eee_support = adapter->eee_enabled =
3630	    (hw->device_id == IXGBE_DEV_ID_X550T ||
3631	        hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3632}
3633
3634/*
3635 * Checks whether the adapter's ports are capable of
3636 * Wake On LAN by reading the adapter's NVM.
3637 *
3638 * Sets each port's hw->wol_enabled value depending
3639 * on the value read here.
3640 */
3641static void
3642ixgbe_check_wol_support(struct adapter *adapter)
3643{
3644	struct ixgbe_hw *hw = &adapter->hw;
3645	u16 dev_caps = 0;
3646
3647	/* Find out WoL support for port */
3648	adapter->wol_support = hw->wol_enabled = 0;
3649	ixgbe_get_device_caps(hw, &dev_caps);
3650	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3651	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3652	        hw->bus.func == 0))
3653	    adapter->wol_support = hw->wol_enabled = 1;
3654
3655	/* Save initial wake up filter configuration */
3656	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3657
3658	return;
3659}
3660
3661/*
3662 * Prepare the adapter/port for LPLU and/or WoL
3663 */
3664static int
3665ixgbe_setup_low_power_mode(struct adapter *adapter)
3666{
3667	struct ixgbe_hw *hw = &adapter->hw;
3668	device_t dev = adapter->dev;
3669	s32 error = 0;
3670
3671	mtx_assert(&adapter->core_mtx, MA_OWNED);
3672
3673	/* Limit power management flow to X550EM baseT */
3674	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3675	    && hw->phy.ops.enter_lplu) {
3676		/* Turn off support for APM wakeup. (Using ACPI instead) */
3677		IXGBE_WRITE_REG(hw, IXGBE_GRC,
3678		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3679
3680		/*
3681		 * Clear Wake Up Status register to prevent any previous wakeup
3682		 * events from waking us up immediately after we suspend.
3683		 */
3684		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3685
3686		/*
3687		 * Program the Wakeup Filter Control register with user filter
3688		 * settings
3689		 */
3690		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3691
3692		/* Enable wakeups and power management in Wakeup Control */
3693		IXGBE_WRITE_REG(hw, IXGBE_WUC,
3694		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3695
3696		/* X550EM baseT adapters need a special LPLU flow */
3697		hw->phy.reset_disable = true;
3698		ixgbe_stop(adapter);
3699		error = hw->phy.ops.enter_lplu(hw);
3700		if (error)
3701			device_printf(dev,
3702			    "Error entering LPLU: %d\n", error);
3703		hw->phy.reset_disable = false;
3704	} else {
3705		/* Just stop for other adapters */
3706		ixgbe_stop(adapter);
3707	}
3708
3709	return error;
3710}
3711
3712/**********************************************************************
3713 *
3714 *  Update the board statistics counters.
3715 *
3716 **********************************************************************/
3717static void
3718ixgbe_update_stats_counters(struct adapter *adapter)
3719{
3720	struct ixgbe_hw *hw = &adapter->hw;
3721	u32 missed_rx = 0, bprc, lxon, lxoff, total;
3722	u64 total_missed_rx = 0;
3723
3724	adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3725	adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3726	adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3727	adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3728
3729	for (int i = 0; i < 16; i++) {
3730		adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3731		adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3732		adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3733	}
3734	adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3735	adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3736	adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3737
3738	/* Hardware workaround, gprc counts missed packets */
3739	adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3740	adapter->stats.pf.gprc -= missed_rx;
3741
3742	if (hw->mac.type != ixgbe_mac_82598EB) {
3743		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3744		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3745		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3746		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3747		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3748		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3749		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3750		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3751	} else {
3752		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3753		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3754		/* 82598 only has a counter in the high register */
3755		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3756		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3757		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3758	}
3759
3760	/*
3761	 * Workaround: mprc hardware is incorrectly counting
3762	 * broadcasts, so for now we subtract those.
3763	 */
3764	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3765	adapter->stats.pf.bprc += bprc;
3766	adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3767	if (hw->mac.type == ixgbe_mac_82598EB)
3768		adapter->stats.pf.mprc -= bprc;
3769
3770	adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3771	adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3772	adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3773	adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3774	adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3775	adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3776
3777	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3778	adapter->stats.pf.lxontxc += lxon;
3779	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3780	adapter->stats.pf.lxofftxc += lxoff;
3781	total = lxon + lxoff;
3782
3783	adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3784	adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3785	adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3786	adapter->stats.pf.gptc -= total;
3787	adapter->stats.pf.mptc -= total;
3788	adapter->stats.pf.ptc64 -= total;
3789	adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3790
3791	adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3792	adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3793	adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3794	adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3795	adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3796	adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3797	adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3798	adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3799	adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3800	adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3801	adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3802	adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3803	adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3804	adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3805	adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3806	adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3807	adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3808	adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3809	/* Only read FCOE on 82599 */
3810	if (hw->mac.type != ixgbe_mac_82598EB) {
3811		adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3812		adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3813		adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3814		adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3815		adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3816	}
3817
3818	/* Fill out the OS statistics structure */
3819	IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3820	IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3821	IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3822	IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3823	IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3824	IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3825	IXGBE_SET_COLLISIONS(adapter, 0);
3826	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3827	IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3828	    + adapter->stats.pf.rlec);
3829}
3830
#if __FreeBSD_version >= 1100036
/*
 * ifnet if_get_counter method: report interface statistics from the
 * adapter's accumulated software counters.  OQDROPS is summed live
 * from the per-queue buf_ring drop counts; anything unrecognized is
 * deferred to the stack's default counter handling.
 */
static uint64_t
ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct adapter *adapter = if_getsoftc(ifp);
	struct tx_ring *ring;
	uint64_t drops;
	int i;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	case IFCOUNTER_OMCASTS:
		return (adapter->omcasts);
	case IFCOUNTER_COLLISIONS:
		/* Full-duplex-only hardware: never any collisions */
		return (0);
	case IFCOUNTER_IQDROPS:
		return (adapter->iqdrops);
	case IFCOUNTER_OQDROPS:
		drops = 0;
		ring = adapter->tx_rings;
		for (i = 0; i < adapter->num_queues; i++, ring++)
			drops += ring->br->br_drops;
		return (drops);
	case IFCOUNTER_IERRORS:
		return (adapter->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#endif
3871
3872/** ixgbe_sysctl_tdh_handler - Handler function
3873 *  Retrieves the TDH value from the hardware
3874 */
3875static int
3876ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3877{
3878	int error;
3879
3880	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3881	if (!txr) return 0;
3882
3883	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3884	error = sysctl_handle_int(oidp, &val, 0, req);
3885	if (error || !req->newptr)
3886		return error;
3887	return 0;
3888}
3889
3890/** ixgbe_sysctl_tdt_handler - Handler function
3891 *  Retrieves the TDT value from the hardware
3892 */
3893static int
3894ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3895{
3896	int error;
3897
3898	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3899	if (!txr) return 0;
3900
3901	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3902	error = sysctl_handle_int(oidp, &val, 0, req);
3903	if (error || !req->newptr)
3904		return error;
3905	return 0;
3906}
3907
3908/** ixgbe_sysctl_rdh_handler - Handler function
3909 *  Retrieves the RDH value from the hardware
3910 */
3911static int
3912ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3913{
3914	int error;
3915
3916	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3917	if (!rxr) return 0;
3918
3919	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3920	error = sysctl_handle_int(oidp, &val, 0, req);
3921	if (error || !req->newptr)
3922		return error;
3923	return 0;
3924}
3925
3926/** ixgbe_sysctl_rdt_handler - Handler function
3927 *  Retrieves the RDT value from the hardware
3928 */
3929static int
3930ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3931{
3932	int error;
3933
3934	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3935	if (!rxr) return 0;
3936
3937	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3938	error = sysctl_handle_int(oidp, &val, 0, req);
3939	if (error || !req->newptr)
3940		return error;
3941	return 0;
3942}
3943
/*
 * Sysctl handler for a queue's interrupt rate.
 *
 * Read: derive interrupts/sec from the queue's current EITR interval
 * field (bits 11:3).  Write: clamp the requested rate to at least
 * 1000/sec, reprogram this queue's EITR, and record the new cap in
 * the driver-wide ixgbe_max_interrupt_rate tunable.  A rate of 0
 * (or >= 500000) removes the limitation.
 */
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
	unsigned int reg, usec, rate;

	/* EITR interval is in bits 11:3 of the register */
	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;	/* interval 0 == no throttling */
	error = sysctl_handle_int(oidp, &rate, 0, req);
	if (error || !req->newptr)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	ixgbe_max_interrupt_rate = 0;
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;	/* floor the requested rate */
		ixgbe_max_interrupt_rate = rate;
		reg |= ((4000000/rate) & 0xff8 );
	}
	/*
	 * NOTE(review): only this queue's EITR is rewritten, even though
	 * the global ixgbe_max_interrupt_rate is updated — confirm that
	 * leaving the other queues at their old interval is intended.
	 */
	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
	return 0;
}
3971
/*
 * Register the device-level sysctl nodes under this device's tree:
 * flow control, interrupt moderation, advertised speed and thermal
 * test for all parts, plus DMA coalescing (X550+), EEE (X550T /
 * X550EM KR), WoL (10GBaseT parts) and external-PHY nodes
 * (X550EM 10GBaseT) where the hardware supports them.
 */
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
			ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);

        SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
			CTLFLAG_RW,
			&ixgbe_enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
			ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
			ixgbe_sysctl_thermal_test, "I", "Thermal Test");

	/* for X550 devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_dmac, "I", "DMA Coalesce");

	/* for X550T and X550EM backplane devices */
	if (hw->device_id == IXGBE_DEV_ID_X550T ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
		struct sysctl_oid *eee_node;
		struct sysctl_oid_list *eee_list;

		eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
					   CTLFLAG_RD, NULL,
					   "Energy Efficient Ethernet sysctls");
		eee_list = SYSCTL_CHILDREN(eee_node);

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_eee_enable, "I",
				"Enable or Disable EEE");

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_eee_negotiated, "I",
				"EEE negotiated on link");

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_eee_tx_lpi_status, "I",
				"Whether or not TX link is in LPI state");

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_eee_rx_lpi_status, "I",
				"Whether or not RX link is in LPI state");
	}

	/* for certain 10GBaseT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550T ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_wol_enable, "I",
				"Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_wufc, "I",
				"Enable/Disable Wake Up Filters");
	}

	/* for X550EM 10GBaseT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
					   CTLFLAG_RD, NULL,
					   "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_phy_temp, "I",
				"Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_phy_overtemp_occurred, "I",
				"External PHY High Temperature Event Occurred");
	}
}
4073
4074/*
4075 * Add sysctl variables, one per statistic, to the system.
4076 */
4077static void
4078ixgbe_add_hw_stats(struct adapter *adapter)
4079{
4080	device_t dev = adapter->dev;
4081
4082	struct tx_ring *txr = adapter->tx_rings;
4083	struct rx_ring *rxr = adapter->rx_rings;
4084
4085	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4086	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4087	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4088	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4089
4090	struct sysctl_oid *stat_node, *queue_node;
4091	struct sysctl_oid_list *stat_list, *queue_list;
4092
4093#define QUEUE_NAME_LEN 32
4094	char namebuf[QUEUE_NAME_LEN];
4095
4096	/* Driver Statistics */
4097	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4098			CTLFLAG_RD, &adapter->dropped_pkts,
4099			"Driver dropped packets");
4100	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4101			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4102			"m_defrag() failed");
4103	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4104			CTLFLAG_RD, &adapter->watchdog_events,
4105			"Watchdog timeouts");
4106	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4107			CTLFLAG_RD, &adapter->link_irq,
4108			"Link MSIX IRQ Handled");
4109
4110	for (int i = 0; i < adapter->num_queues; i++, txr++) {
4111		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4112		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4113					    CTLFLAG_RD, NULL, "Queue Name");
4114		queue_list = SYSCTL_CHILDREN(queue_node);
4115
4116		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4117				CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4118				sizeof(&adapter->queues[i]),
4119				ixgbe_sysctl_interrupt_rate_handler, "IU",
4120				"Interrupt Rate");
4121		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4122				CTLFLAG_RD, &(adapter->queues[i].irqs),
4123				"irqs on this queue");
4124		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4125				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4126				ixgbe_sysctl_tdh_handler, "IU",
4127				"Transmit Descriptor Head");
4128		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4129				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4130				ixgbe_sysctl_tdt_handler, "IU",
4131				"Transmit Descriptor Tail");
4132		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4133				CTLFLAG_RD, &txr->tso_tx,
4134				"TSO");
4135		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4136				CTLFLAG_RD, &txr->no_tx_dma_setup,
4137				"Driver tx dma failure in xmit");
4138		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4139				CTLFLAG_RD, &txr->no_desc_avail,
4140				"Queue No Descriptor Available");
4141		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4142				CTLFLAG_RD, &txr->total_packets,
4143				"Queue Packets Transmitted");
4144		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4145				CTLFLAG_RD, &txr->br->br_drops,
4146				"Packets dropped in buf_ring");
4147	}
4148
4149	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4150		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4151		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4152					    CTLFLAG_RD, NULL, "Queue Name");
4153		queue_list = SYSCTL_CHILDREN(queue_node);
4154
4155		struct lro_ctrl *lro = &rxr->lro;
4156
4157		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4158		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4159					    CTLFLAG_RD, NULL, "Queue Name");
4160		queue_list = SYSCTL_CHILDREN(queue_node);
4161
4162		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4163				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4164				ixgbe_sysctl_rdh_handler, "IU",
4165				"Receive Descriptor Head");
4166		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4167				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4168				ixgbe_sysctl_rdt_handler, "IU",
4169				"Receive Descriptor Tail");
4170		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4171				CTLFLAG_RD, &rxr->rx_packets,
4172				"Queue Packets Received");
4173		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4174				CTLFLAG_RD, &rxr->rx_bytes,
4175				"Queue Bytes Received");
4176		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4177				CTLFLAG_RD, &rxr->rx_copies,
4178				"Copied RX Frames");
4179		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4180				CTLFLAG_RD, &lro->lro_queued, 0,
4181				"LRO Queued");
4182		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4183				CTLFLAG_RD, &lro->lro_flushed, 0,
4184				"LRO Flushed");
4185	}
4186
4187	/* MAC stats get the own sub node */
4188
4189	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4190				    CTLFLAG_RD, NULL, "MAC Statistics");
4191	stat_list = SYSCTL_CHILDREN(stat_node);
4192
4193	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4194			CTLFLAG_RD, &stats->crcerrs,
4195			"CRC Errors");
4196	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4197			CTLFLAG_RD, &stats->illerrc,
4198			"Illegal Byte Errors");
4199	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4200			CTLFLAG_RD, &stats->errbc,
4201			"Byte Errors");
4202	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4203			CTLFLAG_RD, &stats->mspdc,
4204			"MAC Short Packets Discarded");
4205	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4206			CTLFLAG_RD, &stats->mlfc,
4207			"MAC Local Faults");
4208	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4209			CTLFLAG_RD, &stats->mrfc,
4210			"MAC Remote Faults");
4211	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4212			CTLFLAG_RD, &stats->rlec,
4213			"Receive Length Errors");
4214
4215	/* Flow Control stats */
4216	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4217			CTLFLAG_RD, &stats->lxontxc,
4218			"Link XON Transmitted");
4219	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4220			CTLFLAG_RD, &stats->lxonrxc,
4221			"Link XON Received");
4222	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4223			CTLFLAG_RD, &stats->lxofftxc,
4224			"Link XOFF Transmitted");
4225	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4226			CTLFLAG_RD, &stats->lxoffrxc,
4227			"Link XOFF Received");
4228
4229	/* Packet Reception Stats */
4230	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4231			CTLFLAG_RD, &stats->tor,
4232			"Total Octets Received");
4233	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4234			CTLFLAG_RD, &stats->gorc,
4235			"Good Octets Received");
4236	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4237			CTLFLAG_RD, &stats->tpr,
4238			"Total Packets Received");
4239	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4240			CTLFLAG_RD, &stats->gprc,
4241			"Good Packets Received");
4242	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4243			CTLFLAG_RD, &stats->mprc,
4244			"Multicast Packets Received");
4245	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4246			CTLFLAG_RD, &stats->bprc,
4247			"Broadcast Packets Received");
4248	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4249			CTLFLAG_RD, &stats->prc64,
4250			"64 byte frames received ");
4251	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4252			CTLFLAG_RD, &stats->prc127,
4253			"65-127 byte frames received");
4254	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4255			CTLFLAG_RD, &stats->prc255,
4256			"128-255 byte frames received");
4257	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4258			CTLFLAG_RD, &stats->prc511,
4259			"256-511 byte frames received");
4260	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4261			CTLFLAG_RD, &stats->prc1023,
4262			"512-1023 byte frames received");
4263	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4264			CTLFLAG_RD, &stats->prc1522,
4265			"1023-1522 byte frames received");
4266	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4267			CTLFLAG_RD, &stats->ruc,
4268			"Receive Undersized");
4269	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4270			CTLFLAG_RD, &stats->rfc,
4271			"Fragmented Packets Received ");
4272	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4273			CTLFLAG_RD, &stats->roc,
4274			"Oversized Packets Received");
4275	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4276			CTLFLAG_RD, &stats->rjc,
4277			"Received Jabber");
4278	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4279			CTLFLAG_RD, &stats->mngprc,
4280			"Management Packets Received");
4281	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4282			CTLFLAG_RD, &stats->mngptc,
4283			"Management Packets Dropped");
4284	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4285			CTLFLAG_RD, &stats->xec,
4286			"Checksum Errors");
4287
4288	/* Packet Transmission Stats */
4289	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4290			CTLFLAG_RD, &stats->gotc,
4291			"Good Octets Transmitted");
4292	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4293			CTLFLAG_RD, &stats->tpt,
4294			"Total Packets Transmitted");
4295	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4296			CTLFLAG_RD, &stats->gptc,
4297			"Good Packets Transmitted");
4298	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4299			CTLFLAG_RD, &stats->bptc,
4300			"Broadcast Packets Transmitted");
4301	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4302			CTLFLAG_RD, &stats->mptc,
4303			"Multicast Packets Transmitted");
4304	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4305			CTLFLAG_RD, &stats->mngptc,
4306			"Management Packets Transmitted");
4307	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4308			CTLFLAG_RD, &stats->ptc64,
4309			"64 byte frames transmitted ");
4310	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4311			CTLFLAG_RD, &stats->ptc127,
4312			"65-127 byte frames transmitted");
4313	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4314			CTLFLAG_RD, &stats->ptc255,
4315			"128-255 byte frames transmitted");
4316	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4317			CTLFLAG_RD, &stats->ptc511,
4318			"256-511 byte frames transmitted");
4319	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4320			CTLFLAG_RD, &stats->ptc1023,
4321			"512-1023 byte frames transmitted");
4322	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4323			CTLFLAG_RD, &stats->ptc1522,
4324			"1024-1522 byte frames transmitted");
4325}
4326
4327/*
4328** Set flow control using sysctl:
4329** Flow control values:
4330** 	0 - off
4331**	1 - rx pause
4332**	2 - tx pause
4333**	3 - full
4334*/
4335static int
4336ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4337{
4338	int error, last;
4339	struct adapter *adapter = (struct adapter *) arg1;
4340
4341	last = adapter->fc;
4342	error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4343	if ((error) || (req->newptr == NULL))
4344		return (error);
4345
4346	/* Don't bother if it's not changed */
4347	if (adapter->fc == last)
4348		return (0);
4349
4350	switch (adapter->fc) {
4351		case ixgbe_fc_rx_pause:
4352		case ixgbe_fc_tx_pause:
4353		case ixgbe_fc_full:
4354			adapter->hw.fc.requested_mode = adapter->fc;
4355			if (adapter->num_queues > 1)
4356				ixgbe_disable_rx_drop(adapter);
4357			break;
4358		case ixgbe_fc_none:
4359			adapter->hw.fc.requested_mode = ixgbe_fc_none;
4360			if (adapter->num_queues > 1)
4361				ixgbe_enable_rx_drop(adapter);
4362			break;
4363		default:
4364			adapter->fc = last;
4365			return (EINVAL);
4366	}
4367	/* Don't autoneg if forcing a value */
4368	adapter->hw.fc.disable_fc_autoneg = TRUE;
4369	ixgbe_fc_enable(&adapter->hw);
4370	return error;
4371}
4372
4373/*
4374** Control advertised link speed:
4375**	Flags:
4376**	0x1 - advertise 100 Mb
4377**	0x2 - advertise 1G
4378**	0x4 - advertise 10G
4379*/
4380static int
4381ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4382{
4383	int			error = 0, requested;
4384	struct adapter		*adapter;
4385	device_t		dev;
4386	struct ixgbe_hw		*hw;
4387	ixgbe_link_speed	speed = 0;
4388
4389	adapter = (struct adapter *) arg1;
4390	dev = adapter->dev;
4391	hw = &adapter->hw;
4392
4393	requested = adapter->advertise;
4394	error = sysctl_handle_int(oidp, &requested, 0, req);
4395	if ((error) || (req->newptr == NULL))
4396		return (error);
4397
4398	/* Checks to validate new value */
4399	if (adapter->advertise == requested) /* no change */
4400		return (0);
4401
4402	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4403	    (hw->phy.multispeed_fiber))) {
4404		device_printf(dev,
4405		    "Advertised speed can only be set on copper or "
4406		    "multispeed fiber media types.\n");
4407		return (EINVAL);
4408	}
4409
4410	if (requested < 0x1 || requested > 0x7) {
4411		device_printf(dev,
4412		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4413		return (EINVAL);
4414	}
4415
4416	if ((requested & 0x1)
4417	    && (hw->mac.type != ixgbe_mac_X540)
4418	    && (hw->mac.type != ixgbe_mac_X550)) {
4419		device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4420		return (EINVAL);
4421	}
4422
4423	/* Set new value and report new advertised mode */
4424	if (requested & 0x1)
4425		speed |= IXGBE_LINK_SPEED_100_FULL;
4426	if (requested & 0x2)
4427		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4428	if (requested & 0x4)
4429		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4430
4431	hw->mac.autotry_restart = TRUE;
4432	hw->mac.ops.setup_link(hw, speed, TRUE);
4433	adapter->advertise = requested;
4434
4435	return (error);
4436}
4437
4438/*
4439 * The following two sysctls are for X550 BaseT devices;
4440 * they deal with the external PHY used in them.
4441 */
4442static int
4443ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4444{
4445	struct adapter	*adapter = (struct adapter *) arg1;
4446	struct ixgbe_hw *hw = &adapter->hw;
4447	u16 reg;
4448
4449	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4450		device_printf(adapter->dev,
4451		    "Device has no supported external thermal sensor.\n");
4452		return (ENODEV);
4453	}
4454
4455	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4456				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4457				      &reg)) {
4458		device_printf(adapter->dev,
4459		    "Error reading from PHY's current temperature register\n");
4460		return (EAGAIN);
4461	}
4462
4463	/* Shift temp for output */
4464	reg = reg >> 8;
4465
4466	return (sysctl_handle_int(oidp, NULL, reg, req));
4467}
4468
4469/*
4470 * Reports whether the current PHY temperature is over
4471 * the overtemp threshold.
4472 *  - This is reported directly from the PHY
4473 */
4474static int
4475ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4476{
4477	struct adapter	*adapter = (struct adapter *) arg1;
4478	struct ixgbe_hw *hw = &adapter->hw;
4479	u16 reg;
4480
4481	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4482		device_printf(adapter->dev,
4483		    "Device has no supported external thermal sensor.\n");
4484		return (ENODEV);
4485	}
4486
4487	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4488				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4489				      &reg)) {
4490		device_printf(adapter->dev,
4491		    "Error reading from PHY's temperature status register\n");
4492		return (EAGAIN);
4493	}
4494
4495	/* Get occurrence bit */
4496	reg = !!(reg & 0x4000);
4497	return (sysctl_handle_int(oidp, 0, reg, req));
4498}
4499
4500/*
4501** Thermal Shutdown Trigger (internal MAC)
4502**   - Set this to 1 to cause an overtemp event to occur
4503*/
4504static int
4505ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4506{
4507	struct adapter	*adapter = (struct adapter *) arg1;
4508	struct ixgbe_hw *hw = &adapter->hw;
4509	int error, fire = 0;
4510
4511	error = sysctl_handle_int(oidp, &fire, 0, req);
4512	if ((error) || (req->newptr == NULL))
4513		return (error);
4514
4515	if (fire) {
4516		u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4517		reg |= IXGBE_EICR_TS;
4518		IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4519	}
4520
4521	return (0);
4522}
4523
4524/*
4525** Manage DMA Coalescing.
4526** Control values:
4527** 	0/1 - off / on (use default value of 1000)
4528**
4529**	Legal timer values are:
4530**	50,100,250,500,1000,2000,5000,10000
4531**
4532**	Turning off interrupt moderation will also turn this off.
4533*/
4534static int
4535ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4536{
4537	struct adapter *adapter = (struct adapter *) arg1;
4538	struct ixgbe_hw *hw = &adapter->hw;
4539	struct ifnet *ifp = adapter->ifp;
4540	int		error;
4541	u16		oldval;
4542
4543	oldval = adapter->dmac;
4544	error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4545	if ((error) || (req->newptr == NULL))
4546		return (error);
4547
4548	switch (hw->mac.type) {
4549	case ixgbe_mac_X550:
4550	case ixgbe_mac_X550EM_x:
4551		break;
4552	default:
4553		device_printf(adapter->dev,
4554		    "DMA Coalescing is only supported on X550 devices\n");
4555		return (ENODEV);
4556	}
4557
4558	switch (adapter->dmac) {
4559	case 0:
4560		/* Disabled */
4561		break;
4562	case 1: /* Enable and use default */
4563		adapter->dmac = 1000;
4564		break;
4565	case 50:
4566	case 100:
4567	case 250:
4568	case 500:
4569	case 1000:
4570	case 2000:
4571	case 5000:
4572	case 10000:
4573		/* Legal values - allow */
4574		break;
4575	default:
4576		/* Do nothing, illegal value */
4577		adapter->dmac = oldval;
4578		return (EINVAL);
4579	}
4580
4581	/* Re-initialize hardware if it's already running */
4582	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4583		ixgbe_init(adapter);
4584
4585	return (0);
4586}
4587
4588/*
4589 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4590 * Values:
4591 *	0 - disabled
4592 *	1 - enabled
4593 */
4594static int
4595ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4596{
4597	struct adapter *adapter = (struct adapter *) arg1;
4598	struct ixgbe_hw *hw = &adapter->hw;
4599	int new_wol_enabled;
4600	int error = 0;
4601
4602	new_wol_enabled = hw->wol_enabled;
4603	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4604	if ((error) || (req->newptr == NULL))
4605		return (error);
4606	if (new_wol_enabled == hw->wol_enabled)
4607		return (0);
4608
4609	if (new_wol_enabled > 0 && !adapter->wol_support)
4610		return (ENODEV);
4611	else
4612		hw->wol_enabled = !!(new_wol_enabled);
4613
4614	return (0);
4615}
4616
4617/*
4618 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4619 * if supported by the adapter.
4620 * Values:
4621 *	0 - disabled
4622 *	1 - enabled
4623 */
4624static int
4625ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4626{
4627	struct adapter *adapter = (struct adapter *) arg1;
4628	struct ifnet *ifp = adapter->ifp;
4629	int new_eee_enabled, error = 0;
4630
4631	new_eee_enabled = adapter->eee_enabled;
4632	error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4633	if ((error) || (req->newptr == NULL))
4634		return (error);
4635	if (new_eee_enabled == adapter->eee_enabled)
4636		return (0);
4637
4638	if (new_eee_enabled > 0 && !adapter->eee_support)
4639		return (ENODEV);
4640	else
4641		adapter->eee_enabled = !!(new_eee_enabled);
4642
4643	/* Re-initialize hardware if it's already running */
4644	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4645		ixgbe_init(adapter);
4646
4647	return (0);
4648}
4649
4650/*
4651 * Read-only sysctl indicating whether EEE support was negotiated
4652 * on the link.
4653 */
4654static int
4655ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4656{
4657	struct adapter *adapter = (struct adapter *) arg1;
4658	struct ixgbe_hw *hw = &adapter->hw;
4659	bool status;
4660
4661	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4662
4663	return (sysctl_handle_int(oidp, 0, status, req));
4664}
4665
4666/*
4667 * Read-only sysctl indicating whether RX Link is in LPI state.
4668 */
4669static int
4670ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4671{
4672	struct adapter *adapter = (struct adapter *) arg1;
4673	struct ixgbe_hw *hw = &adapter->hw;
4674	bool status;
4675
4676	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4677	    IXGBE_EEE_RX_LPI_STATUS);
4678
4679	return (sysctl_handle_int(oidp, 0, status, req));
4680}
4681
4682/*
4683 * Read-only sysctl indicating whether TX Link is in LPI state.
4684 */
4685static int
4686ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4687{
4688	struct adapter *adapter = (struct adapter *) arg1;
4689	struct ixgbe_hw *hw = &adapter->hw;
4690	bool status;
4691
4692	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4693	    IXGBE_EEE_TX_LPI_STATUS);
4694
4695	return (sysctl_handle_int(oidp, 0, status, req));
4696}
4697
4698/*
4699 * Sysctl to enable/disable the types of packets that the
4700 * adapter will wake up on upon receipt.
4701 * WUFC - Wake Up Filter Control
4702 * Flags:
4703 *	0x1  - Link Status Change
4704 *	0x2  - Magic Packet
4705 *	0x4  - Direct Exact
4706 *	0x8  - Directed Multicast
4707 *	0x10 - Broadcast
4708 *	0x20 - ARP/IPv4 Request Packet
4709 *	0x40 - Direct IPv4 Packet
4710 *	0x80 - Direct IPv6 Packet
4711 *
4712 * Setting another flag will cause the sysctl to return an
4713 * error.
4714 */
4715static int
4716ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4717{
4718	struct adapter *adapter = (struct adapter *) arg1;
4719	int error = 0;
4720	u32 new_wufc;
4721
4722	new_wufc = adapter->wufc;
4723
4724	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4725	if ((error) || (req->newptr == NULL))
4726		return (error);
4727	if (new_wufc == adapter->wufc)
4728		return (0);
4729
4730	if (new_wufc & 0xffffff00)
4731		return (EINVAL);
4732	else {
4733		new_wufc &= 0xff;
4734		new_wufc |= (0xffffff & adapter->wufc);
4735		adapter->wufc = new_wufc;
4736	}
4737
4738	return (0);
4739}
4740
4741/*
4742** Enable the hardware to drop packets when the buffer is
4743** full. This is useful when multiqueue,so that no single
4744** queue being full stalls the entire RX engine. We only
4745** enable this when Multiqueue AND when Flow Control is
4746** disabled.
4747*/
4748static void
4749ixgbe_enable_rx_drop(struct adapter *adapter)
4750{
4751        struct ixgbe_hw *hw = &adapter->hw;
4752
4753	for (int i = 0; i < adapter->num_queues; i++) {
4754        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4755        	srrctl |= IXGBE_SRRCTL_DROP_EN;
4756        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4757	}
4758}
4759
4760static void
4761ixgbe_disable_rx_drop(struct adapter *adapter)
4762{
4763        struct ixgbe_hw *hw = &adapter->hw;
4764
4765	for (int i = 0; i < adapter->num_queues; i++) {
4766        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4767        	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4768        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4769	}
4770}
4771
4772static void
4773ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4774{
4775	u32 mask;
4776
4777	switch (adapter->hw.mac.type) {
4778	case ixgbe_mac_82598EB:
4779		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4780		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4781		break;
4782	case ixgbe_mac_82599EB:
4783	case ixgbe_mac_X540:
4784	case ixgbe_mac_X550:
4785	case ixgbe_mac_X550EM_x:
4786		mask = (queues & 0xFFFFFFFF);
4787		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4788		mask = (queues >> 32);
4789		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4790		break;
4791	default:
4792		break;
4793	}
4794}
4795
4796
4797