if_ix.c revision 332447
1/******************************************************************************
2
3  Copyright (c) 2001-2017, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: stable/11/sys/dev/ixgbe/if_ix.c 332447 2018-04-12 19:06:15Z shurd $*/
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#include "opt_rss.h"
40#endif
41
42#include "ixgbe.h"
43
44/************************************************************************
45 * Driver version
46 ************************************************************************/
47char ixgbe_driver_version[] = "3.2.12-k";
48
49
50/************************************************************************
51 * PCI Device ID Table
52 *
53 *   Used by probe to select devices to load on
54 *   Last field stores an index into ixgbe_strings
55 *   Last entry must be all 0s
56 *
57 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/*
	 * Subvendor/subdevice of 0 and a string index of 0 appear to act
	 * as "match any"/"first branding string" — confirm against the
	 * match loop in ixgbe_probe before relying on this.
	 */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry - all-zero sentinel terminates the probe scan */
	{0, 0, 0, 0, 0}
};
108
109/************************************************************************
110 * Table of branding strings
111 ************************************************************************/
static char    *ixgbe_strings[] = {
	/* Selected by the string-index field of ixgbe_vendor_info_array */
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
115
116/************************************************************************
117 * Function prototypes
118 ************************************************************************/
119static int      ixgbe_probe(device_t);
120static int      ixgbe_attach(device_t);
121static int      ixgbe_detach(device_t);
122static int      ixgbe_shutdown(device_t);
123static int      ixgbe_suspend(device_t);
124static int      ixgbe_resume(device_t);
125static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
126static void     ixgbe_init(void *);
127static void     ixgbe_init_locked(struct adapter *);
128static void     ixgbe_stop(void *);
129#if __FreeBSD_version >= 1100036
130static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
131#endif
132static void     ixgbe_init_device_features(struct adapter *);
133static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
134static void     ixgbe_add_media_types(struct adapter *);
135static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
136static int      ixgbe_media_change(struct ifnet *);
137static int      ixgbe_allocate_pci_resources(struct adapter *);
138static void     ixgbe_get_slot_info(struct adapter *);
139static int      ixgbe_allocate_msix(struct adapter *);
140static int      ixgbe_allocate_legacy(struct adapter *);
141static int      ixgbe_configure_interrupts(struct adapter *);
142static void     ixgbe_free_pci_resources(struct adapter *);
143static void     ixgbe_local_timer(void *);
144static int      ixgbe_setup_interface(device_t, struct adapter *);
145static void     ixgbe_config_gpie(struct adapter *);
146static void     ixgbe_config_dmac(struct adapter *);
147static void     ixgbe_config_delay_values(struct adapter *);
148static void     ixgbe_config_link(struct adapter *);
149static void     ixgbe_check_wol_support(struct adapter *);
150static int      ixgbe_setup_low_power_mode(struct adapter *);
151static void     ixgbe_rearm_queues(struct adapter *, u64);
152
153static void     ixgbe_initialize_transmit_units(struct adapter *);
154static void     ixgbe_initialize_receive_units(struct adapter *);
155static void     ixgbe_enable_rx_drop(struct adapter *);
156static void     ixgbe_disable_rx_drop(struct adapter *);
157static void     ixgbe_initialize_rss_mapping(struct adapter *);
158
159static void     ixgbe_enable_intr(struct adapter *);
160static void     ixgbe_disable_intr(struct adapter *);
161static void     ixgbe_update_stats_counters(struct adapter *);
162static void     ixgbe_set_promisc(struct adapter *);
163static void     ixgbe_set_multi(struct adapter *);
164static void     ixgbe_update_link_status(struct adapter *);
165static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
166static void     ixgbe_configure_ivars(struct adapter *);
167static u8       *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
168
169static void     ixgbe_setup_vlan_hw_support(struct adapter *);
170static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
171static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
172
173static void     ixgbe_add_device_sysctls(struct adapter *);
174static void     ixgbe_add_hw_stats(struct adapter *);
175static int      ixgbe_set_flowcntl(struct adapter *, int);
176static int      ixgbe_set_advertise(struct adapter *, int);
177static int      ixgbe_get_advertise(struct adapter *);
178
179/* Sysctl handlers */
180static void     ixgbe_set_sysctl_value(struct adapter *, const char *,
181                                       const char *, int *, int);
182static int      ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
183static int      ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
184static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
185static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
186static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
187static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
188#ifdef IXGBE_DEBUG
189static int      ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
190static int      ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
191#endif
192static int      ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
193static int      ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
194static int      ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
195static int      ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
196static int      ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
197static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
198static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
199
200/* Support for pluggable optic modules */
201static bool     ixgbe_sfp_probe(struct adapter *);
202
203/* Legacy (single vector) interrupt handler */
204static void     ixgbe_legacy_irq(void *);
205
206/* The MSI/MSI-X Interrupt handlers */
207static void     ixgbe_msix_que(void *);
208static void     ixgbe_msix_link(void *);
209
210/* Deferred interrupt tasklets */
211static void     ixgbe_handle_que(void *, int);
212static void     ixgbe_handle_link(void *, int);
213static void     ixgbe_handle_msf(void *, int);
214static void     ixgbe_handle_mod(void *, int);
215static void     ixgbe_handle_phy(void *, int);
216
217
218/************************************************************************
219 *  FreeBSD Device Interface Entry Points
220 ************************************************************************/
221static device_method_t ix_methods[] = {
222	/* Device interface */
223	DEVMETHOD(device_probe, ixgbe_probe),
224	DEVMETHOD(device_attach, ixgbe_attach),
225	DEVMETHOD(device_detach, ixgbe_detach),
226	DEVMETHOD(device_shutdown, ixgbe_shutdown),
227	DEVMETHOD(device_suspend, ixgbe_suspend),
228	DEVMETHOD(device_resume, ixgbe_resume),
229#ifdef PCI_IOV
230	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
231	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
232	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
233#endif /* PCI_IOV */
234	DEVMETHOD_END
235};
236
/* Binds the "ix" device name and per-device softc size to the method table */
static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
/* Register the driver on the pci bus; no chained init/uninit functions */
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
249
250/*
251 * TUNEABLE PARAMETERS:
252 */
253
254static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
255
256/*
257 * AIM: Adaptive Interrupt Moderation
258 * which means that the interrupt rate
259 * is varied over time based on the
260 * traffic for that interrupt vector
261 */
262static int ixgbe_enable_aim = TRUE;
263SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
264    "Enable adaptive interrupt moderation");
265
266static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
267SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
268    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
269
270/* How many packets rxeof tries to clean at a time */
271static int ixgbe_rx_process_limit = 256;
272SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
273    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
274
275/* How many packets txeof tries to clean at a time */
276static int ixgbe_tx_process_limit = 256;
277SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
278    &ixgbe_tx_process_limit, 0,
279    "Maximum number of sent packets to process at a time, -1 means unlimited");
280
281/* Flow control setting, default to full */
282static int ixgbe_flow_control = ixgbe_fc_full;
283SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
284    &ixgbe_flow_control, 0, "Default flow control used for all adapters");
285
286/* Advertise Speed, default to 0 (auto) */
287static int ixgbe_advertise_speed = 0;
288SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
289    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
290
291/*
292 * Smart speed setting, default to on
293 * this only works as a compile option
294 * right now as its during attach, set
295 * this to 'ixgbe_smart_speed_off' to
296 * disable.
297 */
298static int ixgbe_smart_speed = ixgbe_smart_speed_on;
299
300/*
301 * MSI-X should be the default for best performance,
302 * but this allows it to be forced off for testing.
303 */
304static int ixgbe_enable_msix = 1;
305SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
306    "Enable MSI-X interrupts");
307
308/*
309 * Number of Queues, can be set to 0,
310 * it then autoconfigures based on the
311 * number of cpus with a max of 8. This
312 * can be overriden manually here.
313 */
314static int ixgbe_num_queues = 0;
315SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
316    "Number of queues to configure, 0 indicates autoconfigure");
317
318/*
319 * Number of TX descriptors per ring,
320 * setting higher than RX as this seems
321 * the better performing choice.
322 */
323static int ixgbe_txd = PERFORM_TXD;
324SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
325    "Number of transmit descriptors per queue");
326
327/* Number of RX descriptors per ring */
328static int ixgbe_rxd = PERFORM_RXD;
329SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
330    "Number of receive descriptors per queue");
331
332/*
333 * Defining this on will allow the use
334 * of unsupported SFP+ modules, note that
335 * doing so you are on your own :)
336 */
337static int allow_unsupported_sfp = FALSE;
338SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
339    &allow_unsupported_sfp, 0,
340    "Allow unsupported SFP modules...use at your own risk");
341
342/*
343 * Not sure if Flow Director is fully baked,
344 * so we'll default to turning it off.
345 */
346static int ixgbe_enable_fdir = 0;
347SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
348    "Enable Flow Director");
349
350/* Legacy Transmit (single queue) */
351static int ixgbe_enable_legacy_tx = 0;
352SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
353    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
354
355/* Receive-Side Scaling */
356static int ixgbe_enable_rss = 1;
357SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
358    "Enable Receive-Side Scaling (RSS)");
359
360/* Keep running tab on them for sanity check */
361static int ixgbe_total_ports;
362
363static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
364static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
365
366MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
367
368/************************************************************************
369 * ixgbe_initialize_rss_mapping
370 ************************************************************************/
371static void
372ixgbe_initialize_rss_mapping(struct adapter *adapter)
373{
374	struct ixgbe_hw *hw = &adapter->hw;
375	u32             reta = 0, mrqc, rss_key[10];
376	int             queue_id, table_size, index_mult;
377	int             i, j;
378	u32             rss_hash_config;
379
380	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
381		/* Fetch the configured RSS key */
382		rss_getkey((uint8_t *)&rss_key);
383	} else {
384		/* set up random bits */
385		arc4rand(&rss_key, sizeof(rss_key), 0);
386	}
387
388	/* Set multiplier for RETA setup and table size based on MAC */
389	index_mult = 0x1;
390	table_size = 128;
391	switch (adapter->hw.mac.type) {
392	case ixgbe_mac_82598EB:
393		index_mult = 0x11;
394		break;
395	case ixgbe_mac_X550:
396	case ixgbe_mac_X550EM_x:
397	case ixgbe_mac_X550EM_a:
398		table_size = 512;
399		break;
400	default:
401		break;
402	}
403
404	/* Set up the redirection table */
405	for (i = 0, j = 0; i < table_size; i++, j++) {
406		if (j == adapter->num_queues)
407			j = 0;
408
409		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
410			/*
411			 * Fetch the RSS bucket id for the given indirection
412			 * entry. Cap it at the number of configured buckets
413			 * (which is num_queues.)
414			 */
415			queue_id = rss_get_indirection_to_bucket(i);
416			queue_id = queue_id % adapter->num_queues;
417		} else
418			queue_id = (j * index_mult);
419
420		/*
421		 * The low 8 bits are for hash value (n+0);
422		 * The next 8 bits are for hash value (n+1), etc.
423		 */
424		reta = reta >> 8;
425		reta = reta | (((uint32_t)queue_id) << 24);
426		if ((i & 3) == 3) {
427			if (i < 128)
428				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
429			else
430				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
431				    reta);
432			reta = 0;
433		}
434	}
435
436	/* Now fill our hash function seeds */
437	for (i = 0; i < 10; i++)
438		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
439
440	/* Perform hash on these packet types */
441	if (adapter->feat_en & IXGBE_FEATURE_RSS)
442		rss_hash_config = rss_gethashconfig();
443	else {
444		/*
445		 * Disable UDP - IP fragments aren't currently being handled
446		 * and so we end up with a mix of 2-tuple and 4-tuple
447		 * traffic.
448		 */
449		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
450		                | RSS_HASHTYPE_RSS_TCP_IPV4
451		                | RSS_HASHTYPE_RSS_IPV6
452		                | RSS_HASHTYPE_RSS_TCP_IPV6
453		                | RSS_HASHTYPE_RSS_IPV6_EX
454		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
455	}
456
457	mrqc = IXGBE_MRQC_RSSEN;
458	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
459		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
460	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
461		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
462	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
463		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
464	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
465		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
466	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
467		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
468	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
469		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
470	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
471		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
472	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
473		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
474		    __func__);
475	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
476		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
477	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
478		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
479	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
480	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
481} /* ixgbe_initialize_rss_mapping */
482
483/************************************************************************
484 * ixgbe_initialize_receive_units - Setup receive registers and features.
485 ************************************************************************/
486#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
487
488static void
489ixgbe_initialize_receive_units(struct adapter *adapter)
490{
491	struct rx_ring  *rxr = adapter->rx_rings;
492	struct ixgbe_hw *hw = &adapter->hw;
493	struct ifnet    *ifp = adapter->ifp;
494	int             i, j;
495	u32             bufsz, fctrl, srrctl, rxcsum;
496	u32             hlreg;
497
498	/*
499	 * Make sure receives are disabled while
500	 * setting up the descriptor ring
501	 */
502	ixgbe_disable_rx(hw);
503
504	/* Enable broadcasts */
505	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
506	fctrl |= IXGBE_FCTRL_BAM;
507	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
508		fctrl |= IXGBE_FCTRL_DPF;
509		fctrl |= IXGBE_FCTRL_PMCF;
510	}
511	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
512
513	/* Set for Jumbo Frames? */
514	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
515	if (ifp->if_mtu > ETHERMTU)
516		hlreg |= IXGBE_HLREG0_JUMBOEN;
517	else
518		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
519
520#ifdef DEV_NETMAP
521	/* CRC stripping is conditional in Netmap */
522	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
523	    (ifp->if_capenable & IFCAP_NETMAP) &&
524	    !ix_crcstrip)
525		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
526	else
527#endif /* DEV_NETMAP */
528		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
529
530	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
531
532	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
533	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
534
535	for (i = 0; i < adapter->num_queues; i++, rxr++) {
536		u64 rdba = rxr->rxdma.dma_paddr;
537		j = rxr->me;
538
539		/* Setup the Base and Length of the Rx Descriptor Ring */
540		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
541		    (rdba & 0x00000000ffffffffULL));
542		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
543		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
544		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
545
546		/* Set up the SRRCTL register */
547		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
548		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
549		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
550		srrctl |= bufsz;
551		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
552
553		/*
554		 * Set DROP_EN iff we have no flow control and >1 queue.
555		 * Note that srrctl was cleared shortly before during reset,
556		 * so we do not need to clear the bit, but do it just in case
557		 * this code is moved elsewhere.
558		 */
559		if (adapter->num_queues > 1 &&
560		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
561			srrctl |= IXGBE_SRRCTL_DROP_EN;
562		} else {
563			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
564		}
565
566		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
567
568		/* Setup the HW Rx Head and Tail Descriptor Pointers */
569		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
570		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
571
572		/* Set the driver rx tail address */
573		rxr->tail =  IXGBE_RDT(rxr->me);
574	}
575
576	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
577		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
578		            | IXGBE_PSRTYPE_UDPHDR
579		            | IXGBE_PSRTYPE_IPV4HDR
580		            | IXGBE_PSRTYPE_IPV6HDR;
581		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
582	}
583
584	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
585
586	ixgbe_initialize_rss_mapping(adapter);
587
588	if (adapter->num_queues > 1) {
589		/* RSS and RX IPP Checksum are mutually exclusive */
590		rxcsum |= IXGBE_RXCSUM_PCSD;
591	}
592
593	if (ifp->if_capenable & IFCAP_RXCSUM)
594		rxcsum |= IXGBE_RXCSUM_PCSD;
595
596	/* This is useful for calculating UDP/IP fragment checksums */
597	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
598		rxcsum |= IXGBE_RXCSUM_IPPCSE;
599
600	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
601
602	return;
603} /* ixgbe_initialize_receive_units */
604
605/************************************************************************
606 * ixgbe_initialize_transmit_units - Enable transmit units.
607 ************************************************************************/
608static void
609ixgbe_initialize_transmit_units(struct adapter *adapter)
610{
611	struct tx_ring  *txr = adapter->tx_rings;
612	struct ixgbe_hw *hw = &adapter->hw;
613
614	/* Setup the Base and Length of the Tx Descriptor Ring */
615	for (int i = 0; i < adapter->num_queues; i++, txr++) {
616		u64 tdba = txr->txdma.dma_paddr;
617		u32 txctrl = 0;
618		int j = txr->me;
619
620		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
621		    (tdba & 0x00000000ffffffffULL));
622		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
623		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
624		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
625
626		/* Setup the HW Tx Head and Tail descriptor pointers */
627		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
628		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
629
630		/* Cache the tail address */
631		txr->tail = IXGBE_TDT(j);
632
633		/* Disable Head Writeback */
634		/*
635		 * Note: for X550 series devices, these registers are actually
636		 * prefixed with TPH_ isntead of DCA_, but the addresses and
637		 * fields remain the same.
638		 */
639		switch (hw->mac.type) {
640		case ixgbe_mac_82598EB:
641			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
642			break;
643		default:
644			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
645			break;
646		}
647		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
648		switch (hw->mac.type) {
649		case ixgbe_mac_82598EB:
650			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
651			break;
652		default:
653			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
654			break;
655		}
656
657	}
658
659	if (hw->mac.type != ixgbe_mac_82598EB) {
660		u32 dmatxctl, rttdcs;
661
662		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
663		dmatxctl |= IXGBE_DMATXCTL_TE;
664		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
665		/* Disable arbiter to set MTQC */
666		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
667		rttdcs |= IXGBE_RTTDCS_ARBDIS;
668		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
669		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
670		    ixgbe_get_mtqc(adapter->iov_mode));
671		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
672		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
673	}
674
675	return;
676} /* ixgbe_initialize_transmit_units */
677
678/************************************************************************
679 * ixgbe_attach - Device initialization routine
680 *
681 *   Called when the driver is being loaded.
682 *   Identifies the type of hardware, allocates all resources
683 *   and initializes the hardware.
684 *
685 *   return 0 on success, positive on failure
686 ************************************************************************/
687static int
688ixgbe_attach(device_t dev)
689{
690	struct adapter  *adapter;
691	struct ixgbe_hw *hw;
692	int             error = 0;
693	u32             ctrl_ext;
694
695	INIT_DEBUGOUT("ixgbe_attach: begin");
696
697	/* Allocate, clear, and link in our adapter structure */
698	adapter = device_get_softc(dev);
699	adapter->hw.back = adapter;
700	adapter->dev = dev;
701	hw = &adapter->hw;
702
703	/* Core Lock Init*/
704	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
705
706	/* Set up the timer callout */
707	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
708
709	/* Determine hardware revision */
710	hw->vendor_id = pci_get_vendor(dev);
711	hw->device_id = pci_get_device(dev);
712	hw->revision_id = pci_get_revid(dev);
713	hw->subsystem_vendor_id = pci_get_subvendor(dev);
714	hw->subsystem_device_id = pci_get_subdevice(dev);
715
716	/*
717	 * Make sure BUSMASTER is set
718	 */
719	pci_enable_busmaster(dev);
720
721	/* Do base PCI setup - map BAR0 */
722	if (ixgbe_allocate_pci_resources(adapter)) {
723		device_printf(dev, "Allocation of PCI resources failed\n");
724		error = ENXIO;
725		goto err_out;
726	}
727
728	/* let hardware know driver is loaded */
729	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
730	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
731	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
732
733	hw->allow_unsupported_sfp = allow_unsupported_sfp;
734
735	/*
736	 * Initialize the shared code
737	 */
738	if (ixgbe_init_shared_code(hw)) {
739		device_printf(dev, "Unable to initialize the shared code\n");
740		error = ENXIO;
741		goto err_out;
742	}
743
744	if (hw->mbx.ops.init_params)
745		hw->mbx.ops.init_params(hw);
746
747
748	/* Pick up the 82599 settings */
749	if (hw->mac.type != ixgbe_mac_82598EB) {
750		hw->phy.smart_speed = ixgbe_smart_speed;
751		adapter->num_segs = IXGBE_82599_SCATTER;
752	} else
753		adapter->num_segs = IXGBE_82598_SCATTER;
754
755	ixgbe_init_device_features(adapter);
756
757	if (ixgbe_configure_interrupts(adapter)) {
758		error = ENXIO;
759		goto err_out;
760	}
761
762	/* Allocate multicast array memory. */
763	adapter->mta = malloc(sizeof(*adapter->mta) *
764	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
765	if (adapter->mta == NULL) {
766		device_printf(dev, "Can not allocate multicast setup array\n");
767		error = ENOMEM;
768		goto err_out;
769	}
770
771	/* Enable WoL (if supported) */
772	ixgbe_check_wol_support(adapter);
773
774	/* Register for VLAN events */
775	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
776	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
777	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
778	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
779
780	/* Verify adapter fan is still functional (if applicable) */
781	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
782		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
783		ixgbe_check_fan_failure(adapter, esdp, FALSE);
784	}
785
786	/* Ensure SW/FW semaphore is free */
787	ixgbe_init_swfw_semaphore(hw);
788
789	/* Enable EEE power saving */
790	if (adapter->feat_en & IXGBE_FEATURE_EEE)
791		hw->mac.ops.setup_eee(hw, TRUE);
792
793	/* Set an initial default flow control value */
794	hw->fc.requested_mode = ixgbe_flow_control;
795
796	/* Sysctls for limiting the amount of work done in the taskqueues */
797	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
798	    "max number of rx packets to process",
799	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
800
801	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
802	    "max number of tx packets to process",
803	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
804
805	/* Do descriptor calc and sanity checks */
806	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
807	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
808		device_printf(dev, "TXD config issue, using default!\n");
809		adapter->num_tx_desc = DEFAULT_TXD;
810	} else
811		adapter->num_tx_desc = ixgbe_txd;
812
813	/*
814	 * With many RX rings it is easy to exceed the
815	 * system mbuf allocation. Tuning nmbclusters
816	 * can alleviate this.
817	 */
818	if (nmbclusters > 0) {
819		int s;
820		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
821		if (s > nmbclusters) {
822			device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
823			ixgbe_rxd = DEFAULT_RXD;
824		}
825	}
826
827	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
828	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
829		device_printf(dev, "RXD config issue, using default!\n");
830		adapter->num_rx_desc = DEFAULT_RXD;
831	} else
832		adapter->num_rx_desc = ixgbe_rxd;
833
834	/* Allocate our TX/RX Queues */
835	if (ixgbe_allocate_queues(adapter)) {
836		error = ENOMEM;
837		goto err_out;
838	}
839
840	hw->phy.reset_if_overtemp = TRUE;
841	error = ixgbe_reset_hw(hw);
842	hw->phy.reset_if_overtemp = FALSE;
843	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
844		/*
845		 * No optics in this port, set up
846		 * so the timer routine will probe
847		 * for later insertion.
848		 */
849		adapter->sfp_probe = TRUE;
850		error = IXGBE_SUCCESS;
851	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
852		device_printf(dev, "Unsupported SFP+ module detected!\n");
853		error = EIO;
854		goto err_late;
855	} else if (error) {
856		device_printf(dev, "Hardware initialization failed\n");
857		error = EIO;
858		goto err_late;
859	}
860
861	/* Make sure we have a good EEPROM before we read from it */
862	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
863		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
864		error = EIO;
865		goto err_late;
866	}
867
868	/* Setup OS specific network interface */
869	if (ixgbe_setup_interface(dev, adapter) != 0)
870		goto err_late;
871
872	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
873		error = ixgbe_allocate_msix(adapter);
874	else
875		error = ixgbe_allocate_legacy(adapter);
876	if (error)
877		goto err_late;
878
879	error = ixgbe_start_hw(hw);
880	switch (error) {
881	case IXGBE_ERR_EEPROM_VERSION:
882		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
883		break;
884	case IXGBE_ERR_SFP_NOT_SUPPORTED:
885		device_printf(dev, "Unsupported SFP+ Module\n");
886		error = EIO;
887		goto err_late;
888	case IXGBE_ERR_SFP_NOT_PRESENT:
889		device_printf(dev, "No SFP+ Module found\n");
890		/* falls thru */
891	default:
892		break;
893	}
894
895	/* Enable the optics for 82599 SFP+ fiber */
896	ixgbe_enable_tx_laser(hw);
897
898	/* Enable power to the phy. */
899	ixgbe_set_phy_power(hw, TRUE);
900
901	/* Initialize statistics */
902	ixgbe_update_stats_counters(adapter);
903
904	/* Check PCIE slot type/speed/width */
905	ixgbe_get_slot_info(adapter);
906
907	/*
908	 * Do time init and sysctl init here, but
909	 * only on the first port of a bypass adapter.
910	 */
911	ixgbe_bypass_init(adapter);
912
913	/* Set an initial dmac value */
914	adapter->dmac = 0;
915	/* Set initial advertised speeds (if applicable) */
916	adapter->advertise = ixgbe_get_advertise(adapter);
917
918	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
919		ixgbe_define_iov_schemas(dev, &error);
920
921	/* Add sysctls */
922	ixgbe_add_device_sysctls(adapter);
923	ixgbe_add_hw_stats(adapter);
924
925	/* For Netmap */
926	adapter->init_locked = ixgbe_init_locked;
927	adapter->stop_locked = ixgbe_stop;
928
929	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
930		ixgbe_netmap_attach(adapter);
931
932	INIT_DEBUGOUT("ixgbe_attach: end");
933
934	return (0);
935
936err_late:
937	ixgbe_free_transmit_structures(adapter);
938	ixgbe_free_receive_structures(adapter);
939	free(adapter->queues, M_DEVBUF);
940err_out:
941	if (adapter->ifp != NULL)
942		if_free(adapter->ifp);
943	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
944	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
945	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
946	ixgbe_free_pci_resources(adapter);
947	free(adapter->mta, M_IXGBE);
948	IXGBE_CORE_LOCK_DESTROY(adapter);
949
950	return (error);
951} /* ixgbe_attach */
952
953/************************************************************************
954 * ixgbe_check_wol_support
955 *
956 *   Checks whether the adapter's ports are capable of
957 *   Wake On LAN by reading the adapter's NVM.
958 *
959 *   Sets each port's hw->wol_enabled value depending
960 *   on the value read here.
961 ************************************************************************/
962static void
963ixgbe_check_wol_support(struct adapter *adapter)
964{
965	struct ixgbe_hw *hw = &adapter->hw;
966	u16             dev_caps = 0;
967
968	/* Find out WoL support for port */
969	adapter->wol_support = hw->wol_enabled = 0;
970	ixgbe_get_device_caps(hw, &dev_caps);
971	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
972	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
973	     hw->bus.func == 0))
974		adapter->wol_support = hw->wol_enabled = 1;
975
976	/* Save initial wake up filter configuration */
977	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
978
979	return;
980} /* ixgbe_check_wol_support */
981
982/************************************************************************
983 * ixgbe_setup_interface
984 *
985 *   Setup networking device structure and register an interface.
986 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	/* Allocate the ifnet and stash it in the softc */
	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixgbe_get_counter);
#endif
#if __FreeBSD_version >= 1100045
	/*
	 * TSO parameters.
	 * NOTE(review): 65518 appears to be the maximum TSO payload minus
	 * Ethernet framing overhead — confirm derivation against the
	 * hardware datasheet before changing.
	 */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Select the transmit path: legacy if_start/ifq, or the default
	 * multiqueue if_transmit.  The file-scope ixgbe_start_locked and
	 * ixgbe_ring_empty hooks are pointed at the matching variants so
	 * the rest of the driver is path-agnostic.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
		ifp->if_start = ixgbe_legacy_start;
		IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
		ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
		IFQ_SET_READY(&ifp->if_snd);
		ixgbe_start_locked = ixgbe_legacy_start_locked;
		ixgbe_ring_empty = ixgbe_legacy_ring_empty;
	} else {
		ifp->if_transmit = ixgbe_mq_start;
		ifp->if_qflush = ixgbe_qflush;
		ixgbe_start_locked = ixgbe_mq_start_locked;
		ixgbe_ring_empty = drbr_empty;
	}

	ether_ifattach(ifp, adapter->hw.mac.addr);

	/* NOTE(review): relies on ether_ifattach() having set if_mtu */
	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	                     |  IFCAP_HWCSUM_IPV6
	                     |  IFCAP_TSO
	                     |  IFCAP_LRO
	                     |  IFCAP_VLAN_HWTAGGING
	                     |  IFCAP_VLAN_HWTSO
	                     |  IFCAP_VLAN_HWCSUM
	                     |  IFCAP_JUMBO_MTU
	                     |  IFCAP_VLAN_MTU
	                     |  IFCAP_HWSTATS;

	/* Enable the above capabilities by default */
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
1077
1078#if __FreeBSD_version >= 1100036
1079/************************************************************************
1080 * ixgbe_get_counter
1081 ************************************************************************/
1082static uint64_t
1083ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
1084{
1085	struct adapter *adapter;
1086	struct tx_ring *txr;
1087	uint64_t       rv;
1088
1089	adapter = if_getsoftc(ifp);
1090
1091	switch (cnt) {
1092	case IFCOUNTER_IPACKETS:
1093		return (adapter->ipackets);
1094	case IFCOUNTER_OPACKETS:
1095		return (adapter->opackets);
1096	case IFCOUNTER_IBYTES:
1097		return (adapter->ibytes);
1098	case IFCOUNTER_OBYTES:
1099		return (adapter->obytes);
1100	case IFCOUNTER_IMCASTS:
1101		return (adapter->imcasts);
1102	case IFCOUNTER_OMCASTS:
1103		return (adapter->omcasts);
1104	case IFCOUNTER_COLLISIONS:
1105		return (0);
1106	case IFCOUNTER_IQDROPS:
1107		return (adapter->iqdrops);
1108	case IFCOUNTER_OQDROPS:
1109		rv = 0;
1110		txr = adapter->tx_rings;
1111		for (int i = 0; i < adapter->num_queues; i++, txr++)
1112			rv += txr->br->br_drops;
1113		return (rv);
1114	case IFCOUNTER_IERRORS:
1115		return (adapter->ierrors);
1116	default:
1117		return (if_get_counter_default(ifp, cnt));
1118	}
1119} /* ixgbe_get_counter */
1120#endif
1121
1122/************************************************************************
1123 * ixgbe_add_media_types
1124 ************************************************************************/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	u64             layer;

	/* Bitmask of supported PHY layers, resolved earlier in attach */
	layer = adapter->phy_layer;

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);

	/* Direct-attach copper (passive or active) maps to 10G TWINAX */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	/*
	 * Multispeed fiber modules can also link at 1G, so advertise the
	 * matching 1G fiber media alongside the 10G one.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	/*
	 * Older kernels without IFM_ETH_XTYPE lack ifmedia defines for the
	 * backplane (KR/KX) layers; map each to the closest existing media
	 * type and log the substitution so users can tell what happened.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	/* 1000baseBX has no ifmedia mapping at all; only report it */
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	/* The 82598AT additionally gets explicit 1000baseT entries */
	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	/* Autoselect is always available */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */
1207
1208/************************************************************************
1209 * ixgbe_is_sfp
1210 ************************************************************************/
1211static inline bool
1212ixgbe_is_sfp(struct ixgbe_hw *hw)
1213{
1214	switch (hw->mac.type) {
1215	case ixgbe_mac_82598EB:
1216		if (hw->phy.type == ixgbe_phy_nl)
1217			return TRUE;
1218		return FALSE;
1219	case ixgbe_mac_82599EB:
1220		switch (hw->mac.ops.get_media_type(hw)) {
1221		case ixgbe_media_type_fiber:
1222		case ixgbe_media_type_fiber_qsfp:
1223			return TRUE;
1224		default:
1225			return FALSE;
1226		}
1227	case ixgbe_mac_X550EM_x:
1228	case ixgbe_mac_X550EM_a:
1229		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1230			return TRUE;
1231		return FALSE;
1232	default:
1233		return FALSE;
1234	}
1235} /* ixgbe_is_sfp */
1236
1237/************************************************************************
1238 * ixgbe_config_link
1239 ************************************************************************/
1240static void
1241ixgbe_config_link(struct adapter *adapter)
1242{
1243	struct ixgbe_hw *hw = &adapter->hw;
1244	u32             autoneg, err = 0;
1245	bool            sfp, negotiate;
1246
1247	sfp = ixgbe_is_sfp(hw);
1248
1249	if (sfp) {
1250		if (hw->phy.multispeed_fiber) {
1251			hw->mac.ops.setup_sfp(hw);
1252			ixgbe_enable_tx_laser(hw);
1253			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1254		} else
1255			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1256	} else {
1257		if (hw->mac.ops.check_link)
1258			err = ixgbe_check_link(hw, &adapter->link_speed,
1259			    &adapter->link_up, FALSE);
1260		if (err)
1261			goto out;
1262		autoneg = hw->phy.autoneg_advertised;
1263		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1264			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1265			    &negotiate);
1266		if (err)
1267			goto out;
1268		if (hw->mac.ops.setup_link)
1269			err = hw->mac.ops.setup_link(hw, autoneg,
1270			    adapter->link_up);
1271	}
1272out:
1273
1274	return;
1275} /* ixgbe_config_link */
1276
1277/************************************************************************
1278 * ixgbe_update_stats_counters - Update board statistics counters.
1279 ************************************************************************/
1280static void
1281ixgbe_update_stats_counters(struct adapter *adapter)
1282{
1283	struct ixgbe_hw       *hw = &adapter->hw;
1284	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1285	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1286	u64                   total_missed_rx = 0;
1287
1288	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1289	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1290	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1291	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1292	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1293
1294	for (int i = 0; i < 16; i++) {
1295		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1296		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1297		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1298	}
1299	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1300	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1301	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1302
1303	/* Hardware workaround, gprc counts missed packets */
1304	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1305	stats->gprc -= missed_rx;
1306
1307	if (hw->mac.type != ixgbe_mac_82598EB) {
1308		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1309		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1310		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1311		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1312		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1313		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1314		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1315		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1316	} else {
1317		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1318		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1319		/* 82598 only has a counter in the high register */
1320		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1321		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1322		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1323	}
1324
1325	/*
1326	 * Workaround: mprc hardware is incorrectly counting
1327	 * broadcasts, so for now we subtract those.
1328	 */
1329	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1330	stats->bprc += bprc;
1331	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1332	if (hw->mac.type == ixgbe_mac_82598EB)
1333		stats->mprc -= bprc;
1334
1335	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1336	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1337	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1338	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1339	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1340	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1341
1342	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1343	stats->lxontxc += lxon;
1344	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1345	stats->lxofftxc += lxoff;
1346	total = lxon + lxoff;
1347
1348	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1349	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1350	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1351	stats->gptc -= total;
1352	stats->mptc -= total;
1353	stats->ptc64 -= total;
1354	stats->gotc -= total * ETHER_MIN_LEN;
1355
1356	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1357	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1358	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1359	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1360	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1361	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1362	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1363	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1364	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1365	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1366	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1367	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1368	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1369	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1370	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1371	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1372	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1373	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1374	/* Only read FCOE on 82599 */
1375	if (hw->mac.type != ixgbe_mac_82598EB) {
1376		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1377		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1378		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1379		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1380		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1381	}
1382
1383	/* Fill out the OS statistics structure */
1384	IXGBE_SET_IPACKETS(adapter, stats->gprc);
1385	IXGBE_SET_OPACKETS(adapter, stats->gptc);
1386	IXGBE_SET_IBYTES(adapter, stats->gorc);
1387	IXGBE_SET_OBYTES(adapter, stats->gotc);
1388	IXGBE_SET_IMCASTS(adapter, stats->mprc);
1389	IXGBE_SET_OMCASTS(adapter, stats->mptc);
1390	IXGBE_SET_COLLISIONS(adapter, 0);
1391	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1392	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1393} /* ixgbe_update_stats_counters */
1394
1395/************************************************************************
1396 * ixgbe_add_hw_stats
1397 *
1398 *   Add sysctl variables, one per statistic, to the system.
1399 ************************************************************************/
1400static void
1401ixgbe_add_hw_stats(struct adapter *adapter)
1402{
1403	device_t               dev = adapter->dev;
1404	struct tx_ring         *txr = adapter->tx_rings;
1405	struct rx_ring         *rxr = adapter->rx_rings;
1406	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1407	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1408	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1409	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1410	struct sysctl_oid      *stat_node, *queue_node;
1411	struct sysctl_oid_list *stat_list, *queue_list;
1412
1413#define QUEUE_NAME_LEN 32
1414	char                   namebuf[QUEUE_NAME_LEN];
1415
1416	/* Driver Statistics */
1417	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1418	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1419	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1420	    CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1421	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1422	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1423	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1424	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1425
1426	for (int i = 0; i < adapter->num_queues; i++, txr++) {
1427		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1428		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1429		    CTLFLAG_RD, NULL, "Queue Name");
1430		queue_list = SYSCTL_CHILDREN(queue_node);
1431
1432		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1433		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
1434		    sizeof(&adapter->queues[i]),
1435		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1436		    "Interrupt Rate");
1437		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1438		    CTLFLAG_RD, &(adapter->queues[i].irqs),
1439		    "irqs on this queue");
1440		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1441		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1442		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1443		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1444		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1445		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1446		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1447		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1448		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1449		    CTLFLAG_RD, &txr->no_tx_dma_setup,
1450		    "Driver tx dma failure in xmit");
1451		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
1452		    CTLFLAG_RD, &txr->no_desc_avail,
1453		    "Queue No Descriptor Available");
1454		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1455		    CTLFLAG_RD, &txr->total_packets,
1456		    "Queue Packets Transmitted");
1457		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1458		    CTLFLAG_RD, &txr->br->br_drops,
1459		    "Packets dropped in buf_ring");
1460	}
1461
1462	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1463		struct lro_ctrl *lro = &rxr->lro;
1464
1465		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1466		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1467		    CTLFLAG_RD, NULL, "Queue Name");
1468		queue_list = SYSCTL_CHILDREN(queue_node);
1469
1470		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1471		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1472		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1473		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1474		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1475		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1476		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1477		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1478		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1479		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1480		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1481		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1482		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1483		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1484		SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
1485		    CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
1486		SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
1487		    CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
1488	}
1489
1490	/* MAC stats get their own sub node */
1491
1492	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1493	    CTLFLAG_RD, NULL, "MAC Statistics");
1494	stat_list = SYSCTL_CHILDREN(stat_node);
1495
1496	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1497	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1498	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1499	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1500	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1501	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1502	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1503	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1504	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1505	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1506	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1507	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1508	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1509	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1510	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1511	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1512
1513	/* Flow Control stats */
1514	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1515	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1516	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1517	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1518	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1519	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1520	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1521	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1522
1523	/* Packet Reception Stats */
1524	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1525	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1526	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1527	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1528	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1529	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1530	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1531	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1532	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1533	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1534	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1535	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1536	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1537	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1538	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1539	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1540	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1541	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1542	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1543	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1544	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1545	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1546	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1547	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1548	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1549	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1550	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1551	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1552	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1553	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1554	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1555	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1556	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1557	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1558	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1559	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1560	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1561	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1562
1563	/* Packet Transmission Stats */
1564	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1565	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1566	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1567	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1568	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1569	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1570	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1571	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1572	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1573	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1574	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1575	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1576	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1577	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1578	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1579	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1580	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1581	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1582	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1583	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1584	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1585	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1586	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1587	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1588} /* ixgbe_add_hw_stats */
1589
1590/************************************************************************
1591 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1592 *
1593 *   Retrieves the TDH value from the hardware
1594 ************************************************************************/
1595static int
1596ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1597{
1598	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1599	int            error;
1600	unsigned int   val;
1601
1602	if (!txr)
1603		return (0);
1604
1605	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1606	error = sysctl_handle_int(oidp, &val, 0, req);
1607	if (error || !req->newptr)
1608		return error;
1609
1610	return (0);
1611} /* ixgbe_sysctl_tdh_handler */
1612
1613/************************************************************************
1614 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1615 *
1616 *   Retrieves the TDT value from the hardware
1617 ************************************************************************/
1618static int
1619ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1620{
1621	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1622	int            error;
1623	unsigned int   val;
1624
1625	if (!txr)
1626		return (0);
1627
1628	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1629	error = sysctl_handle_int(oidp, &val, 0, req);
1630	if (error || !req->newptr)
1631		return error;
1632
1633	return (0);
1634} /* ixgbe_sysctl_tdt_handler */
1635
1636/************************************************************************
1637 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1638 *
1639 *   Retrieves the RDH value from the hardware
1640 ************************************************************************/
1641static int
1642ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1643{
1644	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1645	int            error;
1646	unsigned int   val;
1647
1648	if (!rxr)
1649		return (0);
1650
1651	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1652	error = sysctl_handle_int(oidp, &val, 0, req);
1653	if (error || !req->newptr)
1654		return error;
1655
1656	return (0);
1657} /* ixgbe_sysctl_rdh_handler */
1658
1659/************************************************************************
1660 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1661 *
1662 *   Retrieves the RDT value from the hardware
1663 ************************************************************************/
1664static int
1665ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1666{
1667	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1668	int            error;
1669	unsigned int   val;
1670
1671	if (!rxr)
1672		return (0);
1673
1674	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1675	error = sysctl_handle_int(oidp, &val, 0, req);
1676	if (error || !req->newptr)
1677		return error;
1678
1679	return (0);
1680} /* ixgbe_sysctl_rdt_handler */
1681
1682/************************************************************************
1683 * ixgbe_register_vlan
1684 *
1685 *   Run via vlan config EVENT, it enables us to use the
1686 *   HW Filter table since we can get the vlan id. This
1687 *   just creates the entry in the soft version of the
1688 *   VFTA, init will repopulate the real table.
1689 ************************************************************************/
1690static void
1691ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1692{
1693	struct adapter *adapter = ifp->if_softc;
1694	u16            index, bit;
1695
1696	if (ifp->if_softc != arg)   /* Not our event */
1697		return;
1698
1699	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1700		return;
1701
1702	IXGBE_CORE_LOCK(adapter);
1703	index = (vtag >> 5) & 0x7F;
1704	bit = vtag & 0x1F;
1705	adapter->shadow_vfta[index] |= (1 << bit);
1706	++adapter->num_vlans;
1707	ixgbe_setup_vlan_hw_support(adapter);
1708	IXGBE_CORE_UNLOCK(adapter);
1709} /* ixgbe_register_vlan */
1710
1711/************************************************************************
1712 * ixgbe_unregister_vlan
1713 *
1714 *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1715 ************************************************************************/
1716static void
1717ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1718{
1719	struct adapter *adapter = ifp->if_softc;
1720	u16            index, bit;
1721
1722	if (ifp->if_softc != arg)
1723		return;
1724
1725	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1726		return;
1727
1728	IXGBE_CORE_LOCK(adapter);
1729	index = (vtag >> 5) & 0x7F;
1730	bit = vtag & 0x1F;
1731	adapter->shadow_vfta[index] &= ~(1 << bit);
1732	--adapter->num_vlans;
1733	/* Re-init to load the changes */
1734	ixgbe_setup_vlan_hw_support(adapter);
1735	IXGBE_CORE_UNLOCK(adapter);
1736} /* ixgbe_unregister_vlan */
1737
1738/************************************************************************
1739 * ixgbe_setup_vlan_hw_support
1740 ************************************************************************/
1741static void
1742ixgbe_setup_vlan_hw_support(struct adapter *adapter)
1743{
1744	struct ifnet    *ifp = adapter->ifp;
1745	struct ixgbe_hw *hw = &adapter->hw;
1746	struct rx_ring  *rxr;
1747	int             i;
1748	u32             ctrl;
1749
1750
1751	/*
1752	 * We get here thru init_locked, meaning
1753	 * a soft reset, this has already cleared
1754	 * the VFTA and other state, so if there
1755	 * have been no vlan's registered do nothing.
1756	 */
1757	if (adapter->num_vlans == 0)
1758		return;
1759
1760	/* Setup the queues for vlans */
1761	for (i = 0; i < adapter->num_queues; i++) {
1762		rxr = &adapter->rx_rings[i];
1763		/* On 82599 the VLAN enable is per/queue in RXDCTL */
1764		if (hw->mac.type != ixgbe_mac_82598EB) {
1765			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1766			ctrl |= IXGBE_RXDCTL_VME;
1767			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1768		}
1769		rxr->vtag_strip = TRUE;
1770	}
1771
1772	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1773		return;
1774	/*
1775	 * A soft reset zero's out the VFTA, so
1776	 * we need to repopulate it now.
1777	 */
1778	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1779		if (adapter->shadow_vfta[i] != 0)
1780			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1781			    adapter->shadow_vfta[i]);
1782
1783	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1784	/* Enable the Filter Table if enabled */
1785	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1786		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1787		ctrl |= IXGBE_VLNCTRL_VFE;
1788	}
1789	if (hw->mac.type == ixgbe_mac_82598EB)
1790		ctrl |= IXGBE_VLNCTRL_VME;
1791	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1792} /* ixgbe_setup_vlan_hw_support */
1793
/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of
 *   the slot this adapter is plugged into.
 *
 *   For most parts the shared code fills in hw->bus from the device's
 *   own PCI-E link status.  Quad-port parts sit behind an internal
 *   PCI-E bridge, so for those we walk up the PCI tree and read the
 *   slot's link status register directly.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t              dev = adapter->dev;
	struct ixgbe_hw       *hw = &adapter->hw;
	u32                   offset;
	u16                   link;
	int                   bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	/* First hop: from the NIC function up past the internal bridge. */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Second hop: up to the bridge that owns the physical slot. */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
	/* Decode the link status word into hw->bus speed/width. */
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	     "Unknown"));

	if (bus_info_valid) {
		/* Warn when the slot can't feed the card at full rate. */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
1891
1892/************************************************************************
1893 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1894 ************************************************************************/
1895static inline void
1896ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1897{
1898	struct ixgbe_hw *hw = &adapter->hw;
1899	u64             queue = (u64)(1 << vector);
1900	u32             mask;
1901
1902	if (hw->mac.type == ixgbe_mac_82598EB) {
1903		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1904		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1905	} else {
1906		mask = (queue & 0xFFFFFFFF);
1907		if (mask)
1908			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1909		mask = (queue >> 32);
1910		if (mask)
1911			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1912	}
1913} /* ixgbe_enable_queue */
1914
1915/************************************************************************
1916 * ixgbe_disable_queue
1917 ************************************************************************/
1918static inline void
1919ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1920{
1921	struct ixgbe_hw *hw = &adapter->hw;
1922	u64             queue = (u64)(1 << vector);
1923	u32             mask;
1924
1925	if (hw->mac.type == ixgbe_mac_82598EB) {
1926		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1927		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1928	} else {
1929		mask = (queue & 0xFFFFFFFF);
1930		if (mask)
1931			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1932		mask = (queue >> 32);
1933		if (mask)
1934			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1935	}
1936} /* ixgbe_disable_queue */
1937
/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Per-queue interrupt handler: masks the vector, drains RX and TX
 *   completions, optionally recomputes the adaptive interrupt
 *   moderation (AIM) setting, and either re-enables the vector or
 *   defers remaining RX work to the queue taskqueue.
 ************************************************************************/
void
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring  *txr = que->txr;
	struct rx_ring  *rxr = que->rxr;
	bool            more;
	u32             newitr = 0;


	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Mask this vector until the work below is done. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	/* more == TRUE means rxeof hit its limit and RX work remains. */
	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/* Kick the transmit path if packets are queued. */
	if (!ixgbe_ring_empty(ifp, txr->br))
		ixgbe_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == FALSE)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Heuristic: moderate based on average bytes per packet. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* EITR register layout differs between 82598 and later MACs. */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		newitr |= newitr << 16;
	else
		newitr |= IXGBE_EITR_CNT_WDIS;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Defer leftover RX work, otherwise re-arm the vector now. */
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return;
} /* ixgbe_msix_que */
2025
/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 *
 *   Refreshes link state under the core lock, then translates the
 *   (phy layer, link speed) pair into an ifmedia active word.
 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter  *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int             layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report valid-but-inactive and bail. */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T) layers: map each speed to its -T media type. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper (twinax). */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
#ifndef IFM_ETH_XTYPE
	/* Old ifmedia headers: approximate backplane types with fiber. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	/* Newer ifmedia headers: real backplane (KR/KX4/KX) types. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
#endif

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2191
/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediopt option with ifconfig.
 *
 *   Maps the requested ifmedia subtype to an ixgbe_link_speed mask,
 *   asks the PHY layer to set up the link, and records the result in
 *   adapter->advertise.  Returns 0, EINVAL for an unsupported media
 *   word, or ENODEV for backplane media (not user-settable).
 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter   *adapter = ifp->if_softc;
	struct ifmedia   *ifm = &adapter->media;
	struct ixgbe_hw  *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane links are fixed; refuse manual media changes. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
		case IFM_10G_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_10G_LRM:
		case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
		case IFM_10G_SR: /* KR, too */
		case IFM_10G_CX4: /* KX4 */
#else
		case IFM_10G_KR:
		case IFM_10G_KX4:
#endif
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
#ifndef IFM_ETH_XTYPE
		case IFM_1000_CX: /* KX */
#else
		case IFM_1000_KX:
#endif
		case IFM_1000_LX:
		case IFM_1000_SX:
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_1000_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_10G_TWINAX:
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_100_TX:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			break;
		case IFM_10_T:
			speed |= IXGBE_LINK_SPEED_10_FULL;
			break;
		default:
			goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* Encode the advertised speeds: 1=100M, 2=1G, 4=10G, 8=10M. */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
2279
/************************************************************************
 * ixgbe_set_promisc
 *
 *   Program the FCTRL register's unicast/multicast promiscuous bits
 *   (UPE/MPE) from the interface flags.  Multicast promiscuous is
 *   also forced on when the multicast address list overflows the
 *   hardware filter.
 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int          mcnt = 0;
	u32          rctl;

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count link-layer multicast addresses, capped at the
		 * hardware filter limit. */
		struct ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
		IF_ADDR_LOCK(ifp);
#else
		if_maddr_rlock(ifp);
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
#else
		if_maddr_runlock(ifp);
#endif
	}
	/* Filter fits: multicast promiscuous not needed. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* Promisc/allmulti override the value written just above. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
2327
/************************************************************************
 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
 *
 *   Handles all non-queue ("other") interrupt causes: link state,
 *   flow director, ECC, over-temperature, SR-IOV mailbox, SFP module
 *   and external PHY events.  Real work is deferred to taskqueues.
 ************************************************************************/
static void
ixgbe_msix_link(void *arg)
{
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             eicr, eicr_mask;
	s32             retval;

	++adapter->link_irq;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		/* Mask LSC until the link task has processed it. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		taskqueue_enqueue(adapter->tq, &adapter->link_task);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
			/*
			 * NOTE(review): the early return above skips the
			 * EIMS_OTHER re-enable at the end of this function;
			 * presumably the FDIR task restores it - confirm.
			 */
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!!  Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a reports over-temp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}

		/* 82599: multi-speed fiber setup. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
} /* ixgbe_msix_link */
2445
2446/************************************************************************
2447 * ixgbe_sysctl_interrupt_rate_handler
2448 ************************************************************************/
2449static int
2450ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2451{
2452	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
2453	int             error;
2454	unsigned int    reg, usec, rate;
2455
2456	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2457	usec = ((reg & 0x0FF8) >> 3);
2458	if (usec > 0)
2459		rate = 500000 / usec;
2460	else
2461		rate = 0;
2462	error = sysctl_handle_int(oidp, &rate, 0, req);
2463	if (error || !req->newptr)
2464		return error;
2465	reg &= ~0xfff; /* default, no limitation */
2466	ixgbe_max_interrupt_rate = 0;
2467	if (rate > 0 && rate < 500000) {
2468		if (rate < 1000)
2469			rate = 1000;
2470		ixgbe_max_interrupt_rate = rate;
2471		reg |= ((4000000/rate) & 0xff8);
2472	}
2473	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2474
2475	return (0);
2476} /* ixgbe_sysctl_interrupt_rate_handler */
2477
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Register per-device sysctl nodes under the device's sysctl tree:
 *   common knobs for all parts, plus conditional nodes for X550
 *   (DMA coalescing), WoL-capable, X552/X557-AT PHY, and EEE parts.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t               dev = adapter->dev;
	struct ixgbe_hw        *hw = &adapter->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);

	/* Seed the per-device AIM flag from the loader tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &adapter->enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
	    IXGBE_SYSCTL_DESC_ADV_SPEED);

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
	    "I", "PCI Power State");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
		    "I", "DMA Coalesce");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
		    "I", "Enable/Disable Wake Up Filters");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		/* External PHY sysctls live under a "phy" sub-node. */
		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
		    CTLFLAG_RD, NULL, "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
		    "I", "Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
		    ixgbe_sysctl_phy_overtemp_occurred, "I",
		    "External PHY High Temperature Event Occurred");
	}

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
	}
} /* ixgbe_add_device_sysctls */
2556
2557/************************************************************************
2558 * ixgbe_allocate_pci_resources
2559 ************************************************************************/
2560static int
2561ixgbe_allocate_pci_resources(struct adapter *adapter)
2562{
2563	device_t dev = adapter->dev;
2564	int      rid;
2565
2566	rid = PCIR_BAR(0);
2567	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2568	    RF_ACTIVE);
2569
2570	if (!(adapter->pci_mem)) {
2571		device_printf(dev, "Unable to allocate bus resource: memory\n");
2572		return (ENXIO);
2573	}
2574
2575	/* Save bus_space values for READ/WRITE_REG macros */
2576	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2577	adapter->osdep.mem_bus_space_handle =
2578	    rman_get_bushandle(adapter->pci_mem);
2579	/* Set hw values for shared code */
2580	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2581
2582	return (0);
2583} /* ixgbe_allocate_pci_resources */
2584
2585/************************************************************************
2586 * ixgbe_detach - Device removal routine
2587 *
2588 *   Called when the driver is being removed.
2589 *   Stops the adapter and deallocates all the resources
2590 *   that were allocated for driver operation.
2591 *
2592 *   return 0 on success, positive on failure
2593 ************************************************************************/
static int
ixgbe_detach(device_t dev)
{
	struct adapter  *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring  *txr = adapter->tx_rings;
	u32             ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* SR-IOV VFs must be torn down before the PF can detach */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/*
	 * Detach from the network stack first so no new ioctls or
	 * transmits arrive while the rest is being torn down.
	 */
	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free the per-queue taskqueues */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
			/* txq_task only exists when not in legacy-TX mode */
			if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
				taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_drain(adapter->tq, &adapter->phy_task);
		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
			taskqueue_drain(adapter->tq, &adapter->fdir_task);
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Wait for the stats/watchdog callout to finish before freeing */
	callout_drain(&adapter->timer);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	/* Release PCI resources, child devices, and the ifnet itself */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	/* Free descriptor rings, queue array, and multicast table */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_IXGBE);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
2672
2673/************************************************************************
2674 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2675 *
2676 *   Prepare the adapter/port for LPLU and/or WoL
2677 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	s32             error = 0;

	/* Caller must hold the core lock (detach/suspend/shutdown paths) */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		/* Keep the PHY state across the stop so LPLU can be entered */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev, "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	/* 0 on success, or the enter_lplu() error code */
	return error;
} /* ixgbe_setup_low_power_mode */
2724
2725/************************************************************************
2726 * ixgbe_shutdown - Shutdown entry point
2727 ************************************************************************/
2728static int
2729ixgbe_shutdown(device_t dev)
2730{
2731	struct adapter *adapter = device_get_softc(dev);
2732	int            error = 0;
2733
2734	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2735
2736	IXGBE_CORE_LOCK(adapter);
2737	error = ixgbe_setup_low_power_mode(adapter);
2738	IXGBE_CORE_UNLOCK(adapter);
2739
2740	return (error);
2741} /* ixgbe_shutdown */
2742
2743/************************************************************************
2744 * ixgbe_suspend
2745 *
2746 *   From D0 to D3
2747 ************************************************************************/
2748static int
2749ixgbe_suspend(device_t dev)
2750{
2751	struct adapter *adapter = device_get_softc(dev);
2752	int            error = 0;
2753
2754	INIT_DEBUGOUT("ixgbe_suspend: begin");
2755
2756	IXGBE_CORE_LOCK(adapter);
2757
2758	error = ixgbe_setup_low_power_mode(adapter);
2759
2760	IXGBE_CORE_UNLOCK(adapter);
2761
2762	return (error);
2763} /* ixgbe_suspend */
2764
2765/************************************************************************
2766 * ixgbe_resume
2767 *
2768 *   From D3 to D0
2769 ************************************************************************/
2770static int
2771ixgbe_resume(device_t dev)
2772{
2773	struct adapter  *adapter = device_get_softc(dev);
2774	struct ifnet    *ifp = adapter->ifp;
2775	struct ixgbe_hw *hw = &adapter->hw;
2776	u32             wus;
2777
2778	INIT_DEBUGOUT("ixgbe_resume: begin");
2779
2780	IXGBE_CORE_LOCK(adapter);
2781
2782	/* Read & clear WUS register */
2783	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2784	if (wus)
2785		device_printf(dev, "Woken up by (WUS): %#010x\n",
2786		    IXGBE_READ_REG(hw, IXGBE_WUS));
2787	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2788	/* And clear WUFC until next low-power transition */
2789	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2790
2791	/*
2792	 * Required after D3->D0 transition;
2793	 * will re-advertise all previous advertised speeds
2794	 */
2795	if (ifp->if_flags & IFF_UP)
2796		ixgbe_init_locked(adapter);
2797
2798	IXGBE_CORE_UNLOCK(adapter);
2799
2800	return (0);
2801} /* ixgbe_resume */
2802
2803/************************************************************************
2804 * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
2805 *
2806 *   Takes the ifnet's if_capenable flags (e.g. set by the user using
2807 *   ifconfig) and indicates to the OS via the ifnet's if_hwassist
2808 *   field what mbuf offload flags the driver will understand.
2809 ************************************************************************/
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;

	/* Start clean and rebuild from the current capenable bits */
	ifp->if_hwassist = 0;
#if __FreeBSD_version >= 1000000
	/* FreeBSD 10+: per-protocol-family CSUM_* flags */
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
		/* 82598 is excluded from SCTP offload here -- presumably a
		 * hardware limitation of that MAC; confirm with datasheet */
		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_IP_SCTP;
	}
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
		ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_IP6_SCTP;
	}
#else
	/* Pre-10 kernels: single family-agnostic CSUM_* namespace */
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
	}
#endif
} /* ixgbe_set_if_hwassist */
2841
2842/************************************************************************
2843 * ixgbe_init_locked - Init entry point
2844 *
2845 *   Used in two ways: It is used by the stack as an init
2846 *   entry point in network interface structure. It is also
2847 *   used by the driver as a hw/sw initialization routine to
2848 *   get to a consistent state.
2849 *
2850 *   return 0 on success, positive on failure
2851 ************************************************************************/
void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet    *ifp = adapter->ifp;
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring  *txr;
	struct rx_ring  *rxr;
	u32             txdctl, mhadd;
	u32             rxdctl, rxctrl;
	u32             ctrl_ext;
	int             err = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the MAC and stop the watchdog before reprogramming */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	/* NOTE(review): last arg is literal 1 here but IXGBE_RAH_AV above --
	 * presumably the same "address valid" flag; confirm against the
	 * shared-code ixgbe_set_rar() definition. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Bring up the MAC/shared code, then IOV and TX units */
	ixgbe_init_hw(hw);
	ixgbe_initialize_iov(adapter);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	/* NOTE(review): 'j' is not reset per ring, so the 10 x 1ms
	 * enable-poll budget below is shared across ALL rx rings, not
	 * per ring -- looks intentional but worth confirming. */
	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/* Poll for the ring to report itself enabled */
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the stats/watchdog timer */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
} /* ixgbe_init_locked */
3072
3073/************************************************************************
3074 * ixgbe_init
3075 ************************************************************************/
static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = (struct adapter *)arg;

	/* Stack-facing entry point: just wrap the locked variant */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_init */
3087
3088/************************************************************************
3089 * ixgbe_set_ivar
3090 *
3091 *   Setup the correct IVAR register for a particular MSI-X interrupt
3092 *     (yes this is all very magic and confusing :)
3093 *    - entry is the register array entry
3094 *    - vector is the MSI-X vector for this queue
3095 *    - type is RX/TX/MISC
3096 ************************************************************************/
3097static void
3098ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3099{
3100	struct ixgbe_hw *hw = &adapter->hw;
3101	u32 ivar, index;
3102
3103	vector |= IXGBE_IVAR_ALLOC_VAL;
3104
3105	switch (hw->mac.type) {
3106
3107	case ixgbe_mac_82598EB:
3108		if (type == -1)
3109			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3110		else
3111			entry += (type * 64);
3112		index = (entry >> 2) & 0x1F;
3113		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3114		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3115		ivar |= (vector << (8 * (entry & 0x3)));
3116		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3117		break;
3118
3119	case ixgbe_mac_82599EB:
3120	case ixgbe_mac_X540:
3121	case ixgbe_mac_X550:
3122	case ixgbe_mac_X550EM_x:
3123	case ixgbe_mac_X550EM_a:
3124		if (type == -1) { /* MISC IVAR */
3125			index = (entry & 1) * 8;
3126			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3127			ivar &= ~(0xFF << index);
3128			ivar |= (vector << index);
3129			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3130		} else {          /* RX/TX IVARS */
3131			index = (16 * (entry & 1)) + (8 * type);
3132			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3133			ivar &= ~(0xFF << index);
3134			ivar |= (vector << index);
3135			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3136		}
3137
3138	default:
3139		break;
3140	}
3141} /* ixgbe_set_ivar */
3142
3143/************************************************************************
3144 * ixgbe_configure_ivars
3145 ************************************************************************/
3146static void
3147ixgbe_configure_ivars(struct adapter *adapter)
3148{
3149	struct ix_queue *que = adapter->queues;
3150	u32             newitr;
3151
3152	if (ixgbe_max_interrupt_rate > 0)
3153		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3154	else {
3155		/*
3156		 * Disable DMA coalescing if interrupt moderation is
3157		 * disabled.
3158		 */
3159		adapter->dmac = 0;
3160		newitr = 0;
3161	}
3162
3163	for (int i = 0; i < adapter->num_queues; i++, que++) {
3164		struct rx_ring *rxr = &adapter->rx_rings[i];
3165		struct tx_ring *txr = &adapter->tx_rings[i];
3166		/* First the RX queue entry */
3167		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3168		/* ... and the TX */
3169		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3170		/* Set an Initial EITR value */
3171		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3172	}
3173
3174	/* For the Link interrupt */
3175	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3176} /* ixgbe_configure_ivars */
3177
3178/************************************************************************
3179 * ixgbe_config_gpie
3180 ************************************************************************/
3181static void
3182ixgbe_config_gpie(struct adapter *adapter)
3183{
3184	struct ixgbe_hw *hw = &adapter->hw;
3185	u32             gpie;
3186
3187	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3188
3189	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3190		/* Enable Enhanced MSI-X mode */
3191		gpie |= IXGBE_GPIE_MSIX_MODE
3192		     |  IXGBE_GPIE_EIAME
3193		     |  IXGBE_GPIE_PBA_SUPPORT
3194		     |  IXGBE_GPIE_OCD;
3195	}
3196
3197	/* Fan Failure Interrupt */
3198	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3199		gpie |= IXGBE_SDP1_GPIEN;
3200
3201	/* Thermal Sensor Interrupt */
3202	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3203		gpie |= IXGBE_SDP0_GPIEN_X540;
3204
3205	/* Link detection */
3206	switch (hw->mac.type) {
3207	case ixgbe_mac_82599EB:
3208		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3209		break;
3210	case ixgbe_mac_X550EM_x:
3211	case ixgbe_mac_X550EM_a:
3212		gpie |= IXGBE_SDP0_GPIEN_X540;
3213		break;
3214	default:
3215		break;
3216	}
3217
3218	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3219
3220	return;
3221} /* ixgbe_config_gpie */
3222
3223/************************************************************************
3224 * ixgbe_config_delay_values
3225 *
3226 *   Requires adapter->max_frame_size to be set.
3227 ************************************************************************/
3228static void
3229ixgbe_config_delay_values(struct adapter *adapter)
3230{
3231	struct ixgbe_hw *hw = &adapter->hw;
3232	u32             rxpb, frame, size, tmp;
3233
3234	frame = adapter->max_frame_size;
3235
3236	/* Calculate High Water */
3237	switch (hw->mac.type) {
3238	case ixgbe_mac_X540:
3239	case ixgbe_mac_X550:
3240	case ixgbe_mac_X550EM_x:
3241	case ixgbe_mac_X550EM_a:
3242		tmp = IXGBE_DV_X540(frame, frame);
3243		break;
3244	default:
3245		tmp = IXGBE_DV(frame, frame);
3246		break;
3247	}
3248	size = IXGBE_BT2KB(tmp);
3249	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3250	hw->fc.high_water[0] = rxpb - size;
3251
3252	/* Now calculate Low Water */
3253	switch (hw->mac.type) {
3254	case ixgbe_mac_X540:
3255	case ixgbe_mac_X550:
3256	case ixgbe_mac_X550EM_x:
3257	case ixgbe_mac_X550EM_a:
3258		tmp = IXGBE_LOW_DV_X540(frame);
3259		break;
3260	default:
3261		tmp = IXGBE_LOW_DV(frame);
3262		break;
3263	}
3264	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3265
3266	hw->fc.pause_time = IXGBE_FC_PAUSE;
3267	hw->fc.send_xon = TRUE;
3268} /* ixgbe_config_delay_values */
3269
3270/************************************************************************
3271 * ixgbe_set_multi - Multicast Update
3272 *
3273 *   Called whenever multicast address list is updated.
3274 ************************************************************************/
3275static void
3276ixgbe_set_multi(struct adapter *adapter)
3277{
3278	struct ifmultiaddr   *ifma;
3279	struct ixgbe_mc_addr *mta;
3280	struct ifnet         *ifp = adapter->ifp;
3281	u8                   *update_ptr;
3282	int                  mcnt = 0;
3283	u32                  fctrl;
3284
3285	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
3286
3287	mta = adapter->mta;
3288	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3289
3290#if __FreeBSD_version < 800000
3291	IF_ADDR_LOCK(ifp);
3292#else
3293	if_maddr_rlock(ifp);
3294#endif
3295	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3296		if (ifma->ifma_addr->sa_family != AF_LINK)
3297			continue;
3298		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3299			break;
3300		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
3301		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3302		mta[mcnt].vmdq = adapter->pool;
3303		mcnt++;
3304	}
3305#if __FreeBSD_version < 800000
3306	IF_ADDR_UNLOCK(ifp);
3307#else
3308	if_maddr_runlock(ifp);
3309#endif
3310
3311	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3312	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3313	if (ifp->if_flags & IFF_PROMISC)
3314		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3315	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3316	    ifp->if_flags & IFF_ALLMULTI) {
3317		fctrl |= IXGBE_FCTRL_MPE;
3318		fctrl &= ~IXGBE_FCTRL_UPE;
3319	} else
3320		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3321
3322	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3323
3324	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3325		update_ptr = (u8 *)mta;
3326		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3327		    ixgbe_mc_array_itr, TRUE);
3328	}
3329
3330	return;
3331} /* ixgbe_set_multi */
3332
3333/************************************************************************
3334 * ixgbe_mc_array_itr
3335 *
3336 *   An iterator function needed by the multicast shared code.
3337 *   It feeds the shared code routine the addresses in the
3338 *   array of ixgbe_set_multi() one by one.
3339 ************************************************************************/
3340static u8 *
3341ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3342{
3343	struct ixgbe_mc_addr *mta;
3344
3345	mta = (struct ixgbe_mc_addr *)*update_ptr;
3346	*vmdq = mta->vmdq;
3347
3348	*update_ptr = (u8*)(mta + 1);
3349
3350	return (mta->addr);
3351} /* ixgbe_mc_array_itr */
3352
3353/************************************************************************
3354 * ixgbe_local_timer - Timer routine
3355 *
3356 *   Checks for link status, updates statistics,
3357 *   and runs the watchdog check.
3358 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter  *adapter = arg;
	device_t        dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64             queues = 0;
	int             hung = 0;

	/* Runs from callout context with the core lock held */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			/* Mark the ring hung; txeof can clear it later */
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);
	}

out:
	/* Re-arm ourselves for one second from now */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* Full reset: mark interface down and reinitialize the hardware */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
3427
3428/************************************************************************
3429 * ixgbe_sfp_probe
3430 *
3431 *   Determine if a port had optics inserted.
3432 ************************************************************************/
3433static bool
3434ixgbe_sfp_probe(struct adapter *adapter)
3435{
3436	struct ixgbe_hw *hw = &adapter->hw;
3437	device_t        dev = adapter->dev;
3438	bool            result = FALSE;
3439
3440	if ((hw->phy.type == ixgbe_phy_nl) &&
3441	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3442		s32 ret = hw->phy.ops.identify_sfp(hw);
3443		if (ret)
3444			goto out;
3445		ret = hw->phy.ops.reset(hw);
3446		adapter->sfp_probe = FALSE;
3447		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3448			device_printf(dev, "Unsupported SFP+ module detected!");
3449			device_printf(dev,
3450			    "Reload driver with supported module.\n");
3451			goto out;
3452		} else
3453			device_printf(dev, "SFP+ module detected!\n");
3454		/* We now have supported optics */
3455		result = TRUE;
3456	}
3457out:
3458
3459	return (result);
3460} /* ixgbe_sfp_probe */
3461
3462/************************************************************************
3463 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3464 ************************************************************************/
static void
ixgbe_handle_mod(void *context, int pending)
{
	struct adapter  *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	u32             err, cage_full = 0;

	/*
	 * Parts needing the crosstalk fix can raise spurious module
	 * interrupts: check the cage-present SDP pin first and bail
	 * out if no module is actually inserted.  The pin used varies
	 * by MAC generation.
	 */
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			return;
	}

	/* Identify the module; reject unsupported types */
	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}

	/* Apply module-specific MAC setup */
	err = hw->mac.ops.setup_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	/* Kick the multispeed-fiber task to (re)negotiate the link */
	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
} /* ixgbe_handle_mod */
3507
3508
3509/************************************************************************
3510 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3511 ************************************************************************/
static void
ixgbe_handle_msf(void *context, int pending)
{
	struct adapter  *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             autoneg;
	bool            negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* If nothing is advertised, fall back to the link capabilities */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
3534
3535/************************************************************************
3536 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3537 ************************************************************************/
3538static void
3539ixgbe_handle_phy(void *context, int pending)
3540{
3541	struct adapter  *adapter = context;
3542	struct ixgbe_hw *hw = &adapter->hw;
3543	int             error;
3544
3545	error = hw->phy.ops.handle_lasi(hw);
3546	if (error == IXGBE_ERR_OVERTEMP)
3547		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3548	else if (error)
3549		device_printf(adapter->dev,
3550		    "Error handling LASI interrupt: %d\n", error);
3551} /* ixgbe_handle_phy */
3552
3553/************************************************************************
3554 * ixgbe_stop - Stop the hardware
3555 *
3556 *   Disables all traffic on the adapter by issuing a
3557 *   global reset on the MAC and deallocates TX/RX buffers.
3558 ************************************************************************/
3559static void
3560ixgbe_stop(void *arg)
3561{
3562	struct ifnet    *ifp;
3563	struct adapter  *adapter = arg;
3564	struct ixgbe_hw *hw = &adapter->hw;
3565
3566	ifp = adapter->ifp;
3567
3568	mtx_assert(&adapter->core_mtx, MA_OWNED);
3569
3570	msec_delay(1000);
3571
3572	INIT_DEBUGOUT("ixgbe_stop: begin\n");
3573	ixgbe_disable_intr(adapter);
3574	callout_stop(&adapter->timer);
3575
3576	/* Let the stack know...*/
3577	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3578
3579	ixgbe_reset_hw(hw);
3580	hw->adapter_stopped = FALSE;
3581	ixgbe_stop_adapter(hw);
3582	if (hw->mac.type == ixgbe_mac_82599EB)
3583		ixgbe_stop_mac_link_on_d3_82599(hw);
3584	/* Turn off the laser - noop with no optics */
3585	ixgbe_disable_tx_laser(hw);
3586
3587	/* Update the stack */
3588	adapter->link_up = FALSE;
3589	ixgbe_update_link_status(adapter);
3590
3591	/* reprogram the RAR[0] in case user changed it. */
3592	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3593
3594	return;
3595} /* ixgbe_stop */
3596
3597/************************************************************************
3598 * ixgbe_update_link_status - Update OS on link state
3599 *
3600 * Note: Only updates the OS on the cached link state.
3601 *       The real check of the hardware only happens with
3602 *       a link interrupt.
3603 ************************************************************************/
3604static void
3605ixgbe_update_link_status(struct adapter *adapter)
3606{
3607	struct ifnet *ifp = adapter->ifp;
3608	device_t     dev = adapter->dev;
3609
3610	if (adapter->link_up) {
3611		if (adapter->link_active == FALSE) {
3612			if (bootverbose)
3613				device_printf(dev, "Link is up %d Gbps %s \n",
3614				    ((adapter->link_speed == 128) ? 10 : 1),
3615				    "Full Duplex");
3616			adapter->link_active = TRUE;
3617			/* Update any Flow Control changes */
3618			ixgbe_fc_enable(&adapter->hw);
3619			/* Update DMA coalescing config */
3620			ixgbe_config_dmac(adapter);
3621			if_link_state_change(ifp, LINK_STATE_UP);
3622			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3623				ixgbe_ping_all_vfs(adapter);
3624		}
3625	} else { /* Link down */
3626		if (adapter->link_active == TRUE) {
3627			if (bootverbose)
3628				device_printf(dev, "Link is Down\n");
3629			if_link_state_change(ifp, LINK_STATE_DOWN);
3630			adapter->link_active = FALSE;
3631			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3632				ixgbe_ping_all_vfs(adapter);
3633		}
3634	}
3635
3636	return;
3637} /* ixgbe_update_link_status */
3638
3639/************************************************************************
3640 * ixgbe_config_dmac - Configure DMA Coalescing
3641 ************************************************************************/
3642static void
3643ixgbe_config_dmac(struct adapter *adapter)
3644{
3645	struct ixgbe_hw          *hw = &adapter->hw;
3646	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3647
3648	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3649		return;
3650
3651	if (dcfg->watchdog_timer ^ adapter->dmac ||
3652	    dcfg->link_speed ^ adapter->link_speed) {
3653		dcfg->watchdog_timer = adapter->dmac;
3654		dcfg->fcoe_en = false;
3655		dcfg->link_speed = adapter->link_speed;
3656		dcfg->num_tcs = 1;
3657
3658		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3659		    dcfg->watchdog_timer, dcfg->link_speed);
3660
3661		hw->mac.ops.dmac_config(hw);
3662	}
3663} /* ixgbe_config_dmac */
3664
3665/************************************************************************
3666 * ixgbe_enable_intr
3667 ************************************************************************/
3668static void
3669ixgbe_enable_intr(struct adapter *adapter)
3670{
3671	struct ixgbe_hw *hw = &adapter->hw;
3672	struct ix_queue *que = adapter->queues;
3673	u32             mask, fwsm;
3674
3675	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3676
3677	switch (adapter->hw.mac.type) {
3678	case ixgbe_mac_82599EB:
3679		mask |= IXGBE_EIMS_ECC;
3680		/* Temperature sensor on some adapters */
3681		mask |= IXGBE_EIMS_GPI_SDP0;
3682		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3683		mask |= IXGBE_EIMS_GPI_SDP1;
3684		mask |= IXGBE_EIMS_GPI_SDP2;
3685		break;
3686	case ixgbe_mac_X540:
3687		/* Detect if Thermal Sensor is enabled */
3688		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3689		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3690			mask |= IXGBE_EIMS_TS;
3691		mask |= IXGBE_EIMS_ECC;
3692		break;
3693	case ixgbe_mac_X550:
3694		/* MAC thermal sensor is automatically enabled */
3695		mask |= IXGBE_EIMS_TS;
3696		mask |= IXGBE_EIMS_ECC;
3697		break;
3698	case ixgbe_mac_X550EM_x:
3699	case ixgbe_mac_X550EM_a:
3700		/* Some devices use SDP0 for important information */
3701		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3702		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3703		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3704		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3705			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3706		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3707			mask |= IXGBE_EICR_GPI_SDP0_X540;
3708		mask |= IXGBE_EIMS_ECC;
3709		break;
3710	default:
3711		break;
3712	}
3713
3714	/* Enable Fan Failure detection */
3715	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3716		mask |= IXGBE_EIMS_GPI_SDP1;
3717	/* Enable SR-IOV */
3718	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3719		mask |= IXGBE_EIMS_MAILBOX;
3720	/* Enable Flow Director */
3721	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3722		mask |= IXGBE_EIMS_FLOW_DIR;
3723
3724	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3725
3726	/* With MSI-X we use auto clear */
3727	if (adapter->msix_mem) {
3728		mask = IXGBE_EIMS_ENABLE_MASK;
3729		/* Don't autoclear Link */
3730		mask &= ~IXGBE_EIMS_OTHER;
3731		mask &= ~IXGBE_EIMS_LSC;
3732		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3733			mask &= ~IXGBE_EIMS_MAILBOX;
3734		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3735	}
3736
3737	/*
3738	 * Now enable all queues, this is done separately to
3739	 * allow for handling the extended (beyond 32) MSI-X
3740	 * vectors that can be used by 82599
3741	 */
3742	for (int i = 0; i < adapter->num_queues; i++, que++)
3743		ixgbe_enable_queue(adapter, que->msix);
3744
3745	IXGBE_WRITE_FLUSH(hw);
3746
3747	return;
3748} /* ixgbe_enable_intr */
3749
3750/************************************************************************
3751 * ixgbe_disable_intr
3752 ************************************************************************/
3753static void
3754ixgbe_disable_intr(struct adapter *adapter)
3755{
3756	if (adapter->msix_mem)
3757		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3758	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3759		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3760	} else {
3761		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3762		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3763		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3764	}
3765	IXGBE_WRITE_FLUSH(&adapter->hw);
3766
3767	return;
3768} /* ixgbe_disable_intr */
3769
3770/************************************************************************
3771 * ixgbe_legacy_irq - Legacy Interrupt Service routine
3772 ************************************************************************/
3773static void
3774ixgbe_legacy_irq(void *arg)
3775{
3776	struct ix_queue *que = arg;
3777	struct adapter  *adapter = que->adapter;
3778	struct ixgbe_hw *hw = &adapter->hw;
3779	struct ifnet    *ifp = adapter->ifp;
3780	struct tx_ring  *txr = adapter->tx_rings;
3781	bool            more = false;
3782	u32             eicr, eicr_mask;
3783
3784	/* Silicon errata #26 on 82598 */
3785	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3786
3787	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3788
3789	++que->irqs;
3790	if (eicr == 0) {
3791		ixgbe_enable_intr(adapter);
3792		return;
3793	}
3794
3795	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3796		more = ixgbe_rxeof(que);
3797
3798		IXGBE_TX_LOCK(txr);
3799		ixgbe_txeof(txr);
3800		if (!ixgbe_ring_empty(ifp, txr->br))
3801			ixgbe_start_locked(ifp, txr);
3802		IXGBE_TX_UNLOCK(txr);
3803	}
3804
3805	/* Check for fan failure */
3806	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3807		ixgbe_check_fan_failure(adapter, eicr, true);
3808		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3809	}
3810
3811	/* Link status change */
3812	if (eicr & IXGBE_EICR_LSC)
3813		taskqueue_enqueue(adapter->tq, &adapter->link_task);
3814
3815	if (ixgbe_is_sfp(hw)) {
3816		/* Pluggable optics-related interrupt */
3817		if (hw->mac.type >= ixgbe_mac_X540)
3818			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3819		else
3820			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3821
3822		if (eicr & eicr_mask) {
3823			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3824			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
3825		}
3826
3827		if ((hw->mac.type == ixgbe_mac_82599EB) &&
3828		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3829			IXGBE_WRITE_REG(hw, IXGBE_EICR,
3830			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3831			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3832		}
3833	}
3834
3835	/* External PHY interrupt */
3836	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3837	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
3838		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
3839
3840	if (more)
3841		taskqueue_enqueue(que->tq, &que->que_task);
3842	else
3843		ixgbe_enable_intr(adapter);
3844
3845	return;
3846} /* ixgbe_legacy_irq */
3847
3848/************************************************************************
3849 * ixgbe_free_pci_resources
3850 ************************************************************************/
3851static void
3852ixgbe_free_pci_resources(struct adapter *adapter)
3853{
3854	struct ix_queue *que = adapter->queues;
3855	device_t        dev = adapter->dev;
3856	int             rid, memrid;
3857
3858	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3859		memrid = PCIR_BAR(MSIX_82598_BAR);
3860	else
3861		memrid = PCIR_BAR(MSIX_82599_BAR);
3862
3863	/*
3864	 * There is a slight possibility of a failure mode
3865	 * in attach that will result in entering this function
3866	 * before interrupt resources have been initialized, and
3867	 * in that case we do not want to execute the loops below
3868	 * We can detect this reliably by the state of the adapter
3869	 * res pointer.
3870	 */
3871	if (adapter->res == NULL)
3872		goto mem;
3873
3874	/*
3875	 * Release all msix queue resources:
3876	 */
3877	for (int i = 0; i < adapter->num_queues; i++, que++) {
3878		rid = que->msix + 1;
3879		if (que->tag != NULL) {
3880			bus_teardown_intr(dev, que->res, que->tag);
3881			que->tag = NULL;
3882		}
3883		if (que->res != NULL)
3884			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
3885	}
3886
3887
3888	if (adapter->tag != NULL) {
3889		bus_teardown_intr(dev, adapter->res, adapter->tag);
3890		adapter->tag = NULL;
3891	}
3892
3893	/* Clean the Legacy or Link interrupt last */
3894	if (adapter->res != NULL)
3895		bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
3896		    adapter->res);
3897
3898mem:
3899	if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
3900	    (adapter->feat_en & IXGBE_FEATURE_MSIX))
3901		pci_release_msi(dev);
3902
3903	if (adapter->msix_mem != NULL)
3904		bus_release_resource(dev, SYS_RES_MEMORY, memrid,
3905		    adapter->msix_mem);
3906
3907	if (adapter->pci_mem != NULL)
3908		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3909		    adapter->pci_mem);
3910
3911	return;
3912} /* ixgbe_free_pci_resources */
3913
3914/************************************************************************
3915 * ixgbe_set_sysctl_value
3916 ************************************************************************/
3917static void
3918ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
3919    const char *description, int *limit, int value)
3920{
3921	*limit = value;
3922	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3923	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3924	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
3925} /* ixgbe_set_sysctl_value */
3926
3927/************************************************************************
3928 * ixgbe_sysctl_flowcntl
3929 *
3930 *   SYSCTL wrapper around setting Flow Control
3931 ************************************************************************/
3932static int
3933ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3934{
3935	struct adapter *adapter;
3936	int            error, fc;
3937
3938	adapter = (struct adapter *)arg1;
3939	fc = adapter->hw.fc.current_mode;
3940
3941	error = sysctl_handle_int(oidp, &fc, 0, req);
3942	if ((error) || (req->newptr == NULL))
3943		return (error);
3944
3945	/* Don't bother if it's not changed */
3946	if (fc == adapter->hw.fc.current_mode)
3947		return (0);
3948
3949	return ixgbe_set_flowcntl(adapter, fc);
3950} /* ixgbe_sysctl_flowcntl */
3951
3952/************************************************************************
3953 * ixgbe_set_flowcntl - Set flow control
3954 *
3955 *   Flow control values:
3956 *     0 - off
3957 *     1 - rx pause
3958 *     2 - tx pause
3959 *     3 - full
3960 ************************************************************************/
3961static int
3962ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3963{
3964	switch (fc) {
3965	case ixgbe_fc_rx_pause:
3966	case ixgbe_fc_tx_pause:
3967	case ixgbe_fc_full:
3968		adapter->hw.fc.requested_mode = fc;
3969		if (adapter->num_queues > 1)
3970			ixgbe_disable_rx_drop(adapter);
3971		break;
3972	case ixgbe_fc_none:
3973		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3974		if (adapter->num_queues > 1)
3975			ixgbe_enable_rx_drop(adapter);
3976		break;
3977	default:
3978		return (EINVAL);
3979	}
3980
3981	/* Don't autoneg if forcing a value */
3982	adapter->hw.fc.disable_fc_autoneg = TRUE;
3983	ixgbe_fc_enable(&adapter->hw);
3984
3985	return (0);
3986} /* ixgbe_set_flowcntl */
3987
3988/************************************************************************
3989 * ixgbe_enable_rx_drop
3990 *
3991 *   Enable the hardware to drop packets when the buffer is
3992 *   full. This is useful with multiqueue, so that no single
3993 *   queue being full stalls the entire RX engine. We only
3994 *   enable this when Multiqueue is enabled AND Flow Control
3995 *   is disabled.
3996 ************************************************************************/
3997static void
3998ixgbe_enable_rx_drop(struct adapter *adapter)
3999{
4000	struct ixgbe_hw *hw = &adapter->hw;
4001	struct rx_ring  *rxr;
4002	u32             srrctl;
4003
4004	for (int i = 0; i < adapter->num_queues; i++) {
4005		rxr = &adapter->rx_rings[i];
4006		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4007		srrctl |= IXGBE_SRRCTL_DROP_EN;
4008		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4009	}
4010
4011	/* enable drop for each vf */
4012	for (int i = 0; i < adapter->num_vfs; i++) {
4013		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4014		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4015		    IXGBE_QDE_ENABLE));
4016	}
4017} /* ixgbe_enable_rx_drop */
4018
4019/************************************************************************
4020 * ixgbe_disable_rx_drop
4021 ************************************************************************/
4022static void
4023ixgbe_disable_rx_drop(struct adapter *adapter)
4024{
4025	struct ixgbe_hw *hw = &adapter->hw;
4026	struct rx_ring  *rxr;
4027	u32             srrctl;
4028
4029	for (int i = 0; i < adapter->num_queues; i++) {
4030		rxr = &adapter->rx_rings[i];
4031		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4032		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4033		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4034	}
4035
4036	/* disable drop for each vf */
4037	for (int i = 0; i < adapter->num_vfs; i++) {
4038		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4039		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4040	}
4041} /* ixgbe_disable_rx_drop */
4042
4043/************************************************************************
4044 * ixgbe_sysctl_advertise
4045 *
4046 *   SYSCTL wrapper around setting advertised speed
4047 ************************************************************************/
4048static int
4049ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4050{
4051	struct adapter *adapter;
4052	int            error, advertise;
4053
4054	adapter = (struct adapter *)arg1;
4055	advertise = adapter->advertise;
4056
4057	error = sysctl_handle_int(oidp, &advertise, 0, req);
4058	if ((error) || (req->newptr == NULL))
4059		return (error);
4060
4061	return ixgbe_set_advertise(adapter, advertise);
4062} /* ixgbe_sysctl_advertise */
4063
4064/************************************************************************
4065 * ixgbe_set_advertise - Control advertised link speed
4066 *
4067 *   Flags:
4068 *     0x1 - advertise 100 Mb
4069 *     0x2 - advertise 1G
4070 *     0x4 - advertise 10G
4071 *     0x8 - advertise 10 Mb (yes, Mb)
4072 ************************************************************************/
4073static int
4074ixgbe_set_advertise(struct adapter *adapter, int advertise)
4075{
4076	device_t         dev;
4077	struct ixgbe_hw  *hw;
4078	ixgbe_link_speed speed = 0;
4079	ixgbe_link_speed link_caps = 0;
4080	s32              err = IXGBE_NOT_IMPLEMENTED;
4081	bool             negotiate = FALSE;
4082
4083	/* Checks to validate new value */
4084	if (adapter->advertise == advertise) /* no change */
4085		return (0);
4086
4087	dev = adapter->dev;
4088	hw = &adapter->hw;
4089
4090	/* No speed changes for backplane media */
4091	if (hw->phy.media_type == ixgbe_media_type_backplane)
4092		return (ENODEV);
4093
4094	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4095	      (hw->phy.multispeed_fiber))) {
4096		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4097		return (EINVAL);
4098	}
4099
4100	if (advertise < 0x1 || advertise > 0xF) {
4101		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4102		return (EINVAL);
4103	}
4104
4105	if (hw->mac.ops.get_link_capabilities) {
4106		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4107		    &negotiate);
4108		if (err != IXGBE_SUCCESS) {
4109			device_printf(dev, "Unable to determine supported advertise speeds\n");
4110			return (ENODEV);
4111		}
4112	}
4113
4114	/* Set new value and report new advertised mode */
4115	if (advertise & 0x1) {
4116		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4117			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4118			return (EINVAL);
4119		}
4120		speed |= IXGBE_LINK_SPEED_100_FULL;
4121	}
4122	if (advertise & 0x2) {
4123		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4124			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4125			return (EINVAL);
4126		}
4127		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4128	}
4129	if (advertise & 0x4) {
4130		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4131			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4132			return (EINVAL);
4133		}
4134		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4135	}
4136	if (advertise & 0x8) {
4137		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4138			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4139			return (EINVAL);
4140		}
4141		speed |= IXGBE_LINK_SPEED_10_FULL;
4142	}
4143
4144	hw->mac.autotry_restart = TRUE;
4145	hw->mac.ops.setup_link(hw, speed, TRUE);
4146	adapter->advertise = advertise;
4147
4148	return (0);
4149} /* ixgbe_set_advertise */
4150
4151/************************************************************************
4152 * ixgbe_get_advertise - Get current advertised speed settings
4153 *
4154 *   Formatted for sysctl usage.
4155 *   Flags:
4156 *     0x1 - advertise 100 Mb
4157 *     0x2 - advertise 1G
4158 *     0x4 - advertise 10G
4159 *     0x8 - advertise 10 Mb (yes, Mb)
4160 ************************************************************************/
4161static int
4162ixgbe_get_advertise(struct adapter *adapter)
4163{
4164	struct ixgbe_hw  *hw = &adapter->hw;
4165	int              speed;
4166	ixgbe_link_speed link_caps = 0;
4167	s32              err;
4168	bool             negotiate = FALSE;
4169
4170	/*
4171	 * Advertised speed means nothing unless it's copper or
4172	 * multi-speed fiber
4173	 */
4174	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4175	    !(hw->phy.multispeed_fiber))
4176		return (0);
4177
4178	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4179	if (err != IXGBE_SUCCESS)
4180		return (0);
4181
4182	speed =
4183	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4184	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4185	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4186	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4187
4188	return speed;
4189} /* ixgbe_get_advertise */
4190
4191/************************************************************************
4192 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4193 *
4194 *   Control values:
4195 *     0/1 - off / on (use default value of 1000)
4196 *
4197 *     Legal timer values are:
4198 *     50,100,250,500,1000,2000,5000,10000
4199 *
4200 *     Turning off interrupt moderation will also turn this off.
4201 ************************************************************************/
4202static int
4203ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4204{
4205	struct adapter *adapter = (struct adapter *)arg1;
4206	struct ifnet   *ifp = adapter->ifp;
4207	int            error;
4208	u32            newval;
4209
4210	newval = adapter->dmac;
4211	error = sysctl_handle_int(oidp, &newval, 0, req);
4212	if ((error) || (req->newptr == NULL))
4213		return (error);
4214
4215	switch (newval) {
4216	case 0:
4217		/* Disabled */
4218		adapter->dmac = 0;
4219		break;
4220	case 1:
4221		/* Enable and use default */
4222		adapter->dmac = 1000;
4223		break;
4224	case 50:
4225	case 100:
4226	case 250:
4227	case 500:
4228	case 1000:
4229	case 2000:
4230	case 5000:
4231	case 10000:
4232		/* Legal values - allow */
4233		adapter->dmac = newval;
4234		break;
4235	default:
4236		/* Do nothing, illegal value */
4237		return (EINVAL);
4238	}
4239
4240	/* Re-initialize hardware if it's already running */
4241	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4242		ixgbe_init(adapter);
4243
4244	return (0);
4245} /* ixgbe_sysctl_dmac */
4246
4247#ifdef IXGBE_DEBUG
4248/************************************************************************
4249 * ixgbe_sysctl_power_state
4250 *
4251 *   Sysctl to test power states
4252 *   Values:
4253 *     0      - set device to D0
4254 *     3      - set device to D3
4255 *     (none) - get current device power state
4256 ************************************************************************/
4257static int
4258ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4259{
4260	struct adapter *adapter = (struct adapter *)arg1;
4261	device_t       dev = adapter->dev;
4262	int            curr_ps, new_ps, error = 0;
4263
4264	curr_ps = new_ps = pci_get_powerstate(dev);
4265
4266	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4267	if ((error) || (req->newptr == NULL))
4268		return (error);
4269
4270	if (new_ps == curr_ps)
4271		return (0);
4272
4273	if (new_ps == 3 && curr_ps == 0)
4274		error = DEVICE_SUSPEND(dev);
4275	else if (new_ps == 0 && curr_ps == 3)
4276		error = DEVICE_RESUME(dev);
4277	else
4278		return (EINVAL);
4279
4280	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4281
4282	return (error);
4283} /* ixgbe_sysctl_power_state */
4284#endif
4285
4286/************************************************************************
4287 * ixgbe_sysctl_wol_enable
4288 *
4289 *   Sysctl to enable/disable the WoL capability,
4290 *   if supported by the adapter.
4291 *
4292 *   Values:
4293 *     0 - disabled
4294 *     1 - enabled
4295 ************************************************************************/
4296static int
4297ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4298{
4299	struct adapter  *adapter = (struct adapter *)arg1;
4300	struct ixgbe_hw *hw = &adapter->hw;
4301	int             new_wol_enabled;
4302	int             error = 0;
4303
4304	new_wol_enabled = hw->wol_enabled;
4305	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4306	if ((error) || (req->newptr == NULL))
4307		return (error);
4308	new_wol_enabled = !!(new_wol_enabled);
4309	if (new_wol_enabled == hw->wol_enabled)
4310		return (0);
4311
4312	if (new_wol_enabled > 0 && !adapter->wol_support)
4313		return (ENODEV);
4314	else
4315		hw->wol_enabled = new_wol_enabled;
4316
4317	return (0);
4318} /* ixgbe_sysctl_wol_enable */
4319
4320/************************************************************************
4321 * ixgbe_sysctl_wufc - Wake Up Filter Control
4322 *
4323 *   Sysctl to enable/disable the types of packets that the
4324 *   adapter will wake up on upon receipt.
4325 *   Flags:
4326 *     0x1  - Link Status Change
4327 *     0x2  - Magic Packet
4328 *     0x4  - Direct Exact
4329 *     0x8  - Directed Multicast
4330 *     0x10 - Broadcast
4331 *     0x20 - ARP/IPv4 Request Packet
4332 *     0x40 - Direct IPv4 Packet
4333 *     0x80 - Direct IPv6 Packet
4334 *
4335 *   Settings not listed above will cause the sysctl to return an error.
4336 ************************************************************************/
4337static int
4338ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4339{
4340	struct adapter *adapter = (struct adapter *)arg1;
4341	int            error = 0;
4342	u32            new_wufc;
4343
4344	new_wufc = adapter->wufc;
4345
4346	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4347	if ((error) || (req->newptr == NULL))
4348		return (error);
4349	if (new_wufc == adapter->wufc)
4350		return (0);
4351
4352	if (new_wufc & 0xffffff00)
4353		return (EINVAL);
4354
4355	new_wufc &= 0xff;
4356	new_wufc |= (0xffffff & adapter->wufc);
4357	adapter->wufc = new_wufc;
4358
4359	return (0);
4360} /* ixgbe_sysctl_wufc */
4361
4362#ifdef IXGBE_DEBUG
4363/************************************************************************
4364 * ixgbe_sysctl_print_rss_config
4365 ************************************************************************/
4366static int
4367ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4368{
4369	struct adapter  *adapter = (struct adapter *)arg1;
4370	struct ixgbe_hw *hw = &adapter->hw;
4371	device_t        dev = adapter->dev;
4372	struct sbuf     *buf;
4373	int             error = 0, reta_size;
4374	u32             reg;
4375
4376	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4377	if (!buf) {
4378		device_printf(dev, "Could not allocate sbuf for output.\n");
4379		return (ENOMEM);
4380	}
4381
4382	// TODO: use sbufs to make a string to print out
4383	/* Set multiplier for RETA setup and table size based on MAC */
4384	switch (adapter->hw.mac.type) {
4385	case ixgbe_mac_X550:
4386	case ixgbe_mac_X550EM_x:
4387	case ixgbe_mac_X550EM_a:
4388		reta_size = 128;
4389		break;
4390	default:
4391		reta_size = 32;
4392		break;
4393	}
4394
4395	/* Print out the redirection table */
4396	sbuf_cat(buf, "\n");
4397	for (int i = 0; i < reta_size; i++) {
4398		if (i < 32) {
4399			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4400			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4401		} else {
4402			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4403			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4404		}
4405	}
4406
4407	// TODO: print more config
4408
4409	error = sbuf_finish(buf);
4410	if (error)
4411		device_printf(dev, "Error finishing sbuf: %d\n", error);
4412
4413	sbuf_delete(buf);
4414
4415	return (0);
4416} /* ixgbe_sysctl_print_rss_config */
4417#endif /* IXGBE_DEBUG */
4418
4419/************************************************************************
4420 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4421 *
4422 *   For X552/X557-AT devices using an external PHY
4423 ************************************************************************/
4424static int
4425ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4426{
4427	struct adapter  *adapter = (struct adapter *)arg1;
4428	struct ixgbe_hw *hw = &adapter->hw;
4429	u16             reg;
4430
4431	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4432		device_printf(adapter->dev,
4433		    "Device has no supported external thermal sensor.\n");
4434		return (ENODEV);
4435	}
4436
4437	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4438	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4439		device_printf(adapter->dev,
4440		    "Error reading from PHY's current temperature register\n");
4441		return (EAGAIN);
4442	}
4443
4444	/* Shift temp for output */
4445	reg = reg >> 8;
4446
4447	return (sysctl_handle_int(oidp, NULL, reg, req));
4448} /* ixgbe_sysctl_phy_temp */
4449
4450/************************************************************************
4451 * ixgbe_sysctl_phy_overtemp_occurred
4452 *
4453 *   Reports (directly from the PHY) whether the current PHY
4454 *   temperature is over the overtemp threshold.
4455 ************************************************************************/
4456static int
4457ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4458{
4459	struct adapter  *adapter = (struct adapter *)arg1;
4460	struct ixgbe_hw *hw = &adapter->hw;
4461	u16             reg;
4462
4463	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4464		device_printf(adapter->dev,
4465		    "Device has no supported external thermal sensor.\n");
4466		return (ENODEV);
4467	}
4468
4469	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4470	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4471		device_printf(adapter->dev,
4472		    "Error reading from PHY's temperature status register\n");
4473		return (EAGAIN);
4474	}
4475
4476	/* Get occurrence bit */
4477	reg = !!(reg & 0x4000);
4478
4479	return (sysctl_handle_int(oidp, 0, reg, req));
4480} /* ixgbe_sysctl_phy_overtemp_occurred */
4481
4482/************************************************************************
4483 * ixgbe_sysctl_eee_state
4484 *
4485 *   Sysctl to set EEE power saving feature
4486 *   Values:
4487 *     0      - disable EEE
4488 *     1      - enable EEE
4489 *     (none) - get current device EEE state
4490 ************************************************************************/
4491static int
4492ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4493{
4494	struct adapter *adapter = (struct adapter *)arg1;
4495	device_t       dev = adapter->dev;
4496	int            curr_eee, new_eee, error = 0;
4497	s32            retval;
4498
4499	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4500
4501	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4502	if ((error) || (req->newptr == NULL))
4503		return (error);
4504
4505	/* Nothing to do */
4506	if (new_eee == curr_eee)
4507		return (0);
4508
4509	/* Not supported */
4510	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4511		return (EINVAL);
4512
4513	/* Bounds checking */
4514	if ((new_eee < 0) || (new_eee > 1))
4515		return (EINVAL);
4516
4517	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4518	if (retval) {
4519		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4520		return (EINVAL);
4521	}
4522
4523	/* Restart auto-neg */
4524	ixgbe_init(adapter);
4525
4526	device_printf(dev, "New EEE state: %d\n", new_eee);
4527
4528	/* Cache new value */
4529	if (new_eee)
4530		adapter->feat_en |= IXGBE_FEATURE_EEE;
4531	else
4532		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4533
4534	return (error);
4535} /* ixgbe_sysctl_eee_state */
4536
4537/************************************************************************
4538 * ixgbe_init_device_features
4539 ************************************************************************/
static void
ixgbe_init_device_features(struct adapter *adapter)
{
	/*
	 * Start from the feature set common to all supported MACs; the
	 * per-MAC switch below adds or removes capabilities from this.
	 */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
	                  | IXGBE_FEATURE_RSS
	                  | IXGBE_FEATURE_MSI
	                  | IXGBE_FEATURE_MSIX
	                  | IXGBE_FEATURE_LEGACY_IRQ
	                  | IXGBE_FEATURE_LEGACY_TX;

	/* Set capabilities first... */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
		break;
	case ixgbe_mac_X540:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* Bypass is only offered on function 0 of the bypass SKU. */
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		break;
	case ixgbe_mac_X550:
		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		break;
	case ixgbe_mac_X550EM_x:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		break;
	case ixgbe_mac_X550EM_a:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		/* X550EM_a cannot use legacy (INTx) interrupts. */
		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
			adapter->feat_cap |= IXGBE_FEATURE_EEE;
		}
		break;
	case ixgbe_mac_82599EB:
		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
		    (adapter->hw.bus.func == 0))
			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Fan failure detection */
	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
	/* Netmap */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* EEE */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		adapter->feat_en |= IXGBE_FEATURE_EEE;
	/* Thermal Sensor */
	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;

	/* Enabled via global sysctl... */
	/* Flow Director */
	if (ixgbe_enable_fdir) {
		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
			adapter->feat_en |= IXGBE_FEATURE_FDIR;
		else
			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
	}
	/* Legacy (single queue) transmit */
	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
	    ixgbe_enable_legacy_tx)
		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
	/*
	 * Message Signal Interrupts - Extended (MSI-X)
	 * Normal MSI is only enabled if MSI-X calls fail.
	 */
	if (!ixgbe_enable_msix)
		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
	/* Receive-Side Scaling (RSS) */
	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
		adapter->feat_en |= IXGBE_FEATURE_RSS;

	/* Disable features with unmet dependencies... */
	/* No MSI-X: RSS and SR-IOV both require per-queue vectors. */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
	}
} /* ixgbe_init_device_features */
4642
4643/************************************************************************
4644 * ixgbe_probe - Device identification routine
4645 *
4646 *   Determines if the driver should be loaded on
4647 *   adapter based on its PCI vendor/device ID.
4648 *
4649 *   return BUS_PROBE_DEFAULT on success, positive on failure
4650 ************************************************************************/
4651static int
4652ixgbe_probe(device_t dev)
4653{
4654	ixgbe_vendor_info_t *ent;
4655
4656	u16  pci_vendor_id = 0;
4657	u16  pci_device_id = 0;
4658	u16  pci_subvendor_id = 0;
4659	u16  pci_subdevice_id = 0;
4660	char adapter_name[256];
4661
4662	INIT_DEBUGOUT("ixgbe_probe: begin");
4663
4664	pci_vendor_id = pci_get_vendor(dev);
4665	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
4666		return (ENXIO);
4667
4668	pci_device_id = pci_get_device(dev);
4669	pci_subvendor_id = pci_get_subvendor(dev);
4670	pci_subdevice_id = pci_get_subdevice(dev);
4671
4672	ent = ixgbe_vendor_info_array;
4673	while (ent->vendor_id != 0) {
4674		if ((pci_vendor_id == ent->vendor_id) &&
4675		    (pci_device_id == ent->device_id) &&
4676		    ((pci_subvendor_id == ent->subvendor_id) ||
4677		     (ent->subvendor_id == 0)) &&
4678		    ((pci_subdevice_id == ent->subdevice_id) ||
4679		     (ent->subdevice_id == 0))) {
4680			sprintf(adapter_name, "%s, Version - %s",
4681				ixgbe_strings[ent->index],
4682				ixgbe_driver_version);
4683			device_set_desc_copy(dev, adapter_name);
4684			++ixgbe_total_ports;
4685			return (BUS_PROBE_DEFAULT);
4686		}
4687		ent++;
4688	}
4689
4690	return (ENXIO);
4691} /* ixgbe_probe */
4692
4693
4694/************************************************************************
4695 * ixgbe_ioctl - Ioctl entry point
4696 *
4697 *   Called when the user wants to configure the interface.
4698 *
4699 *   return 0 on success, positive on failure
4700 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq   *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr  *ifa = (struct ifaddr *)data;
#endif
	int            error = 0;
	bool           avoid_reset = FALSE;

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		 * Calling init results in link renegotiation,
		 * so we avoid doing it when possible.
		 */
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			/* Only (re)initialize if the interface isn't running. */
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			/* Update frame size under the core lock, then reinit. */
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only reprogram the filter
				 * if PROMISC/ALLMULTI actually changed.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
				}
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		/* Remember flags so the next call can detect changes. */
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Quiesce interrupts while rewriting the MTA. */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		/* mask holds only the capability bits being toggled. */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (!mask)
			break;

		/* HW cannot turn these on/off separately */
		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		}
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		/* Reinitialize so the new capabilities take effect in HW. */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP EEPROM (0xA0) and diagnostics (0xA2) pages. */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		/* Reject user-supplied lengths larger than the data buffer. */
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
} /* ixgbe_ioctl */
4858
4859/************************************************************************
4860 * ixgbe_check_fan_failure
4861 ************************************************************************/
4862static void
4863ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4864{
4865	u32 mask;
4866
4867	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4868	    IXGBE_ESDP_SDP1;
4869
4870	if (reg & mask)
4871		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4872} /* ixgbe_check_fan_failure */
4873
4874/************************************************************************
4875 * ixgbe_handle_que
4876 ************************************************************************/
4877static void
4878ixgbe_handle_que(void *context, int pending)
4879{
4880	struct ix_queue *que = context;
4881	struct adapter  *adapter = que->adapter;
4882	struct tx_ring  *txr = que->txr;
4883	struct ifnet    *ifp = adapter->ifp;
4884
4885	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4886		ixgbe_rxeof(que);
4887		IXGBE_TX_LOCK(txr);
4888		ixgbe_txeof(txr);
4889		if (!ixgbe_ring_empty(ifp, txr->br))
4890			ixgbe_start_locked(ifp, txr);
4891		IXGBE_TX_UNLOCK(txr);
4892	}
4893
4894	/* Re-enable this interrupt */
4895	if (que->res != NULL)
4896		ixgbe_enable_queue(adapter, que->msix);
4897	else
4898		ixgbe_enable_intr(adapter);
4899
4900	return;
4901} /* ixgbe_handle_que */
4902
4903
4904
4905/************************************************************************
4906 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
4907 ************************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter)
{
	device_t        dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring  *txr = adapter->tx_rings;
	int             error;

	/* We allocate a single interrupt resource */
	adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev,
		    "Unable to allocate bus resource: interrupt\n");
		return (ENXIO);
	}

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
	/* One fast taskqueue thread handles the single queue pair. */
	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
	    device_get_nameunit(adapter->dev));

	/* Tasklets for Link, SFP and Multispeed Fiber */
	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
	    device_get_nameunit(adapter->dev));

	if ((error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
	    &adapter->tag)) != 0) {
		device_printf(dev,
		    "Failed to register fast interrupt handler: %d\n", error);
		/* Unwind: free both taskqueues created above. */
		taskqueue_free(que->tq);
		taskqueue_free(adapter->tq);
		que->tq = NULL;
		adapter->tq = NULL;

		return (error);
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
4966
4967
4968/************************************************************************
4969 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
4970 ************************************************************************/
4971static int
4972ixgbe_allocate_msix(struct adapter *adapter)
4973{
4974	device_t        dev = adapter->dev;
4975	struct ix_queue *que = adapter->queues;
4976	struct tx_ring  *txr = adapter->tx_rings;
4977	int             error, rid, vector = 0;
4978	int             cpu_id = 0;
4979	unsigned int    rss_buckets = 0;
4980	cpuset_t        cpu_mask;
4981
4982	/*
4983	 * If we're doing RSS, the number of queues needs to
4984	 * match the number of RSS buckets that are configured.
4985	 *
4986	 * + If there's more queues than RSS buckets, we'll end
4987	 *   up with queues that get no traffic.
4988	 *
4989	 * + If there's more RSS buckets than queues, we'll end
4990	 *   up having multiple RSS buckets map to the same queue,
4991	 *   so there'll be some contention.
4992	 */
4993	rss_buckets = rss_getnumbuckets();
4994	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
4995	    (adapter->num_queues != rss_buckets)) {
4996		device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
4997		    __func__, adapter->num_queues, rss_buckets);
4998	}
4999
5000	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
5001		rid = vector + 1;
5002		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
5003		    RF_SHAREABLE | RF_ACTIVE);
5004		if (que->res == NULL) {
5005			device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
5006			    vector);
5007			return (ENXIO);
5008		}
5009		/* Set the handler function */
5010		error = bus_setup_intr(dev, que->res,
5011		    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
5012		    &que->tag);
5013		if (error) {
5014			que->res = NULL;
5015			device_printf(dev, "Failed to register QUE handler");
5016			return (error);
5017		}
5018#if __FreeBSD_version >= 800504
5019		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
5020#endif
5021		que->msix = vector;
5022		adapter->active_queues |= (u64)(1 << que->msix);
5023
5024		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
5025			/*
5026			 * The queue ID is used as the RSS layer bucket ID.
5027			 * We look up the queue ID -> RSS CPU ID and select
5028			 * that.
5029			 */
5030			cpu_id = rss_getcpu(i % rss_buckets);
5031			CPU_SETOF(cpu_id, &cpu_mask);
5032		} else {
5033			/*
5034			 * Bind the MSI-X vector, and thus the
5035			 * rings to the corresponding CPU.
5036			 *
5037			 * This just happens to match the default RSS
5038			 * round-robin bucket -> queue -> CPU allocation.
5039			 */
5040			if (adapter->num_queues > 1)
5041				cpu_id = i;
5042		}
5043		if (adapter->num_queues > 1)
5044			bus_bind_intr(dev, que->res, cpu_id);
5045#ifdef IXGBE_DEBUG
5046		if (adapter->feat_en & IXGBE_FEATURE_RSS)
5047			device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
5048			    cpu_id);
5049		else
5050			device_printf(dev, "Bound queue %d to cpu %d\n", i,
5051			    cpu_id);
5052#endif /* IXGBE_DEBUG */
5053
5054
5055		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5056			TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
5057			    txr);
5058		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5059		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5060		    taskqueue_thread_enqueue, &que->tq);
5061#if __FreeBSD_version < 1100000
5062		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
5063		    device_get_nameunit(adapter->dev), i);
5064#else
5065		if (adapter->feat_en & IXGBE_FEATURE_RSS)
5066			taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5067			    &cpu_mask, "%s (bucket %d)",
5068			    device_get_nameunit(adapter->dev), cpu_id);
5069		else
5070			taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5071			    NULL, "%s:q%d", device_get_nameunit(adapter->dev),
5072			    i);
5073#endif
5074	}
5075
5076	/* and Link */
5077	adapter->link_rid = vector + 1;
5078	adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
5079	    &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
5080	if (!adapter->res) {
5081		device_printf(dev,
5082		    "Unable to allocate bus resource: Link interrupt [%d]\n",
5083		    adapter->link_rid);
5084		return (ENXIO);
5085	}
5086	/* Set the link handler function */
5087	error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
5088	    NULL, ixgbe_msix_link, adapter, &adapter->tag);
5089	if (error) {
5090		adapter->res = NULL;
5091		device_printf(dev, "Failed to register LINK handler");
5092		return (error);
5093	}
5094#if __FreeBSD_version >= 800504
5095	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
5096#endif
5097	adapter->vector = vector;
5098	/* Tasklets for Link, SFP and Multispeed Fiber */
5099	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
5100	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
5101	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
5102	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5103		TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
5104	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
5105	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5106		TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
5107	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
5108	    taskqueue_thread_enqueue, &adapter->tq);
5109	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
5110	    device_get_nameunit(adapter->dev));
5111
5112	return (0);
5113} /* ixgbe_allocate_msix */
5114
5115/************************************************************************
5116 * ixgbe_configure_interrupts
5117 *
5118 *   Setup MSI-X, MSI, or legacy interrupts (in that order).
5119 *   This will also depend on user settings.
5120 ************************************************************************/
static int
ixgbe_configure_interrupts(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int      rid, want, queues, msgs;

	/* Default to 1 queue if MSI-X setup fails */
	adapter->num_queues = 1;

	/* Override by tuneable */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
		goto msi;

	/* First try MSI-X */
	msgs = pci_msix_count(dev);
	if (msgs == 0)
		goto msi;
	rid = PCIR_BAR(MSIX_82598_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		rid += 4;  /* 82599 maps in higher BAR */
		adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
	}
	if (adapter->msix_mem == NULL) {
		/* May not be enabled */
		device_printf(adapter->dev, "Unable to map MSI-X table.\n");
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	queues = min(mp_ncpus, msgs - 1);
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		queues = min(queues, rss_getnumbuckets());
	if (ixgbe_num_queues > queues) {
		device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
		ixgbe_num_queues = queues;
	}

	/* Non-zero tunable overrides the auto-configured count. */
	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;
	/* Set max queues to 8 when autoconfiguring */
	else
		queues = min(queues, 8);

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	 * Want one vector (RX/TX pair) per queue
	 * plus an additional for Link.
	 */
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSI-X interrupts with %d vectors\n", msgs);
		adapter->num_queues = queues;
		adapter->feat_en |= IXGBE_FEATURE_MSIX;
		return (0);
	}
	/*
	 * MSI-X allocation failed or provided us with
	 * less vectors than needed. Free MSI-X resources
	 * and we'll try enabling MSI.
	 */
	pci_release_msi(dev);

msi:
	/* Without MSI-X, some features are no longer supported */
	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;

	/*
	 * Release the MSI-X table mapping if one was made; rid still
	 * holds the BAR it was allocated from in that case.
	 */
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid,
		    adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	msgs = 1;
	if (pci_alloc_msi(dev, &msgs) == 0) {
		adapter->feat_en |= IXGBE_FEATURE_MSI;
		adapter->link_rid = 1;
		device_printf(adapter->dev, "Using an MSI interrupt\n");
		return (0);
	}

	/* Last resort: legacy INTx, if the MAC supports it. */
	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
		device_printf(adapter->dev,
		    "Device does not support legacy interrupts.\n");
		return 1;
	}

	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	adapter->link_rid = 0;
	device_printf(adapter->dev, "Using a Legacy interrupt\n");

	return (0);
} /* ixgbe_configure_interrupts */
5229
5230
5231/************************************************************************
5232 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
5233 *
5234 *   Done outside of interrupt context since the driver might sleep
5235 ************************************************************************/
5236static void
5237ixgbe_handle_link(void *context, int pending)
5238{
5239	struct adapter  *adapter = context;
5240	struct ixgbe_hw *hw = &adapter->hw;
5241
5242	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
5243	ixgbe_update_link_status(adapter);
5244
5245	/* Re-enable link interrupts */
5246	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
5247} /* ixgbe_handle_link */
5248
5249/************************************************************************
5250 * ixgbe_rearm_queues
5251 ************************************************************************/
5252static void
5253ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5254{
5255	u32 mask;
5256
5257	switch (adapter->hw.mac.type) {
5258	case ixgbe_mac_82598EB:
5259		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5260		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5261		break;
5262	case ixgbe_mac_82599EB:
5263	case ixgbe_mac_X540:
5264	case ixgbe_mac_X550:
5265	case ixgbe_mac_X550EM_x:
5266	case ixgbe_mac_X550EM_a:
5267		mask = (queues & 0xFFFFFFFF);
5268		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5269		mask = (queues >> 32);
5270		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5271		break;
5272	default:
5273		break;
5274	}
5275} /* ixgbe_rearm_queues */
5276
5277