if_ix.c revision 341477
1/******************************************************************************
2
3  Copyright (c) 2001-2017, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: stable/11/sys/dev/ixgbe/if_ix.c 341477 2018-12-04 17:40:56Z vmaffione $*/
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#include "opt_rss.h"
40#endif
41
42#include "ixgbe.h"
43
44/************************************************************************
45 * Driver version
46 ************************************************************************/
char ixgbe_driver_version[] = "3.2.12-k";	/* current driver version string */
48
49
50/************************************************************************
51 * PCI Device ID Table
52 *
53 *   Used by probe to select devices to load on
54 *   Last field stores an index into ixgbe_strings
55 *   Last entry must be all 0s
56 *
57 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/* 82598 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	/* 82599 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	/* X540 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	/* X550 family */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	/* X550EM-X variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	/* X550EM-A variants */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	/* Bypass adapters */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
108
109/************************************************************************
110 * Table of branding strings
111 ************************************************************************/
static char    *ixgbe_strings[] = {
	/* index 0 — all entries in the device table above use string index 0 */
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
115
116/************************************************************************
117 * Function prototypes
118 ************************************************************************/
119static int      ixgbe_probe(device_t);
120static int      ixgbe_attach(device_t);
121static int      ixgbe_detach(device_t);
122static int      ixgbe_shutdown(device_t);
123static int      ixgbe_suspend(device_t);
124static int      ixgbe_resume(device_t);
125static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
126static void     ixgbe_init(void *);
127static void     ixgbe_init_locked(struct adapter *);
128static void     ixgbe_stop(void *);
129#if __FreeBSD_version >= 1100036
130static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
131#endif
132static void     ixgbe_init_device_features(struct adapter *);
133static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
134static void     ixgbe_add_media_types(struct adapter *);
135static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
136static int      ixgbe_media_change(struct ifnet *);
137static int      ixgbe_allocate_pci_resources(struct adapter *);
138static void     ixgbe_get_slot_info(struct adapter *);
139static int      ixgbe_allocate_msix(struct adapter *);
140static int      ixgbe_allocate_legacy(struct adapter *);
141static int      ixgbe_configure_interrupts(struct adapter *);
142static void     ixgbe_free_pci_resources(struct adapter *);
143static void     ixgbe_local_timer(void *);
144static int      ixgbe_setup_interface(device_t, struct adapter *);
145static void     ixgbe_config_gpie(struct adapter *);
146static void     ixgbe_config_dmac(struct adapter *);
147static void     ixgbe_config_delay_values(struct adapter *);
148static void     ixgbe_config_link(struct adapter *);
149static void     ixgbe_check_wol_support(struct adapter *);
150static int      ixgbe_setup_low_power_mode(struct adapter *);
151static void     ixgbe_rearm_queues(struct adapter *, u64);
152
153static void     ixgbe_initialize_transmit_units(struct adapter *);
154static void     ixgbe_initialize_receive_units(struct adapter *);
155static void     ixgbe_enable_rx_drop(struct adapter *);
156static void     ixgbe_disable_rx_drop(struct adapter *);
157static void     ixgbe_initialize_rss_mapping(struct adapter *);
158
159static void     ixgbe_enable_intr(struct adapter *);
160static void     ixgbe_disable_intr(struct adapter *);
161static void     ixgbe_update_stats_counters(struct adapter *);
162static void     ixgbe_set_promisc(struct adapter *);
163static void     ixgbe_set_multi(struct adapter *);
164static void     ixgbe_update_link_status(struct adapter *);
165static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
166static void     ixgbe_configure_ivars(struct adapter *);
167static u8       *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
168
169static void     ixgbe_setup_vlan_hw_support(struct adapter *);
170static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
171static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
172
173static void     ixgbe_add_device_sysctls(struct adapter *);
174static void     ixgbe_add_hw_stats(struct adapter *);
175static int      ixgbe_set_flowcntl(struct adapter *, int);
176static int      ixgbe_set_advertise(struct adapter *, int);
177static int      ixgbe_get_advertise(struct adapter *);
178
179/* Sysctl handlers */
180static void     ixgbe_set_sysctl_value(struct adapter *, const char *,
181                                       const char *, int *, int);
182static int      ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
183static int      ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
184static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
185static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
186static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
187static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
188#ifdef IXGBE_DEBUG
189static int      ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
190static int      ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
191#endif
192static int      ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
193static int      ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
194static int      ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
195static int      ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
196static int      ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
197static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
198static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
199
200/* Support for pluggable optic modules */
201static bool     ixgbe_sfp_probe(struct adapter *);
202
203/* Legacy (single vector) interrupt handler */
204static void     ixgbe_legacy_irq(void *);
205
206/* The MSI/MSI-X Interrupt handlers */
207static void     ixgbe_msix_que(void *);
208static void     ixgbe_msix_link(void *);
209
210/* Deferred interrupt tasklets */
211static void     ixgbe_handle_que(void *, int);
212static void     ixgbe_handle_link(void *, int);
213static void     ixgbe_handle_msf(void *, int);
214static void     ixgbe_handle_mod(void *, int);
215static void     ixgbe_handle_phy(void *, int);
216
217
218/************************************************************************
219 *  FreeBSD Device Interface Entry Points
220 ************************************************************************/
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
#ifdef PCI_IOV
	/* SR-IOV interface (only compiled in when the kernel has PCI_IOV) */
	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};
236
static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
/* Register the "ix" driver on the PCI bus */
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

/* Module load-order dependencies */
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
249
/*
 * TUNEABLE PARAMETERS:
 */

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static int ixgbe_enable_aim = TRUE;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Advertise Speed, default to 0 (auto) */
static int ixgbe_advertise_speed = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");

/*
 * Smart speed setting, default to on.
 * This only works as a compile-time option right now,
 * as it is applied during attach; set this to
 * 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = FALSE;
SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
    &allow_unsupported_sfp, 0,
    "Allow unsupported SFP modules...use at your own risk");

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

/* Keep running tab on them for sanity check */
static int ixgbe_total_ports;

/*
 * Indirect TX entry points; assigned elsewhere in the driver
 * (presumably based on enable_legacy_tx — confirm at the assignment site).
 */
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);

MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
367
368/************************************************************************
369 * ixgbe_initialize_rss_mapping
370 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id, table_size, index_mult;
	int             i, j;
	u32             rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key from the kernel RSS layer */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 spaces its queue indices 0x11 apart in the RETA */
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has a 512-entry table (RETA plus ERETA) */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the configured queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 *
		 * Four 8-bit entries accumulate in 'reta'; the register is
		 * written once every fourth iteration.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			/* Entries 0-127 go to RETA, 128 and up to ERETA */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds (ten 32-bit RSSRK registers) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the enabled hash types into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
482
483/************************************************************************
484 * ixgbe_initialize_receive_units - Setup receive registers and features.
485 ************************************************************************/
/* Rounds a buffer size up to the next SRRCTL BSIZEPKT unit before shifting */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring  *rxr = adapter->rx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	int             i, j;
	u32             bufsz, fctrl, srrctl, rxcsum;
	u32             hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 only: also set DPF and PMCF filter control bits */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Rx buffer size, rounded up and expressed in SRRCTL units */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Per-queue descriptor ring setup */
	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* PSRTYPE register is written only on non-82598 MACs */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
		            | IXGBE_PSRTYPE_UDPHDR
		            | IXGBE_PSRTYPE_IPV4HDR
		            | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
} /* ixgbe_initialize_receive_units */
604
605/************************************************************************
606 * ixgbe_initialize_transmit_units - Enable transmit units.
607 ************************************************************************/
608static void
609ixgbe_initialize_transmit_units(struct adapter *adapter)
610{
611	struct tx_ring  *txr = adapter->tx_rings;
612	struct ixgbe_hw *hw = &adapter->hw;
613
614	/* Setup the Base and Length of the Tx Descriptor Ring */
615	for (int i = 0; i < adapter->num_queues; i++, txr++) {
616		u64 tdba = txr->txdma.dma_paddr;
617		u32 txctrl = 0;
618		int j = txr->me;
619
620		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
621		    (tdba & 0x00000000ffffffffULL));
622		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
623		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
624		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
625
626		/* Setup the HW Tx Head and Tail descriptor pointers */
627		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
628		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
629
630		/* Cache the tail address */
631		txr->tail = IXGBE_TDT(j);
632
633		/* Disable Head Writeback */
634		/*
635		 * Note: for X550 series devices, these registers are actually
636		 * prefixed with TPH_ isntead of DCA_, but the addresses and
637		 * fields remain the same.
638		 */
639		switch (hw->mac.type) {
640		case ixgbe_mac_82598EB:
641			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
642			break;
643		default:
644			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
645			break;
646		}
647		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
648		switch (hw->mac.type) {
649		case ixgbe_mac_82598EB:
650			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
651			break;
652		default:
653			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
654			break;
655		}
656
657	}
658
659	if (hw->mac.type != ixgbe_mac_82598EB) {
660		u32 dmatxctl, rttdcs;
661
662		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
663		dmatxctl |= IXGBE_DMATXCTL_TE;
664		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
665		/* Disable arbiter to set MTQC */
666		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
667		rttdcs |= IXGBE_RTTDCS_ARBDIS;
668		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
669		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
670		    ixgbe_get_mtqc(adapter->iov_mode));
671		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
672		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
673	}
674
675	return;
676} /* ixgbe_initialize_transmit_units */
677
678/************************************************************************
679 * ixgbe_attach - Device initialization routine
680 *
681 *   Called when the driver is being loaded.
682 *   Identifies the type of hardware, allocates all resources
683 *   and initializes the hardware.
684 *
685 *   return 0 on success, positive on failure
686 ************************************************************************/
687static int
688ixgbe_attach(device_t dev)
689{
690	struct adapter  *adapter;
691	struct ixgbe_hw *hw;
692	int             error = 0;
693	u32             ctrl_ext;
694
695	INIT_DEBUGOUT("ixgbe_attach: begin");
696
697	/* Allocate, clear, and link in our adapter structure */
698	adapter = device_get_softc(dev);
699	adapter->hw.back = adapter;
700	adapter->dev = dev;
701	hw = &adapter->hw;
702
703	/* Core Lock Init*/
704	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
705
706	/* Set up the timer callout */
707	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
708
709	/* Determine hardware revision */
710	hw->vendor_id = pci_get_vendor(dev);
711	hw->device_id = pci_get_device(dev);
712	hw->revision_id = pci_get_revid(dev);
713	hw->subsystem_vendor_id = pci_get_subvendor(dev);
714	hw->subsystem_device_id = pci_get_subdevice(dev);
715
716	/*
717	 * Make sure BUSMASTER is set
718	 */
719	pci_enable_busmaster(dev);
720
721	/* Do base PCI setup - map BAR0 */
722	if (ixgbe_allocate_pci_resources(adapter)) {
723		device_printf(dev, "Allocation of PCI resources failed\n");
724		error = ENXIO;
725		goto err_out;
726	}
727
728	/* let hardware know driver is loaded */
729	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
730	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
731	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
732
733	hw->allow_unsupported_sfp = allow_unsupported_sfp;
734
735	/*
736	 * Initialize the shared code
737	 */
738	if (ixgbe_init_shared_code(hw)) {
739		device_printf(dev, "Unable to initialize the shared code\n");
740		error = ENXIO;
741		goto err_out;
742	}
743
744	if (hw->mbx.ops.init_params)
745		hw->mbx.ops.init_params(hw);
746
747
748	/* Pick up the 82599 settings */
749	if (hw->mac.type != ixgbe_mac_82598EB) {
750		hw->phy.smart_speed = ixgbe_smart_speed;
751		adapter->num_segs = IXGBE_82599_SCATTER;
752	} else
753		adapter->num_segs = IXGBE_82598_SCATTER;
754
755	ixgbe_init_device_features(adapter);
756
757	if (ixgbe_configure_interrupts(adapter)) {
758		error = ENXIO;
759		goto err_out;
760	}
761
762	/* Allocate multicast array memory. */
763	adapter->mta = malloc(sizeof(*adapter->mta) *
764	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
765	if (adapter->mta == NULL) {
766		device_printf(dev, "Can not allocate multicast setup array\n");
767		error = ENOMEM;
768		goto err_out;
769	}
770
771	/* Enable WoL (if supported) */
772	ixgbe_check_wol_support(adapter);
773
774	/* Register for VLAN events */
775	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
776	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
777	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
778	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
779
780	/* Verify adapter fan is still functional (if applicable) */
781	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
782		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
783		ixgbe_check_fan_failure(adapter, esdp, FALSE);
784	}
785
786	/* Ensure SW/FW semaphore is free */
787	ixgbe_init_swfw_semaphore(hw);
788
789	/* Enable EEE power saving */
790	if (adapter->feat_en & IXGBE_FEATURE_EEE)
791		hw->mac.ops.setup_eee(hw, TRUE);
792
793	/* Set an initial default flow control value */
794	hw->fc.requested_mode = ixgbe_flow_control;
795
796	/* Sysctls for limiting the amount of work done in the taskqueues */
797	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
798	    "max number of rx packets to process",
799	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
800
801	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
802	    "max number of tx packets to process",
803	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
804
805	/* Do descriptor calc and sanity checks */
806	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
807	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
808		device_printf(dev, "TXD config issue, using default!\n");
809		adapter->num_tx_desc = DEFAULT_TXD;
810	} else
811		adapter->num_tx_desc = ixgbe_txd;
812
813	/*
814	 * With many RX rings it is easy to exceed the
815	 * system mbuf allocation. Tuning nmbclusters
816	 * can alleviate this.
817	 */
818	if (nmbclusters > 0) {
819		int s;
820		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
821		if (s > nmbclusters) {
822			device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
823			ixgbe_rxd = DEFAULT_RXD;
824		}
825	}
826
827	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
828	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
829		device_printf(dev, "RXD config issue, using default!\n");
830		adapter->num_rx_desc = DEFAULT_RXD;
831	} else
832		adapter->num_rx_desc = ixgbe_rxd;
833
834	/* Allocate our TX/RX Queues */
835	if (ixgbe_allocate_queues(adapter)) {
836		error = ENOMEM;
837		goto err_out;
838	}
839
840	hw->phy.reset_if_overtemp = TRUE;
841	error = ixgbe_reset_hw(hw);
842	hw->phy.reset_if_overtemp = FALSE;
843	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
844		/*
845		 * No optics in this port, set up
846		 * so the timer routine will probe
847		 * for later insertion.
848		 */
849		adapter->sfp_probe = TRUE;
850		error = IXGBE_SUCCESS;
851	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
852		device_printf(dev, "Unsupported SFP+ module detected!\n");
853		error = EIO;
854		goto err_late;
855	} else if (error) {
856		device_printf(dev, "Hardware initialization failed\n");
857		error = EIO;
858		goto err_late;
859	}
860
861	/* Make sure we have a good EEPROM before we read from it */
862	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
863		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
864		error = EIO;
865		goto err_late;
866	}
867
868	/* Setup OS specific network interface */
869	if (ixgbe_setup_interface(dev, adapter) != 0)
870		goto err_late;
871
872	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
873		error = ixgbe_allocate_msix(adapter);
874	else
875		error = ixgbe_allocate_legacy(adapter);
876	if (error)
877		goto err_late;
878
879	error = ixgbe_start_hw(hw);
880	switch (error) {
881	case IXGBE_ERR_EEPROM_VERSION:
882		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
883		break;
884	case IXGBE_ERR_SFP_NOT_SUPPORTED:
885		device_printf(dev, "Unsupported SFP+ Module\n");
886		error = EIO;
887		goto err_late;
888	case IXGBE_ERR_SFP_NOT_PRESENT:
889		device_printf(dev, "No SFP+ Module found\n");
890		/* falls thru */
891	default:
892		break;
893	}
894
895	/* Enable the optics for 82599 SFP+ fiber */
896	ixgbe_enable_tx_laser(hw);
897
898	/* Enable power to the phy. */
899	ixgbe_set_phy_power(hw, TRUE);
900
901	/* Initialize statistics */
902	ixgbe_update_stats_counters(adapter);
903
904	/* Check PCIE slot type/speed/width */
905	ixgbe_get_slot_info(adapter);
906
907	/*
908	 * Do time init and sysctl init here, but
909	 * only on the first port of a bypass adapter.
910	 */
911	ixgbe_bypass_init(adapter);
912
913	/* Set an initial dmac value */
914	adapter->dmac = 0;
915	/* Set initial advertised speeds (if applicable) */
916	adapter->advertise = ixgbe_get_advertise(adapter);
917
918	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
919		ixgbe_define_iov_schemas(dev, &error);
920
921	/* Add sysctls */
922	ixgbe_add_device_sysctls(adapter);
923	ixgbe_add_hw_stats(adapter);
924
925	/* For Netmap */
926	adapter->init_locked = ixgbe_init_locked;
927	adapter->stop_locked = ixgbe_stop;
928
929	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
930		ixgbe_netmap_attach(adapter);
931
932	INIT_DEBUGOUT("ixgbe_attach: end");
933
934	return (0);
935
936err_late:
937	ixgbe_free_transmit_structures(adapter);
938	ixgbe_free_receive_structures(adapter);
939	free(adapter->queues, M_DEVBUF);
940err_out:
941	if (adapter->ifp != NULL)
942		if_free(adapter->ifp);
943	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
944	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
945	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
946	ixgbe_free_pci_resources(adapter);
947	free(adapter->mta, M_IXGBE);
948	IXGBE_CORE_LOCK_DESTROY(adapter);
949
950	return (error);
951} /* ixgbe_attach */
952
953/************************************************************************
954 * ixgbe_check_wol_support
955 *
956 *   Checks whether the adapter's ports are capable of
957 *   Wake On LAN by reading the adapter's NVM.
958 *
959 *   Sets each port's hw->wol_enabled value depending
960 *   on the value read here.
961 ************************************************************************/
962static void
963ixgbe_check_wol_support(struct adapter *adapter)
964{
965	struct ixgbe_hw *hw = &adapter->hw;
966	u16             dev_caps = 0;
967
968	/* Find out WoL support for port */
969	adapter->wol_support = hw->wol_enabled = 0;
970	ixgbe_get_device_caps(hw, &dev_caps);
971	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
972	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
973	     hw->bus.func == 0))
974		adapter->wol_support = hw->wol_enabled = 1;
975
976	/* Save initial wake up filter configuration */
977	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
978
979	return;
980} /* ixgbe_check_wol_support */
981
/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface.
 *
 *   Allocates the ifnet, installs the driver's entry points (init,
 *   ioctl, transmit path), attaches the Ethernet layer, advertises
 *   offload capabilities and registers the supported media types.
 *   Returns 0 on success, -1 if the ifnet cannot be allocated.
 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixgbe_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixgbe_get_counter);
#endif
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Select the TX path: legacy if_start with an ifqueue, or the
	 * multiqueue if_transmit path. The matching locked-start and
	 * ring-empty helpers are installed via file-scope function
	 * pointers used elsewhere in the driver.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
		ifp->if_start = ixgbe_legacy_start;
		IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
		ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
		IFQ_SET_READY(&ifp->if_snd);
		ixgbe_start_locked = ixgbe_legacy_start_locked;
		ixgbe_ring_empty = ixgbe_legacy_ring_empty;
	} else {
		ifp->if_transmit = ixgbe_mq_start;
		ifp->if_qflush = ixgbe_qflush;
		ixgbe_start_locked = ixgbe_mq_start_locked;
		ixgbe_ring_empty = drbr_empty;
	}

	ether_ifattach(ifp, adapter->hw.mac.addr);

	/* if_mtu is valid only after ether_ifattach() */
	adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	                     |  IFCAP_HWCSUM_IPV6
	                     |  IFCAP_TSO
	                     |  IFCAP_LRO
	                     |  IFCAP_VLAN_HWTAGGING
	                     |  IFCAP_VLAN_HWTSO
	                     |  IFCAP_VLAN_HWCSUM
	                     |  IFCAP_JUMBO_MTU
	                     |  IFCAP_VLAN_MTU
	                     |  IFCAP_HWSTATS;

	/* Enable the above capabilities by default */
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
1077
#if __FreeBSD_version >= 1100036
/************************************************************************
 * ixgbe_get_counter
 *
 *   if_get_counter callback: report driver-maintained interface
 *   statistics to the network stack. All values except OQDROPS are
 *   cached in the softc; OQDROPS is summed across the TX buf_rings.
 ************************************************************************/
static uint64_t
ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct adapter *adapter = if_getsoftc(ifp);
	struct tx_ring *txr;
	uint64_t       drops;
	int            i;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	case IFCOUNTER_OMCASTS:
		return (adapter->omcasts);
	case IFCOUNTER_COLLISIONS:
		/* Full duplex only; collisions cannot occur */
		return (0);
	case IFCOUNTER_IQDROPS:
		return (adapter->iqdrops);
	case IFCOUNTER_OQDROPS:
		/* Accumulate buf_ring drops over every TX queue */
		drops = 0;
		txr = adapter->tx_rings;
		for (i = 0; i < adapter->num_queues; i++, txr++)
			drops += txr->br->br_drops;
		return (drops);
	case IFCOUNTER_IERRORS:
		return (adapter->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixgbe_get_counter */
#endif
1121
/************************************************************************
 * ixgbe_add_media_types
 *
 *   Translate the PHY-layer capability bits cached in
 *   adapter->phy_layer into ifmedia(4) entries so userland
 *   (e.g. ifconfig) can list and select the supported media.
 ************************************************************************/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	u64             layer;

	layer = adapter->phy_layer;

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	/* Multispeed fiber modules can also run at 1G */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
#else
	/*
	 * Older ifmedia has no backplane (KR/KX) defines; map each
	 * backplane type to the closest equivalent and log the mapping.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
		device_printf(dev, "Media supported: 2500baseKX\n");
		device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
	}
#endif
	/* 1000baseBX has no ifmedia define at all; report only */
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */
1207
1208/************************************************************************
1209 * ixgbe_is_sfp
1210 ************************************************************************/
1211static inline bool
1212ixgbe_is_sfp(struct ixgbe_hw *hw)
1213{
1214	switch (hw->mac.type) {
1215	case ixgbe_mac_82598EB:
1216		if (hw->phy.type == ixgbe_phy_nl)
1217			return TRUE;
1218		return FALSE;
1219	case ixgbe_mac_82599EB:
1220		switch (hw->mac.ops.get_media_type(hw)) {
1221		case ixgbe_media_type_fiber:
1222		case ixgbe_media_type_fiber_qsfp:
1223			return TRUE;
1224		default:
1225			return FALSE;
1226		}
1227	case ixgbe_mac_X550EM_x:
1228	case ixgbe_mac_X550EM_a:
1229		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1230			return TRUE;
1231		return FALSE;
1232	default:
1233		return FALSE;
1234	}
1235} /* ixgbe_is_sfp */
1236
/************************************************************************
 * ixgbe_config_link
 *
 *   Configure link for the current media. SFP ports defer the work to
 *   the module-insertion / multispeed-fiber taskqueue tasks; all other
 *   media are negotiated and set up directly via the shared-code MAC
 *   ops. Errors from the shared code are silently abandoned (goto out).
 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             autoneg, err = 0;
	bool            sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		if (hw->phy.multispeed_fiber) {
			/* Init module, light the laser, then let the
			 * multispeed task finish link bring-up */
			hw->mac.ops.setup_sfp(hw);
			ixgbe_enable_tx_laser(hw);
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		} else
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			goto out;
		autoneg = hw->phy.autoneg_advertised;
		/* Nothing advertised: fall back to everything the HW offers */
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			goto out;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}
out:

	return;
} /* ixgbe_config_link */
1276
/************************************************************************
 * ixgbe_update_stats_counters - Update board statistics counters.
 *
 *   Folds the hardware's clear-on-read statistics registers into the
 *   running software totals in adapter->stats.pf, then publishes the
 *   OS-visible interface counters via the IXGBE_SET_* macros.
 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ixgbe_hw       *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
	u64                   total_missed_rx = 0;

	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

	/* Per-queue packet / drop counters for the first 16 queues */
	for (int i = 0; i < 16; i++) {
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/*
	 * NOTE(review): missed_rx and total_missed_rx are never updated
	 * in this function (only stats->mpc[0] is accumulated above), so
	 * this subtraction and IXGBE_SET_IQDROPS() below are effectively
	 * zero in this revision.
	 */
	stats->gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* Octet counters are split across low/high registers */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc += bprc;
	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		stats->mprc -= bprc;

	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	/*
	 * Flow-control pause frames are counted as transmitted packets
	 * by the hardware; back them out of the TX packet/octet totals
	 * (each pause frame is a minimum-size Ethernet frame).
	 */
	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	stats->gptc -= total;
	stats->mptc -= total;
	stats->ptc64 -= total;
	stats->gotc -= total * ETHER_MIN_LEN;

	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(adapter, stats->gprc);
	IXGBE_SET_OPACKETS(adapter, stats->gptc);
	IXGBE_SET_IBYTES(adapter, stats->gorc);
	IXGBE_SET_OBYTES(adapter, stats->gotc);
	IXGBE_SET_IMCASTS(adapter, stats->mprc);
	IXGBE_SET_OMCASTS(adapter, stats->mptc);
	IXGBE_SET_COLLISIONS(adapter, 0);
	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
} /* ixgbe_update_stats_counters */
1394
1395/************************************************************************
1396 * ixgbe_add_hw_stats
1397 *
1398 *   Add sysctl variables, one per statistic, to the system.
1399 ************************************************************************/
1400static void
1401ixgbe_add_hw_stats(struct adapter *adapter)
1402{
1403	device_t               dev = adapter->dev;
1404	struct tx_ring         *txr = adapter->tx_rings;
1405	struct rx_ring         *rxr = adapter->rx_rings;
1406	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1407	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1408	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1409	struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1410	struct sysctl_oid      *stat_node, *queue_node;
1411	struct sysctl_oid_list *stat_list, *queue_list;
1412
1413#define QUEUE_NAME_LEN 32
1414	char                   namebuf[QUEUE_NAME_LEN];
1415
1416	/* Driver Statistics */
1417	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1418	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1419	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1420	    CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1421	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1422	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1423	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1424	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1425
1426	for (int i = 0; i < adapter->num_queues; i++, txr++) {
1427		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1428		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1429		    CTLFLAG_RD, NULL, "Queue Name");
1430		queue_list = SYSCTL_CHILDREN(queue_node);
1431
1432		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1433		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
1434		    sizeof(&adapter->queues[i]),
1435		    ixgbe_sysctl_interrupt_rate_handler, "IU",
1436		    "Interrupt Rate");
1437		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1438		    CTLFLAG_RD, &(adapter->queues[i].irqs),
1439		    "irqs on this queue");
1440		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1441		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1442		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1443		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1444		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1445		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1446		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1447		    CTLFLAG_RD, &txr->tso_tx, "TSO");
1448		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1449		    CTLFLAG_RD, &txr->no_tx_dma_setup,
1450		    "Driver tx dma failure in xmit");
1451		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
1452		    CTLFLAG_RD, &txr->no_desc_avail,
1453		    "Queue No Descriptor Available");
1454		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1455		    CTLFLAG_RD, &txr->total_packets,
1456		    "Queue Packets Transmitted");
1457		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1458		    CTLFLAG_RD, &txr->br->br_drops,
1459		    "Packets dropped in buf_ring");
1460	}
1461
1462	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1463		struct lro_ctrl *lro = &rxr->lro;
1464
1465		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1466		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1467		    CTLFLAG_RD, NULL, "Queue Name");
1468		queue_list = SYSCTL_CHILDREN(queue_node);
1469
1470		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1471		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1472		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1473		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1474		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1475		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1476		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1477		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1478		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1479		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1480		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1481		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1482		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1483		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1484		SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
1485		    CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
1486		SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
1487		    CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
1488	}
1489
1490	/* MAC stats get their own sub node */
1491
1492	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1493	    CTLFLAG_RD, NULL, "MAC Statistics");
1494	stat_list = SYSCTL_CHILDREN(stat_node);
1495
1496	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1497	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1498	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1499	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1500	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1501	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
1502	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1503	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1504	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1505	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1506	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1507	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1508	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1509	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1510	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1511	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1512
1513	/* Flow Control stats */
1514	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1515	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1516	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1517	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1518	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1519	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1520	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1521	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1522
1523	/* Packet Reception Stats */
1524	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1525	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
1526	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1527	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1528	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1529	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1530	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1531	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1532	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1533	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1534	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1535	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1536	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1537	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1538	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1539	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1540	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1541	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1542	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1543	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1544	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1545	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1546	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1547	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
1548	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1549	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1550	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1551	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1552	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1553	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1554	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1555	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
1556	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1557	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1558	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1559	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
1560	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1561	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
1562
1563	/* Packet Transmission Stats */
1564	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1565	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1566	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1567	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1568	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1569	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1570	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1571	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1572	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1573	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1574	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1575	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1576	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1577	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1578	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1579	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1580	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1581	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1582	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1583	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1584	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1585	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1586	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1587	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1588} /* ixgbe_add_hw_stats */
1589
1590/************************************************************************
1591 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1592 *
1593 *   Retrieves the TDH value from the hardware
1594 ************************************************************************/
1595static int
1596ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1597{
1598	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1599	int            error;
1600	unsigned int   val;
1601
1602	if (!txr)
1603		return (0);
1604
1605	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1606	error = sysctl_handle_int(oidp, &val, 0, req);
1607	if (error || !req->newptr)
1608		return error;
1609
1610	return (0);
1611} /* ixgbe_sysctl_tdh_handler */
1612
1613/************************************************************************
1614 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1615 *
1616 *   Retrieves the TDT value from the hardware
1617 ************************************************************************/
1618static int
1619ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1620{
1621	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1622	int            error;
1623	unsigned int   val;
1624
1625	if (!txr)
1626		return (0);
1627
1628	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1629	error = sysctl_handle_int(oidp, &val, 0, req);
1630	if (error || !req->newptr)
1631		return error;
1632
1633	return (0);
1634} /* ixgbe_sysctl_tdt_handler */
1635
1636/************************************************************************
1637 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1638 *
1639 *   Retrieves the RDH value from the hardware
1640 ************************************************************************/
1641static int
1642ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1643{
1644	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1645	int            error;
1646	unsigned int   val;
1647
1648	if (!rxr)
1649		return (0);
1650
1651	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1652	error = sysctl_handle_int(oidp, &val, 0, req);
1653	if (error || !req->newptr)
1654		return error;
1655
1656	return (0);
1657} /* ixgbe_sysctl_rdh_handler */
1658
1659/************************************************************************
1660 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1661 *
1662 *   Retrieves the RDT value from the hardware
1663 ************************************************************************/
1664static int
1665ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1666{
1667	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1668	int            error;
1669	unsigned int   val;
1670
1671	if (!rxr)
1672		return (0);
1673
1674	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1675	error = sysctl_handle_int(oidp, &val, 0, req);
1676	if (error || !req->newptr)
1677		return error;
1678
1679	return (0);
1680} /* ixgbe_sysctl_rdt_handler */
1681
1682/************************************************************************
1683 * ixgbe_register_vlan
1684 *
1685 *   Run via vlan config EVENT, it enables us to use the
1686 *   HW Filter table since we can get the vlan id. This
1687 *   just creates the entry in the soft version of the
1688 *   VFTA, init will repopulate the real table.
1689 ************************************************************************/
1690static void
1691ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1692{
1693	struct adapter *adapter = ifp->if_softc;
1694	u16            index, bit;
1695
1696	if (ifp->if_softc != arg)   /* Not our event */
1697		return;
1698
1699	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1700		return;
1701
1702	IXGBE_CORE_LOCK(adapter);
1703	index = (vtag >> 5) & 0x7F;
1704	bit = vtag & 0x1F;
1705	adapter->shadow_vfta[index] |= (1 << bit);
1706	++adapter->num_vlans;
1707	ixgbe_setup_vlan_hw_support(adapter);
1708	IXGBE_CORE_UNLOCK(adapter);
1709} /* ixgbe_register_vlan */
1710
1711/************************************************************************
1712 * ixgbe_unregister_vlan
1713 *
1714 *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
1715 ************************************************************************/
1716static void
1717ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1718{
1719	struct adapter *adapter = ifp->if_softc;
1720	u16            index, bit;
1721
1722	if (ifp->if_softc != arg)
1723		return;
1724
1725	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1726		return;
1727
1728	IXGBE_CORE_LOCK(adapter);
1729	index = (vtag >> 5) & 0x7F;
1730	bit = vtag & 0x1F;
1731	adapter->shadow_vfta[index] &= ~(1 << bit);
1732	--adapter->num_vlans;
1733	/* Re-init to load the changes */
1734	ixgbe_setup_vlan_hw_support(adapter);
1735	IXGBE_CORE_UNLOCK(adapter);
1736} /* ixgbe_unregister_vlan */
1737
1738/************************************************************************
1739 * ixgbe_setup_vlan_hw_support
1740 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ifnet    *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring  *rxr;
	int             i;
	u32             ctrl;


	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (adapter->num_vlans == 0)
		return;

	/* Setup the queues for vlans */
	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* On 82599 the VLAN enable is per/queue in RXDCTL */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		}
		/* Tell the RX path that hardware is stripping the tag. */
		rxr->vtag_strip = TRUE;
	}

	/*
	 * Without hardware VLAN filtering there is nothing more to
	 * program; per-queue tag stripping (above) is sufficient.
	 */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	/* On 82598 VLAN stripping is global, set in VLNCTRL. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
1793
1794/************************************************************************
1795 * ixgbe_get_slot_info
1796 *
1797 *   Get the width and transaction speed of
1798 *   the slot this adapter is plugged into.
1799 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t              dev = adapter->dev;
	struct ixgbe_hw       *hw = &adapter->hw;
	u32                   offset;
	u16                   link;
	int                   bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		/* Bridged parts: probe the parent PCI bridge instead. */
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	/* First hop: device's parent is the internal bridge's pcib. */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Second hop: the pcib for the physical expansion slot. */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
	/* Let shared code decode the link status into bus.speed/width. */
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	     "Unknown"));

	if (bus_info_valid) {
		/* Warn when the slot cannot feed a full 10GbE line rate. */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		/* Quad-port part needs more headroom: x8 Gen3. */
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
1891
1892/************************************************************************
1893 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1894 ************************************************************************/
1895static inline void
1896ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1897{
1898	struct ixgbe_hw *hw = &adapter->hw;
1899	u64             queue = (u64)(1 << vector);
1900	u32             mask;
1901
1902	if (hw->mac.type == ixgbe_mac_82598EB) {
1903		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1904		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1905	} else {
1906		mask = (queue & 0xFFFFFFFF);
1907		if (mask)
1908			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1909		mask = (queue >> 32);
1910		if (mask)
1911			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1912	}
1913} /* ixgbe_enable_queue */
1914
1915/************************************************************************
1916 * ixgbe_disable_queue
1917 ************************************************************************/
1918static inline void
1919ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1920{
1921	struct ixgbe_hw *hw = &adapter->hw;
1922	u64             queue = (u64)(1 << vector);
1923	u32             mask;
1924
1925	if (hw->mac.type == ixgbe_mac_82598EB) {
1926		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1927		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1928	} else {
1929		mask = (queue & 0xFFFFFFFF);
1930		if (mask)
1931			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1932		mask = (queue >> 32);
1933		if (mask)
1934			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1935	}
1936} /* ixgbe_disable_queue */
1937
1938/************************************************************************
1939 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
1940 ************************************************************************/
void
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring  *txr = que->txr;
	struct rx_ring  *rxr = que->rxr;
	bool            more;
	u32             newitr = 0;


	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Mask this queue's vector until processing is done. */
	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	/* more == TRUE means RX work remains; re-dispatch below. */
	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/* Drain anything that queued up while TX was busy. */
	if (!ixgbe_ring_empty(ifp, txr->br))
		ixgbe_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == FALSE)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* newitr = max average frame size seen on this queue. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* 82598 packs the interval in both halves of EITR. */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		newitr |= newitr << 16;
	else
		newitr |= IXGBE_EITR_CNT_WDIS;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Defer remaining RX work to the taskqueue, else re-arm. */
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return;
} /* ixgbe_msix_que */
2025
2026/************************************************************************
2027 * ixgbe_media_status - Media Ioctl callback
2028 *
2029 *   Called whenever the user queries the status of
2030 *   the interface using ifconfig.
2031 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter  *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int             layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link_active/link_speed before reporting. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report media valid but not active. */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Map (physical layer, link speed) to an ifmedia subtype. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
#ifndef IFM_ETH_XTYPE
	/* Older ifmedia: approximate backplane types with fiber ones. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	/* Newer ifmedia: real backplane (KR/KX4/KX) subtypes exist. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
#endif

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2191
2192/************************************************************************
2193 * ixgbe_media_change - Media Ioctl callback
2194 *
2195 *   Called when the user changes speed/duplex using
2196 *   media/mediopt option with ifconfig.
2197 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter   *adapter = ifp->if_softc;
	struct ifmedia   *ifm = &adapter->media;
	struct ixgbe_hw  *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane links are fixed; no user-selectable media. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	/* Translate the requested subtype into a link-speed mask. */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
		case IFM_10G_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_10G_LRM:
		case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
		case IFM_10G_SR: /* KR, too */
		case IFM_10G_CX4: /* KX4 */
#else
		case IFM_10G_KR:
		case IFM_10G_KX4:
#endif
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
#ifndef IFM_ETH_XTYPE
		case IFM_1000_CX: /* KX */
#else
		case IFM_1000_KX:
#endif
		case IFM_1000_LX:
		case IFM_1000_SX:
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_1000_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_10G_TWINAX:
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_100_TX:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			break;
		case IFM_10_T:
			speed |= IXGBE_LINK_SPEED_10_FULL;
			break;
		default:
			goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the advertised speeds in the sysctl encoding:
	 * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M.
	 */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
2279
2280/************************************************************************
2281 * ixgbe_set_promisc
2282 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int          mcnt = 0;
	u32          rctl;

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* Start from unicast promiscuous off; re-enable below if set. */
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		struct ifmultiaddr *ifma;
		/* Count link-level multicast memberships (capped). */
#if __FreeBSD_version < 800000
		IF_ADDR_LOCK(ifp);
#else
		if_maddr_rlock(ifp);
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
#else
		if_maddr_runlock(ifp);
#endif
	}
	/* Filter table suffices; multicast promiscuous not needed. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/* Re-write FCTRL if the interface asks for promiscuous modes. */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
} /* ixgbe_set_promisc */
2327
2328/************************************************************************
2329 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2330 ************************************************************************/
static void
ixgbe_msix_link(void *arg)
{
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             eicr, eicr_mask;
	s32             retval;

	++adapter->link_irq;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/* NOTE(review): cause is read via EICS here — confirm intended. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC) {
		/* Mask LSC until the link task has processed it. */
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		taskqueue_enqueue(adapter->tq, &adapter->link_task);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Flow Director table needs a reinit pass. */
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!!  Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals overtemp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the dedicated TS cause bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: schedule the mod task. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}

		/* 82599 SFP+ multispeed fiber: schedule the msf task. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
} /* ixgbe_msix_link */
2445
2446/************************************************************************
2447 * ixgbe_sysctl_interrupt_rate_handler
2448 ************************************************************************/
2449static int
2450ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2451{
2452	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
2453	int             error;
2454	unsigned int    reg, usec, rate;
2455
2456	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2457	usec = ((reg & 0x0FF8) >> 3);
2458	if (usec > 0)
2459		rate = 500000 / usec;
2460	else
2461		rate = 0;
2462	error = sysctl_handle_int(oidp, &rate, 0, req);
2463	if (error || !req->newptr)
2464		return error;
2465	reg &= ~0xfff; /* default, no limitation */
2466	ixgbe_max_interrupt_rate = 0;
2467	if (rate > 0 && rate < 500000) {
2468		if (rate < 1000)
2469			rate = 1000;
2470		ixgbe_max_interrupt_rate = rate;
2471		reg |= ((4000000/rate) & 0xff8);
2472	}
2473	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2474
2475	return (0);
2476} /* ixgbe_sysctl_interrupt_rate_handler */
2477
2478/************************************************************************
2479 * ixgbe_add_device_sysctls
2480 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t               dev = adapter->dev;
	struct ixgbe_hw        *hw = &adapter->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);

	/* Seed adaptive interrupt moderation from the loader tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &adapter->enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
	    IXGBE_SYSCTL_DESC_ADV_SPEED);

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
	    "I", "PCI Power State");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
		    "I", "DMA Coalesce");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
		    "I", "Enable/Disable Wake Up Filters");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		/* Group external-PHY sensors under a "phy" subtree. */
		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
		    CTLFLAG_RD, NULL, "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
		    "I", "Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
		    ixgbe_sysctl_phy_overtemp_occurred, "I",
		    "External PHY High Temperature Event Occurred");
	}

	/* EEE sysctl only where the hardware is capable. */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
	}
} /* ixgbe_add_device_sysctls */
2556
2557/************************************************************************
2558 * ixgbe_allocate_pci_resources
2559 ************************************************************************/
2560static int
2561ixgbe_allocate_pci_resources(struct adapter *adapter)
2562{
2563	device_t dev = adapter->dev;
2564	int      rid;
2565
2566	rid = PCIR_BAR(0);
2567	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2568	    RF_ACTIVE);
2569
2570	if (!(adapter->pci_mem)) {
2571		device_printf(dev, "Unable to allocate bus resource: memory\n");
2572		return (ENXIO);
2573	}
2574
2575	/* Save bus_space values for READ/WRITE_REG macros */
2576	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2577	adapter->osdep.mem_bus_space_handle =
2578	    rman_get_bushandle(adapter->pci_mem);
2579	/* Set hw values for shared code */
2580	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2581
2582	return (0);
2583} /* ixgbe_allocate_pci_resources */
2584
2585/************************************************************************
2586 * ixgbe_detach - Device removal routine
2587 *
2588 *   Called when the driver is being removed.
2589 *   Stops the adapter and deallocates all the resources
2590 *   that were allocated for driver operation.
2591 *
2592 *   return 0 on success, positive on failure
2593 ************************************************************************/
static int
ixgbe_detach(device_t dev)
{
	struct adapter  *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring  *txr = adapter->tx_rings;
	u32             ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Refuse to detach while VFs are attached via SR-IOV */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/* Detach from the stack before quiescing the hardware */
	ether_ifdetach(adapter->ifp);
	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	/* NOTE(review): return value of setup_low_power_mode is ignored here */
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free the per-queue taskqueues (TX task only if not legacy) */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
			if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
				taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_drain(adapter->tq, &adapter->phy_task);
		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
			taskqueue_drain(adapter->tq, &adapter->fdir_task);
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Wait for any in-flight timer callout to finish */
	callout_drain(&adapter->timer);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	/* Release bus resources, then the ifnet and software rings */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_IXGBE);

	/* Core mutex is destroyed last; nothing may take it after this */
	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
2672
2673/************************************************************************
2674 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2675 *
2676 *   Prepare the adapter/port for LPLU and/or WoL
2677 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	s32             error = 0;

	/* Caller must hold the core lock (detach/suspend/shutdown paths) */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		/* PHY resets are suppressed while we stop and enter LPLU */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev, "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	/* 0 on success, or the enter_lplu error code */
	return error;
} /* ixgbe_setup_low_power_mode */
2724
2725/************************************************************************
2726 * ixgbe_shutdown - Shutdown entry point
2727 ************************************************************************/
2728static int
2729ixgbe_shutdown(device_t dev)
2730{
2731	struct adapter *adapter = device_get_softc(dev);
2732	int            error = 0;
2733
2734	INIT_DEBUGOUT("ixgbe_shutdown: begin");
2735
2736	IXGBE_CORE_LOCK(adapter);
2737	error = ixgbe_setup_low_power_mode(adapter);
2738	IXGBE_CORE_UNLOCK(adapter);
2739
2740	return (error);
2741} /* ixgbe_shutdown */
2742
2743/************************************************************************
2744 * ixgbe_suspend
2745 *
2746 *   From D0 to D3
2747 ************************************************************************/
2748static int
2749ixgbe_suspend(device_t dev)
2750{
2751	struct adapter *adapter = device_get_softc(dev);
2752	int            error = 0;
2753
2754	INIT_DEBUGOUT("ixgbe_suspend: begin");
2755
2756	IXGBE_CORE_LOCK(adapter);
2757
2758	error = ixgbe_setup_low_power_mode(adapter);
2759
2760	IXGBE_CORE_UNLOCK(adapter);
2761
2762	return (error);
2763} /* ixgbe_suspend */
2764
2765/************************************************************************
2766 * ixgbe_resume
2767 *
2768 *   From D3 to D0
2769 ************************************************************************/
2770static int
2771ixgbe_resume(device_t dev)
2772{
2773	struct adapter  *adapter = device_get_softc(dev);
2774	struct ifnet    *ifp = adapter->ifp;
2775	struct ixgbe_hw *hw = &adapter->hw;
2776	u32             wus;
2777
2778	INIT_DEBUGOUT("ixgbe_resume: begin");
2779
2780	IXGBE_CORE_LOCK(adapter);
2781
2782	/* Read & clear WUS register */
2783	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2784	if (wus)
2785		device_printf(dev, "Woken up by (WUS): %#010x\n",
2786		    IXGBE_READ_REG(hw, IXGBE_WUS));
2787	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2788	/* And clear WUFC until next low-power transition */
2789	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2790
2791	/*
2792	 * Required after D3->D0 transition;
2793	 * will re-advertise all previous advertised speeds
2794	 */
2795	if (ifp->if_flags & IFF_UP)
2796		ixgbe_init_locked(adapter);
2797
2798	IXGBE_CORE_UNLOCK(adapter);
2799
2800	return (0);
2801} /* ixgbe_resume */
2802
2803/************************************************************************
2804 * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
2805 *
2806 *   Takes the ifnet's if_capenable flags (e.g. set by the user using
2807 *   ifconfig) and indicates to the OS via the ifnet's if_hwassist
2808 *   field what mbuf offload flags the driver will understand.
2809 ************************************************************************/
2810static void
2811ixgbe_set_if_hwassist(struct adapter *adapter)
2812{
2813	struct ifnet *ifp = adapter->ifp;
2814
2815	ifp->if_hwassist = 0;
2816#if __FreeBSD_version >= 1000000
2817	if (ifp->if_capenable & IFCAP_TSO4)
2818		ifp->if_hwassist |= CSUM_IP_TSO;
2819	if (ifp->if_capenable & IFCAP_TSO6)
2820		ifp->if_hwassist |= CSUM_IP6_TSO;
2821	if (ifp->if_capenable & IFCAP_TXCSUM) {
2822		ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
2823		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2824			ifp->if_hwassist |= CSUM_IP_SCTP;
2825	}
2826	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
2827		ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
2828		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2829			ifp->if_hwassist |= CSUM_IP6_SCTP;
2830	}
2831#else
2832	if (ifp->if_capenable & IFCAP_TSO)
2833		ifp->if_hwassist |= CSUM_TSO;
2834	if (ifp->if_capenable & IFCAP_TXCSUM) {
2835		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
2836		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2837			ifp->if_hwassist |= CSUM_SCTP;
2838	}
2839#endif
2840} /* ixgbe_set_if_hwassist */
2841
2842/************************************************************************
2843 * ixgbe_init_locked - Init entry point
2844 *
2845 *   Used in two ways: It is used by the stack as an init
2846 *   entry point in network interface structure. It is also
2847 *   used by the driver as a hw/sw initialization routine to
2848 *   get to a consistent state.
2849 *
2850 *   return 0 on success, positive on failure
2851 ************************************************************************/
void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet    *ifp = adapter->ifp;
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring  *txr;
	struct rx_ring  *rxr;
	u32             txdctl, mhadd;
	u32             rxdctl, rxctrl;
	u32             ctrl_ext;
	int             err = 0;

	/* Caller must hold the core lock; see ixgbe_init() for the wrapper */
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and the local timer before reprogramming */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_iov(adapter);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/*
		 * Poll until the queue reports enabled.
		 * NOTE(review): j is deliberately not reset per queue, so the
		 * 10x1ms wait budget is shared across ALL queues — confirm
		 * this is intentional before changing it.
		 */
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
} /* ixgbe_init_locked */
3072
3073/************************************************************************
3074 * ixgbe_init
3075 ************************************************************************/
static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	/* Locked wrapper around the real init routine */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_init */
3087
3088/************************************************************************
3089 * ixgbe_set_ivar
3090 *
3091 *   Setup the correct IVAR register for a particular MSI-X interrupt
3092 *     (yes this is all very magic and confusing :)
3093 *    - entry is the register array entry
3094 *    - vector is the MSI-X vector for this queue
3095 *    - type is RX/TX/MISC
3096 ************************************************************************/
3097static void
3098ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3099{
3100	struct ixgbe_hw *hw = &adapter->hw;
3101	u32 ivar, index;
3102
3103	vector |= IXGBE_IVAR_ALLOC_VAL;
3104
3105	switch (hw->mac.type) {
3106
3107	case ixgbe_mac_82598EB:
3108		if (type == -1)
3109			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3110		else
3111			entry += (type * 64);
3112		index = (entry >> 2) & 0x1F;
3113		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3114		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3115		ivar |= (vector << (8 * (entry & 0x3)));
3116		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3117		break;
3118
3119	case ixgbe_mac_82599EB:
3120	case ixgbe_mac_X540:
3121	case ixgbe_mac_X550:
3122	case ixgbe_mac_X550EM_x:
3123	case ixgbe_mac_X550EM_a:
3124		if (type == -1) { /* MISC IVAR */
3125			index = (entry & 1) * 8;
3126			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3127			ivar &= ~(0xFF << index);
3128			ivar |= (vector << index);
3129			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3130		} else {          /* RX/TX IVARS */
3131			index = (16 * (entry & 1)) + (8 * type);
3132			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3133			ivar &= ~(0xFF << index);
3134			ivar |= (vector << index);
3135			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3136		}
3137
3138	default:
3139		break;
3140	}
3141} /* ixgbe_set_ivar */
3142
3143/************************************************************************
3144 * ixgbe_configure_ivars
3145 ************************************************************************/
3146static void
3147ixgbe_configure_ivars(struct adapter *adapter)
3148{
3149	struct ix_queue *que = adapter->queues;
3150	u32             newitr;
3151
3152	if (ixgbe_max_interrupt_rate > 0)
3153		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3154	else {
3155		/*
3156		 * Disable DMA coalescing if interrupt moderation is
3157		 * disabled.
3158		 */
3159		adapter->dmac = 0;
3160		newitr = 0;
3161	}
3162
3163	for (int i = 0; i < adapter->num_queues; i++, que++) {
3164		struct rx_ring *rxr = &adapter->rx_rings[i];
3165		struct tx_ring *txr = &adapter->tx_rings[i];
3166		/* First the RX queue entry */
3167		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3168		/* ... and the TX */
3169		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3170		/* Set an Initial EITR value */
3171		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3172	}
3173
3174	/* For the Link interrupt */
3175	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3176} /* ixgbe_configure_ivars */
3177
3178/************************************************************************
3179 * ixgbe_config_gpie
3180 ************************************************************************/
3181static void
3182ixgbe_config_gpie(struct adapter *adapter)
3183{
3184	struct ixgbe_hw *hw = &adapter->hw;
3185	u32             gpie;
3186
3187	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3188
3189	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3190		/* Enable Enhanced MSI-X mode */
3191		gpie |= IXGBE_GPIE_MSIX_MODE
3192		     |  IXGBE_GPIE_EIAME
3193		     |  IXGBE_GPIE_PBA_SUPPORT
3194		     |  IXGBE_GPIE_OCD;
3195	}
3196
3197	/* Fan Failure Interrupt */
3198	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3199		gpie |= IXGBE_SDP1_GPIEN;
3200
3201	/* Thermal Sensor Interrupt */
3202	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3203		gpie |= IXGBE_SDP0_GPIEN_X540;
3204
3205	/* Link detection */
3206	switch (hw->mac.type) {
3207	case ixgbe_mac_82599EB:
3208		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3209		break;
3210	case ixgbe_mac_X550EM_x:
3211	case ixgbe_mac_X550EM_a:
3212		gpie |= IXGBE_SDP0_GPIEN_X540;
3213		break;
3214	default:
3215		break;
3216	}
3217
3218	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3219
3220	return;
3221} /* ixgbe_config_gpie */
3222
3223/************************************************************************
3224 * ixgbe_config_delay_values
3225 *
3226 *   Requires adapter->max_frame_size to be set.
3227 ************************************************************************/
3228static void
3229ixgbe_config_delay_values(struct adapter *adapter)
3230{
3231	struct ixgbe_hw *hw = &adapter->hw;
3232	u32             rxpb, frame, size, tmp;
3233
3234	frame = adapter->max_frame_size;
3235
3236	/* Calculate High Water */
3237	switch (hw->mac.type) {
3238	case ixgbe_mac_X540:
3239	case ixgbe_mac_X550:
3240	case ixgbe_mac_X550EM_x:
3241	case ixgbe_mac_X550EM_a:
3242		tmp = IXGBE_DV_X540(frame, frame);
3243		break;
3244	default:
3245		tmp = IXGBE_DV(frame, frame);
3246		break;
3247	}
3248	size = IXGBE_BT2KB(tmp);
3249	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3250	hw->fc.high_water[0] = rxpb - size;
3251
3252	/* Now calculate Low Water */
3253	switch (hw->mac.type) {
3254	case ixgbe_mac_X540:
3255	case ixgbe_mac_X550:
3256	case ixgbe_mac_X550EM_x:
3257	case ixgbe_mac_X550EM_a:
3258		tmp = IXGBE_LOW_DV_X540(frame);
3259		break;
3260	default:
3261		tmp = IXGBE_LOW_DV(frame);
3262		break;
3263	}
3264	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3265
3266	hw->fc.pause_time = IXGBE_FC_PAUSE;
3267	hw->fc.send_xon = TRUE;
3268} /* ixgbe_config_delay_values */
3269
3270/************************************************************************
3271 * ixgbe_set_multi - Multicast Update
3272 *
3273 *   Called whenever multicast address list is updated.
3274 ************************************************************************/
3275static void
3276ixgbe_set_multi(struct adapter *adapter)
3277{
3278	struct ifmultiaddr   *ifma;
3279	struct ixgbe_mc_addr *mta;
3280	struct ifnet         *ifp = adapter->ifp;
3281	u8                   *update_ptr;
3282	int                  mcnt = 0;
3283	u32                  fctrl;
3284
3285	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
3286
3287	mta = adapter->mta;
3288	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3289
3290#if __FreeBSD_version < 800000
3291	IF_ADDR_LOCK(ifp);
3292#else
3293	if_maddr_rlock(ifp);
3294#endif
3295	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3296		if (ifma->ifma_addr->sa_family != AF_LINK)
3297			continue;
3298		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3299			break;
3300		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
3301		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3302		mta[mcnt].vmdq = adapter->pool;
3303		mcnt++;
3304	}
3305#if __FreeBSD_version < 800000
3306	IF_ADDR_UNLOCK(ifp);
3307#else
3308	if_maddr_runlock(ifp);
3309#endif
3310
3311	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3312	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3313	if (ifp->if_flags & IFF_PROMISC)
3314		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3315	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3316	    ifp->if_flags & IFF_ALLMULTI) {
3317		fctrl |= IXGBE_FCTRL_MPE;
3318		fctrl &= ~IXGBE_FCTRL_UPE;
3319	} else
3320		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3321
3322	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3323
3324	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3325		update_ptr = (u8 *)mta;
3326		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3327		    ixgbe_mc_array_itr, TRUE);
3328	}
3329
3330	return;
3331} /* ixgbe_set_multi */
3332
3333/************************************************************************
3334 * ixgbe_mc_array_itr
3335 *
3336 *   An iterator function needed by the multicast shared code.
3337 *   It feeds the shared code routine the addresses in the
3338 *   array of ixgbe_set_multi() one by one.
3339 ************************************************************************/
3340static u8 *
3341ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3342{
3343	struct ixgbe_mc_addr *mta;
3344
3345	mta = (struct ixgbe_mc_addr *)*update_ptr;
3346	*vmdq = mta->vmdq;
3347
3348	*update_ptr = (u8*)(mta + 1);
3349
3350	return (mta->addr);
3351} /* ixgbe_mc_array_itr */
3352
3353/************************************************************************
3354 * ixgbe_local_timer - Timer routine
3355 *
3356 *   Checks for link status, updates statistics,
3357 *   and runs the watchdog check.
3358 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter  *adapter = arg;
	device_t        dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64             queues = 0;
	int             hung = 0;

	/* Callout is initialized with the core mutex, so it is held here */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 *
		 * NOTE(review): the hung test reads que->busy while the
		 * mark below sets que->txr->busy — confirm both counters
		 * are intended (they are distinct fields).
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);
	}

out:
	/* Reschedule ourselves to run again in one second */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: count the event and reinitialize the adapter */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
3427
3428/************************************************************************
3429 * ixgbe_sfp_probe
3430 *
3431 *   Determine if a port had optics inserted.
3432 ************************************************************************/
3433static bool
3434ixgbe_sfp_probe(struct adapter *adapter)
3435{
3436	struct ixgbe_hw *hw = &adapter->hw;
3437	device_t        dev = adapter->dev;
3438	bool            result = FALSE;
3439
3440	if ((hw->phy.type == ixgbe_phy_nl) &&
3441	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3442		s32 ret = hw->phy.ops.identify_sfp(hw);
3443		if (ret)
3444			goto out;
3445		ret = hw->phy.ops.reset(hw);
3446		adapter->sfp_probe = FALSE;
3447		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3448			device_printf(dev, "Unsupported SFP+ module detected!");
3449			device_printf(dev,
3450			    "Reload driver with supported module.\n");
3451			goto out;
3452		} else
3453			device_printf(dev, "SFP+ module detected!\n");
3454		/* We now have supported optics */
3455		result = TRUE;
3456	}
3457out:
3458
3459	return (result);
3460} /* ixgbe_sfp_probe */
3461
3462/************************************************************************
3463 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3464 ************************************************************************/
3465static void
3466ixgbe_handle_mod(void *context, int pending)
3467{
3468	struct adapter  *adapter = context;
3469	struct ixgbe_hw *hw = &adapter->hw;
3470	device_t        dev = adapter->dev;
3471	u32             err, cage_full = 0;
3472
3473	if (adapter->hw.need_crosstalk_fix) {
3474		switch (hw->mac.type) {
3475		case ixgbe_mac_82599EB:
3476			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3477			    IXGBE_ESDP_SDP2;
3478			break;
3479		case ixgbe_mac_X550EM_x:
3480		case ixgbe_mac_X550EM_a:
3481			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3482			    IXGBE_ESDP_SDP0;
3483			break;
3484		default:
3485			break;
3486		}
3487
3488		if (!cage_full)
3489			return;
3490	}
3491
3492	err = hw->phy.ops.identify_sfp(hw);
3493	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3494		device_printf(dev,
3495		    "Unsupported SFP+ module type was detected.\n");
3496		return;
3497	}
3498
3499	err = hw->mac.ops.setup_sfp(hw);
3500	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3501		device_printf(dev,
3502		    "Setup failure - unsupported SFP+ module type.\n");
3503		return;
3504	}
3505	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3506} /* ixgbe_handle_mod */
3507
3508
3509/************************************************************************
3510 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3511 ************************************************************************/
static void
ixgbe_handle_msf(void *context, int pending)
{
	struct adapter  *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             autoneg;
	bool            negotiate;

	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/*
	 * If no speeds are explicitly advertised, fall back to the
	 * full capability set reported by the hardware.
	 */
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	/* Re-run link setup with the (possibly widened) speed mask */
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixgbe_handle_msf */
3534
3535/************************************************************************
3536 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3537 ************************************************************************/
3538static void
3539ixgbe_handle_phy(void *context, int pending)
3540{
3541	struct adapter  *adapter = context;
3542	struct ixgbe_hw *hw = &adapter->hw;
3543	int             error;
3544
3545	error = hw->phy.ops.handle_lasi(hw);
3546	if (error == IXGBE_ERR_OVERTEMP)
3547		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3548	else if (error)
3549		device_printf(adapter->dev,
3550		    "Error handling LASI interrupt: %d\n", error);
3551} /* ixgbe_handle_phy */
3552
3553/************************************************************************
3554 * ixgbe_stop - Stop the hardware
3555 *
3556 *   Disables all traffic on the adapter by issuing a
3557 *   global reset on the MAC and deallocates TX/RX buffers.
3558 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet    *ifp;
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	/* Caller must hold the core lock */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Quiesce interrupts and the watchdog before touching the MAC */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * NOTE(review): adapter_stopped is cleared here, presumably so
	 * the following ixgbe_stop_adapter() runs its full shutdown
	 * sequence after the reset -- confirm against the shared
	 * ixgbe_stop_adapter() implementation.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	/* Additional 82599-only link shutdown step */
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
3594
3595/************************************************************************
3596 * ixgbe_update_link_status - Update OS on link state
3597 *
3598 * Note: Only updates the OS on the cached link state.
3599 *       The real check of the hardware only happens with
3600 *       a link interrupt.
3601 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t     dev = adapter->dev;

	if (adapter->link_up) {
		/* Only act on a down -> up transition */
		if (adapter->link_active == FALSE) {
			/* link_speed value 128 is printed as 10 Gbps,
			 * anything else as 1 Gbps */
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			/* Update DMA coalescing config */
			ixgbe_config_dmac(adapter);
			if_link_state_change(ifp, LINK_STATE_UP);
			/* Tell the VFs the link state changed */
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	} else { /* Link down */
		/* Only act on an up -> down transition */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
				ixgbe_ping_all_vfs(adapter);
		}
	}

	return;
} /* ixgbe_update_link_status */
3636
3637/************************************************************************
3638 * ixgbe_config_dmac - Configure DMA Coalescing
3639 ************************************************************************/
3640static void
3641ixgbe_config_dmac(struct adapter *adapter)
3642{
3643	struct ixgbe_hw          *hw = &adapter->hw;
3644	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3645
3646	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3647		return;
3648
3649	if (dcfg->watchdog_timer ^ adapter->dmac ||
3650	    dcfg->link_speed ^ adapter->link_speed) {
3651		dcfg->watchdog_timer = adapter->dmac;
3652		dcfg->fcoe_en = false;
3653		dcfg->link_speed = adapter->link_speed;
3654		dcfg->num_tcs = 1;
3655
3656		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3657		    dcfg->watchdog_timer, dcfg->link_speed);
3658
3659		hw->mac.ops.dmac_config(hw);
3660	}
3661} /* ixgbe_config_dmac */
3662
3663/************************************************************************
3664 * ixgbe_enable_intr
3665 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32             mask, fwsm;

	/* Base mask: every cause except the per-queue RX/TX bits,
	 * which are enabled individually below */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Add MAC-specific causes (ECC, thermal sensor, SFP GPIO pins) */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		/* Mailbox causes are acknowledged explicitly as well */
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
} /* ixgbe_enable_intr */
3747
3748/************************************************************************
3749 * ixgbe_disable_intr
3750 ************************************************************************/
3751static void
3752ixgbe_disable_intr(struct adapter *adapter)
3753{
3754	if (adapter->msix_mem)
3755		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3756	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3757		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3758	} else {
3759		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3760		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3761		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3762	}
3763	IXGBE_WRITE_FLUSH(&adapter->hw);
3764
3765	return;
3766} /* ixgbe_disable_intr */
3767
3768/************************************************************************
3769 * ixgbe_legacy_irq - Legacy Interrupt Service routine
3770 ************************************************************************/
static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter  *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring  *txr = adapter->tx_rings;
	bool            more = false;
	u32             eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Fetch pending interrupt causes */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	/* No cause bits set: spurious/shared interrupt -- just re-enable */
	if (eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	/* Service RX and TX rings only while the interface is running */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insert/remove: ack the cause, defer to mod task */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}

		/* 82599 multispeed fiber: defer to the MSF task */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	/* If RX work remains, reschedule the queue task rather than
	 * re-enabling the interrupt */
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_legacy_irq */
3845
3846/************************************************************************
3847 * ixgbe_free_pci_resources
3848 ************************************************************************/
static void
ixgbe_free_pci_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	device_t        dev = adapter->dev;
	int             rid, memrid;

	/* The MSI-X table lives in a different BAR on 82598 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		memrid = PCIR_BAR(MSIX_82598_BAR);
	else
		memrid = PCIR_BAR(MSIX_82599_BAR);

	/*
	 * There is a slight possibility of a failure mode
	 * in attach that will result in entering this function
	 * before interrupt resources have been initialized, and
	 * in that case we do not want to execute the loops below
	 * We can detect this reliably by the state of the adapter
	 * res pointer.
	 */
	if (adapter->res == NULL)
		goto mem;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* IRQ rid is the MSI-X vector number + 1 */
		rid = que->msix + 1;
		/* Tear down the handler before releasing its IRQ */
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
		    adapter->res);

mem:
	/* Release MSI/MSI-X vectors, then the memory BARs */
	if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
	    (adapter->feat_en & IXGBE_FEATURE_MSIX))
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, memrid,
		    adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    adapter->pci_mem);

	return;
} /* ixgbe_free_pci_resources */
3911
3912/************************************************************************
3913 * ixgbe_set_sysctl_value
3914 ************************************************************************/
3915static void
3916ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
3917    const char *description, int *limit, int value)
3918{
3919	*limit = value;
3920	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3921	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3922	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
3923} /* ixgbe_set_sysctl_value */
3924
3925/************************************************************************
3926 * ixgbe_sysctl_flowcntl
3927 *
3928 *   SYSCTL wrapper around setting Flow Control
3929 ************************************************************************/
3930static int
3931ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3932{
3933	struct adapter *adapter;
3934	int            error, fc;
3935
3936	adapter = (struct adapter *)arg1;
3937	fc = adapter->hw.fc.current_mode;
3938
3939	error = sysctl_handle_int(oidp, &fc, 0, req);
3940	if ((error) || (req->newptr == NULL))
3941		return (error);
3942
3943	/* Don't bother if it's not changed */
3944	if (fc == adapter->hw.fc.current_mode)
3945		return (0);
3946
3947	return ixgbe_set_flowcntl(adapter, fc);
3948} /* ixgbe_sysctl_flowcntl */
3949
3950/************************************************************************
3951 * ixgbe_set_flowcntl - Set flow control
3952 *
3953 *   Flow control values:
3954 *     0 - off
3955 *     1 - rx pause
3956 *     2 - tx pause
3957 *     3 - full
3958 ************************************************************************/
3959static int
3960ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3961{
3962	switch (fc) {
3963	case ixgbe_fc_rx_pause:
3964	case ixgbe_fc_tx_pause:
3965	case ixgbe_fc_full:
3966		adapter->hw.fc.requested_mode = fc;
3967		if (adapter->num_queues > 1)
3968			ixgbe_disable_rx_drop(adapter);
3969		break;
3970	case ixgbe_fc_none:
3971		adapter->hw.fc.requested_mode = ixgbe_fc_none;
3972		if (adapter->num_queues > 1)
3973			ixgbe_enable_rx_drop(adapter);
3974		break;
3975	default:
3976		return (EINVAL);
3977	}
3978
3979	/* Don't autoneg if forcing a value */
3980	adapter->hw.fc.disable_fc_autoneg = TRUE;
3981	ixgbe_fc_enable(&adapter->hw);
3982
3983	return (0);
3984} /* ixgbe_set_flowcntl */
3985
3986/************************************************************************
3987 * ixgbe_enable_rx_drop
3988 *
3989 *   Enable the hardware to drop packets when the buffer is
3990 *   full. This is useful with multiqueue, so that no single
3991 *   queue being full stalls the entire RX engine. We only
3992 *   enable this when Multiqueue is enabled AND Flow Control
3993 *   is disabled.
3994 ************************************************************************/
3995static void
3996ixgbe_enable_rx_drop(struct adapter *adapter)
3997{
3998	struct ixgbe_hw *hw = &adapter->hw;
3999	struct rx_ring  *rxr;
4000	u32             srrctl;
4001
4002	for (int i = 0; i < adapter->num_queues; i++) {
4003		rxr = &adapter->rx_rings[i];
4004		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4005		srrctl |= IXGBE_SRRCTL_DROP_EN;
4006		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4007	}
4008
4009	/* enable drop for each vf */
4010	for (int i = 0; i < adapter->num_vfs; i++) {
4011		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4012		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4013		    IXGBE_QDE_ENABLE));
4014	}
4015} /* ixgbe_enable_rx_drop */
4016
4017/************************************************************************
4018 * ixgbe_disable_rx_drop
4019 ************************************************************************/
4020static void
4021ixgbe_disable_rx_drop(struct adapter *adapter)
4022{
4023	struct ixgbe_hw *hw = &adapter->hw;
4024	struct rx_ring  *rxr;
4025	u32             srrctl;
4026
4027	for (int i = 0; i < adapter->num_queues; i++) {
4028		rxr = &adapter->rx_rings[i];
4029		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4030		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4031		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4032	}
4033
4034	/* disable drop for each vf */
4035	for (int i = 0; i < adapter->num_vfs; i++) {
4036		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4037		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4038	}
4039} /* ixgbe_disable_rx_drop */
4040
4041/************************************************************************
4042 * ixgbe_sysctl_advertise
4043 *
4044 *   SYSCTL wrapper around setting advertised speed
4045 ************************************************************************/
4046static int
4047ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4048{
4049	struct adapter *adapter;
4050	int            error, advertise;
4051
4052	adapter = (struct adapter *)arg1;
4053	advertise = adapter->advertise;
4054
4055	error = sysctl_handle_int(oidp, &advertise, 0, req);
4056	if ((error) || (req->newptr == NULL))
4057		return (error);
4058
4059	return ixgbe_set_advertise(adapter, advertise);
4060} /* ixgbe_sysctl_advertise */
4061
4062/************************************************************************
4063 * ixgbe_set_advertise - Control advertised link speed
4064 *
4065 *   Flags:
4066 *     0x1 - advertise 100 Mb
4067 *     0x2 - advertise 1G
4068 *     0x4 - advertise 10G
4069 *     0x8 - advertise 10 Mb (yes, Mb)
4070 ************************************************************************/
4071static int
4072ixgbe_set_advertise(struct adapter *adapter, int advertise)
4073{
4074	device_t         dev;
4075	struct ixgbe_hw  *hw;
4076	ixgbe_link_speed speed = 0;
4077	ixgbe_link_speed link_caps = 0;
4078	s32              err = IXGBE_NOT_IMPLEMENTED;
4079	bool             negotiate = FALSE;
4080
4081	/* Checks to validate new value */
4082	if (adapter->advertise == advertise) /* no change */
4083		return (0);
4084
4085	dev = adapter->dev;
4086	hw = &adapter->hw;
4087
4088	/* No speed changes for backplane media */
4089	if (hw->phy.media_type == ixgbe_media_type_backplane)
4090		return (ENODEV);
4091
4092	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4093	      (hw->phy.multispeed_fiber))) {
4094		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4095		return (EINVAL);
4096	}
4097
4098	if (advertise < 0x1 || advertise > 0xF) {
4099		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4100		return (EINVAL);
4101	}
4102
4103	if (hw->mac.ops.get_link_capabilities) {
4104		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4105		    &negotiate);
4106		if (err != IXGBE_SUCCESS) {
4107			device_printf(dev, "Unable to determine supported advertise speeds\n");
4108			return (ENODEV);
4109		}
4110	}
4111
4112	/* Set new value and report new advertised mode */
4113	if (advertise & 0x1) {
4114		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4115			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4116			return (EINVAL);
4117		}
4118		speed |= IXGBE_LINK_SPEED_100_FULL;
4119	}
4120	if (advertise & 0x2) {
4121		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4122			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4123			return (EINVAL);
4124		}
4125		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4126	}
4127	if (advertise & 0x4) {
4128		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4129			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4130			return (EINVAL);
4131		}
4132		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4133	}
4134	if (advertise & 0x8) {
4135		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4136			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4137			return (EINVAL);
4138		}
4139		speed |= IXGBE_LINK_SPEED_10_FULL;
4140	}
4141
4142	hw->mac.autotry_restart = TRUE;
4143	hw->mac.ops.setup_link(hw, speed, TRUE);
4144	adapter->advertise = advertise;
4145
4146	return (0);
4147} /* ixgbe_set_advertise */
4148
4149/************************************************************************
4150 * ixgbe_get_advertise - Get current advertised speed settings
4151 *
4152 *   Formatted for sysctl usage.
4153 *   Flags:
4154 *     0x1 - advertise 100 Mb
4155 *     0x2 - advertise 1G
4156 *     0x4 - advertise 10G
4157 *     0x8 - advertise 10 Mb (yes, Mb)
4158 ************************************************************************/
4159static int
4160ixgbe_get_advertise(struct adapter *adapter)
4161{
4162	struct ixgbe_hw  *hw = &adapter->hw;
4163	int              speed;
4164	ixgbe_link_speed link_caps = 0;
4165	s32              err;
4166	bool             negotiate = FALSE;
4167
4168	/*
4169	 * Advertised speed means nothing unless it's copper or
4170	 * multi-speed fiber
4171	 */
4172	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4173	    !(hw->phy.multispeed_fiber))
4174		return (0);
4175
4176	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4177	if (err != IXGBE_SUCCESS)
4178		return (0);
4179
4180	speed =
4181	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4182	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4183	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4184	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4185
4186	return speed;
4187} /* ixgbe_get_advertise */
4188
4189/************************************************************************
4190 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4191 *
4192 *   Control values:
4193 *     0/1 - off / on (use default value of 1000)
4194 *
4195 *     Legal timer values are:
4196 *     50,100,250,500,1000,2000,5000,10000
4197 *
4198 *     Turning off interrupt moderation will also turn this off.
4199 ************************************************************************/
4200static int
4201ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4202{
4203	struct adapter *adapter = (struct adapter *)arg1;
4204	struct ifnet   *ifp = adapter->ifp;
4205	int            error;
4206	u32            newval;
4207
4208	newval = adapter->dmac;
4209	error = sysctl_handle_int(oidp, &newval, 0, req);
4210	if ((error) || (req->newptr == NULL))
4211		return (error);
4212
4213	switch (newval) {
4214	case 0:
4215		/* Disabled */
4216		adapter->dmac = 0;
4217		break;
4218	case 1:
4219		/* Enable and use default */
4220		adapter->dmac = 1000;
4221		break;
4222	case 50:
4223	case 100:
4224	case 250:
4225	case 500:
4226	case 1000:
4227	case 2000:
4228	case 5000:
4229	case 10000:
4230		/* Legal values - allow */
4231		adapter->dmac = newval;
4232		break;
4233	default:
4234		/* Do nothing, illegal value */
4235		return (EINVAL);
4236	}
4237
4238	/* Re-initialize hardware if it's already running */
4239	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4240		ixgbe_init(adapter);
4241
4242	return (0);
4243} /* ixgbe_sysctl_dmac */
4244
4245#ifdef IXGBE_DEBUG
4246/************************************************************************
4247 * ixgbe_sysctl_power_state
4248 *
4249 *   Sysctl to test power states
4250 *   Values:
4251 *     0      - set device to D0
4252 *     3      - set device to D3
4253 *     (none) - get current device power state
4254 ************************************************************************/
4255static int
4256ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4257{
4258	struct adapter *adapter = (struct adapter *)arg1;
4259	device_t       dev = adapter->dev;
4260	int            curr_ps, new_ps, error = 0;
4261
4262	curr_ps = new_ps = pci_get_powerstate(dev);
4263
4264	error = sysctl_handle_int(oidp, &new_ps, 0, req);
4265	if ((error) || (req->newptr == NULL))
4266		return (error);
4267
4268	if (new_ps == curr_ps)
4269		return (0);
4270
4271	if (new_ps == 3 && curr_ps == 0)
4272		error = DEVICE_SUSPEND(dev);
4273	else if (new_ps == 0 && curr_ps == 3)
4274		error = DEVICE_RESUME(dev);
4275	else
4276		return (EINVAL);
4277
4278	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4279
4280	return (error);
4281} /* ixgbe_sysctl_power_state */
4282#endif
4283
4284/************************************************************************
4285 * ixgbe_sysctl_wol_enable
4286 *
4287 *   Sysctl to enable/disable the WoL capability,
4288 *   if supported by the adapter.
4289 *
4290 *   Values:
4291 *     0 - disabled
4292 *     1 - enabled
4293 ************************************************************************/
4294static int
4295ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4296{
4297	struct adapter  *adapter = (struct adapter *)arg1;
4298	struct ixgbe_hw *hw = &adapter->hw;
4299	int             new_wol_enabled;
4300	int             error = 0;
4301
4302	new_wol_enabled = hw->wol_enabled;
4303	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4304	if ((error) || (req->newptr == NULL))
4305		return (error);
4306	new_wol_enabled = !!(new_wol_enabled);
4307	if (new_wol_enabled == hw->wol_enabled)
4308		return (0);
4309
4310	if (new_wol_enabled > 0 && !adapter->wol_support)
4311		return (ENODEV);
4312	else
4313		hw->wol_enabled = new_wol_enabled;
4314
4315	return (0);
4316} /* ixgbe_sysctl_wol_enable */
4317
4318/************************************************************************
4319 * ixgbe_sysctl_wufc - Wake Up Filter Control
4320 *
4321 *   Sysctl to enable/disable the types of packets that the
4322 *   adapter will wake up on upon receipt.
4323 *   Flags:
4324 *     0x1  - Link Status Change
4325 *     0x2  - Magic Packet
4326 *     0x4  - Direct Exact
4327 *     0x8  - Directed Multicast
4328 *     0x10 - Broadcast
4329 *     0x20 - ARP/IPv4 Request Packet
4330 *     0x40 - Direct IPv4 Packet
4331 *     0x80 - Direct IPv6 Packet
4332 *
4333 *   Settings not listed above will cause the sysctl to return an error.
4334 ************************************************************************/
4335static int
4336ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4337{
4338	struct adapter *adapter = (struct adapter *)arg1;
4339	int            error = 0;
4340	u32            new_wufc;
4341
4342	new_wufc = adapter->wufc;
4343
4344	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4345	if ((error) || (req->newptr == NULL))
4346		return (error);
4347	if (new_wufc == adapter->wufc)
4348		return (0);
4349
4350	if (new_wufc & 0xffffff00)
4351		return (EINVAL);
4352
4353	new_wufc &= 0xff;
4354	new_wufc |= (0xffffff & adapter->wufc);
4355	adapter->wufc = new_wufc;
4356
4357	return (0);
4358} /* ixgbe_sysctl_wufc */
4359
4360#ifdef IXGBE_DEBUG
4361/************************************************************************
4362 * ixgbe_sysctl_print_rss_config
4363 ************************************************************************/
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
	struct adapter  *adapter = (struct adapter *)arg1;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	struct sbuf     *buf;
	int             error = 0, reta_size;
	u32             reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	// TODO: use sbufs to make a string to print out
	/* Set multiplier for RETA setup and table size based on MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has the larger 128-entry table */
		reta_size = 128;
		break;
	default:
		reta_size = 32;
		break;
	}

	/* Print out the redirection table */
	sbuf_cat(buf, "\n");
	for (int i = 0; i < reta_size; i++) {
		/* First 32 entries live in RETA, the rest in ERETA */
		if (i < 32) {
			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
		}
	}

	// TODO: print more config

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	/* NOTE(review): sbuf_finish() errors are printed but not
	 * returned; callers always see 0 -- confirm this is intended */
	return (0);
} /* ixgbe_sysctl_print_rss_config */
4415#endif /* IXGBE_DEBUG */
4416
4417/************************************************************************
4418 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4419 *
4420 *   For X552/X557-AT devices using an external PHY
4421 ************************************************************************/
4422static int
4423ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4424{
4425	struct adapter  *adapter = (struct adapter *)arg1;
4426	struct ixgbe_hw *hw = &adapter->hw;
4427	u16             reg;
4428
4429	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4430		device_printf(adapter->dev,
4431		    "Device has no supported external thermal sensor.\n");
4432		return (ENODEV);
4433	}
4434
4435	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4436	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4437		device_printf(adapter->dev,
4438		    "Error reading from PHY's current temperature register\n");
4439		return (EAGAIN);
4440	}
4441
4442	/* Shift temp for output */
4443	reg = reg >> 8;
4444
4445	return (sysctl_handle_int(oidp, NULL, reg, req));
4446} /* ixgbe_sysctl_phy_temp */
4447
4448/************************************************************************
4449 * ixgbe_sysctl_phy_overtemp_occurred
4450 *
4451 *   Reports (directly from the PHY) whether the current PHY
4452 *   temperature is over the overtemp threshold.
4453 ************************************************************************/
4454static int
4455ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4456{
4457	struct adapter  *adapter = (struct adapter *)arg1;
4458	struct ixgbe_hw *hw = &adapter->hw;
4459	u16             reg;
4460
4461	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4462		device_printf(adapter->dev,
4463		    "Device has no supported external thermal sensor.\n");
4464		return (ENODEV);
4465	}
4466
4467	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4468	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4469		device_printf(adapter->dev,
4470		    "Error reading from PHY's temperature status register\n");
4471		return (EAGAIN);
4472	}
4473
4474	/* Get occurrence bit */
4475	reg = !!(reg & 0x4000);
4476
4477	return (sysctl_handle_int(oidp, 0, reg, req));
4478} /* ixgbe_sysctl_phy_overtemp_occurred */
4479
4480/************************************************************************
4481 * ixgbe_sysctl_eee_state
4482 *
4483 *   Sysctl to set EEE power saving feature
4484 *   Values:
4485 *     0      - disable EEE
4486 *     1      - enable EEE
4487 *     (none) - get current device EEE state
4488 ************************************************************************/
4489static int
4490ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4491{
4492	struct adapter *adapter = (struct adapter *)arg1;
4493	device_t       dev = adapter->dev;
4494	int            curr_eee, new_eee, error = 0;
4495	s32            retval;
4496
4497	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4498
4499	error = sysctl_handle_int(oidp, &new_eee, 0, req);
4500	if ((error) || (req->newptr == NULL))
4501		return (error);
4502
4503	/* Nothing to do */
4504	if (new_eee == curr_eee)
4505		return (0);
4506
4507	/* Not supported */
4508	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4509		return (EINVAL);
4510
4511	/* Bounds checking */
4512	if ((new_eee < 0) || (new_eee > 1))
4513		return (EINVAL);
4514
4515	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4516	if (retval) {
4517		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4518		return (EINVAL);
4519	}
4520
4521	/* Restart auto-neg */
4522	ixgbe_init(adapter);
4523
4524	device_printf(dev, "New EEE state: %d\n", new_eee);
4525
4526	/* Cache new value */
4527	if (new_eee)
4528		adapter->feat_en |= IXGBE_FEATURE_EEE;
4529	else
4530		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4531
4532	return (error);
4533} /* ixgbe_sysctl_eee_state */
4534
4535/************************************************************************
4536 * ixgbe_init_device_features
4537 ************************************************************************/
4538static void
4539ixgbe_init_device_features(struct adapter *adapter)
4540{
4541	adapter->feat_cap = IXGBE_FEATURE_NETMAP
4542	                  | IXGBE_FEATURE_RSS
4543	                  | IXGBE_FEATURE_MSI
4544	                  | IXGBE_FEATURE_MSIX
4545	                  | IXGBE_FEATURE_LEGACY_IRQ
4546	                  | IXGBE_FEATURE_LEGACY_TX;
4547
4548	/* Set capabilities first... */
4549	switch (adapter->hw.mac.type) {
4550	case ixgbe_mac_82598EB:
4551		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4552			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4553		break;
4554	case ixgbe_mac_X540:
4555		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4556		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4557		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4558		    (adapter->hw.bus.func == 0))
4559			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4560		break;
4561	case ixgbe_mac_X550:
4562		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4563		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4564		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4565		break;
4566	case ixgbe_mac_X550EM_x:
4567		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4568		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4569		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4570			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4571		break;
4572	case ixgbe_mac_X550EM_a:
4573		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4574		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4575		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4576		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4577		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4578			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4579			adapter->feat_cap |= IXGBE_FEATURE_EEE;
4580		}
4581		break;
4582	case ixgbe_mac_82599EB:
4583		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4584		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4585		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4586		    (adapter->hw.bus.func == 0))
4587			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4588		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4589			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4590		break;
4591	default:
4592		break;
4593	}
4594
4595	/* Enabled by default... */
4596	/* Fan failure detection */
4597	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4598		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4599	/* Netmap */
4600	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4601		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4602	/* EEE */
4603	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4604		adapter->feat_en |= IXGBE_FEATURE_EEE;
4605	/* Thermal Sensor */
4606	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4607		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4608
4609	/* Enabled via global sysctl... */
4610	/* Flow Director */
4611	if (ixgbe_enable_fdir) {
4612		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4613			adapter->feat_en |= IXGBE_FEATURE_FDIR;
4614		else
4615			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
4616	}
4617	/* Legacy (single queue) transmit */
4618	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
4619	    ixgbe_enable_legacy_tx)
4620		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
4621	/*
4622	 * Message Signal Interrupts - Extended (MSI-X)
4623	 * Normal MSI is only enabled if MSI-X calls fail.
4624	 */
4625	if (!ixgbe_enable_msix)
4626		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4627	/* Receive-Side Scaling (RSS) */
4628	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4629		adapter->feat_en |= IXGBE_FEATURE_RSS;
4630
4631	/* Disable features with unmet dependencies... */
4632	/* No MSI-X */
4633	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4634		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4635		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4636		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4637		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4638	}
4639} /* ixgbe_init_device_features */
4640
4641/************************************************************************
4642 * ixgbe_probe - Device identification routine
4643 *
4644 *   Determines if the driver should be loaded on
4645 *   adapter based on its PCI vendor/device ID.
4646 *
4647 *   return BUS_PROBE_DEFAULT on success, positive on failure
4648 ************************************************************************/
4649static int
4650ixgbe_probe(device_t dev)
4651{
4652	ixgbe_vendor_info_t *ent;
4653
4654	u16  pci_vendor_id = 0;
4655	u16  pci_device_id = 0;
4656	u16  pci_subvendor_id = 0;
4657	u16  pci_subdevice_id = 0;
4658	char adapter_name[256];
4659
4660	INIT_DEBUGOUT("ixgbe_probe: begin");
4661
4662	pci_vendor_id = pci_get_vendor(dev);
4663	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
4664		return (ENXIO);
4665
4666	pci_device_id = pci_get_device(dev);
4667	pci_subvendor_id = pci_get_subvendor(dev);
4668	pci_subdevice_id = pci_get_subdevice(dev);
4669
4670	ent = ixgbe_vendor_info_array;
4671	while (ent->vendor_id != 0) {
4672		if ((pci_vendor_id == ent->vendor_id) &&
4673		    (pci_device_id == ent->device_id) &&
4674		    ((pci_subvendor_id == ent->subvendor_id) ||
4675		     (ent->subvendor_id == 0)) &&
4676		    ((pci_subdevice_id == ent->subdevice_id) ||
4677		     (ent->subdevice_id == 0))) {
4678			sprintf(adapter_name, "%s, Version - %s",
4679				ixgbe_strings[ent->index],
4680				ixgbe_driver_version);
4681			device_set_desc_copy(dev, adapter_name);
4682			++ixgbe_total_ports;
4683			return (BUS_PROBE_DEFAULT);
4684		}
4685		ent++;
4686	}
4687
4688	return (ENXIO);
4689} /* ixgbe_probe */
4690
4691
4692/************************************************************************
4693 * ixgbe_ioctl - Ioctl entry point
4694 *
4695 *   Called when the user wants to configure the interface.
4696 *
4697 *   return 0 on success, positive on failure
4698 ************************************************************************/
4699static int
4700ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4701{
4702	struct adapter *adapter = ifp->if_softc;
4703	struct ifreq   *ifr = (struct ifreq *) data;
4704#if defined(INET) || defined(INET6)
4705	struct ifaddr  *ifa = (struct ifaddr *)data;
4706#endif
4707	int            error = 0;
4708	bool           avoid_reset = FALSE;
4709
4710	switch (command) {
4711	case SIOCSIFADDR:
4712#ifdef INET
4713		if (ifa->ifa_addr->sa_family == AF_INET)
4714			avoid_reset = TRUE;
4715#endif
4716#ifdef INET6
4717		if (ifa->ifa_addr->sa_family == AF_INET6)
4718			avoid_reset = TRUE;
4719#endif
4720		/*
4721		 * Calling init results in link renegotiation,
4722		 * so we avoid doing it when possible.
4723		 */
4724		if (avoid_reset) {
4725			ifp->if_flags |= IFF_UP;
4726			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4727				ixgbe_init(adapter);
4728#ifdef INET
4729			if (!(ifp->if_flags & IFF_NOARP))
4730				arp_ifinit(ifp, ifa);
4731#endif
4732		} else
4733			error = ether_ioctl(ifp, command, data);
4734		break;
4735	case SIOCSIFMTU:
4736		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
4737		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
4738			error = EINVAL;
4739		} else {
4740			IXGBE_CORE_LOCK(adapter);
4741			ifp->if_mtu = ifr->ifr_mtu;
4742			adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
4743			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4744				ixgbe_init_locked(adapter);
4745			ixgbe_recalculate_max_frame(adapter);
4746			IXGBE_CORE_UNLOCK(adapter);
4747		}
4748		break;
4749	case SIOCSIFFLAGS:
4750		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
4751		IXGBE_CORE_LOCK(adapter);
4752		if (ifp->if_flags & IFF_UP) {
4753			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4754				if ((ifp->if_flags ^ adapter->if_flags) &
4755				    (IFF_PROMISC | IFF_ALLMULTI)) {
4756					ixgbe_set_promisc(adapter);
4757				}
4758			} else
4759				ixgbe_init_locked(adapter);
4760		} else
4761			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4762				ixgbe_stop(adapter);
4763		adapter->if_flags = ifp->if_flags;
4764		IXGBE_CORE_UNLOCK(adapter);
4765		break;
4766	case SIOCADDMULTI:
4767	case SIOCDELMULTI:
4768		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
4769		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4770			IXGBE_CORE_LOCK(adapter);
4771			ixgbe_disable_intr(adapter);
4772			ixgbe_set_multi(adapter);
4773			ixgbe_enable_intr(adapter);
4774			IXGBE_CORE_UNLOCK(adapter);
4775		}
4776		break;
4777	case SIOCSIFMEDIA:
4778	case SIOCGIFMEDIA:
4779		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
4780		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
4781		break;
4782	case SIOCSIFCAP:
4783	{
4784		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
4785
4786		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4787
4788		if (!mask)
4789			break;
4790
4791		/* HW cannot turn these on/off separately */
4792		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4793			ifp->if_capenable ^= IFCAP_RXCSUM;
4794			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
4795		}
4796		if (mask & IFCAP_TXCSUM)
4797			ifp->if_capenable ^= IFCAP_TXCSUM;
4798		if (mask & IFCAP_TXCSUM_IPV6)
4799			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
4800		if (mask & IFCAP_TSO4)
4801			ifp->if_capenable ^= IFCAP_TSO4;
4802		if (mask & IFCAP_TSO6)
4803			ifp->if_capenable ^= IFCAP_TSO6;
4804		if (mask & IFCAP_LRO)
4805			ifp->if_capenable ^= IFCAP_LRO;
4806		if (mask & IFCAP_VLAN_HWTAGGING)
4807			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4808		if (mask & IFCAP_VLAN_HWFILTER)
4809			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
4810		if (mask & IFCAP_VLAN_HWTSO)
4811			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4812
4813		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4814			IXGBE_CORE_LOCK(adapter);
4815			ixgbe_init_locked(adapter);
4816			IXGBE_CORE_UNLOCK(adapter);
4817		}
4818		VLAN_CAPABILITIES(ifp);
4819		break;
4820	}
4821#if __FreeBSD_version >= 1100036
4822	case SIOCGI2C:
4823	{
4824		struct ixgbe_hw *hw = &adapter->hw;
4825		struct ifi2creq i2c;
4826		int i;
4827
4828		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
4829		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
4830		if (error != 0)
4831			break;
4832		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4833			error = EINVAL;
4834			break;
4835		}
4836		if (i2c.len > sizeof(i2c.data)) {
4837			error = EINVAL;
4838			break;
4839		}
4840
4841		for (i = 0; i < i2c.len; i++)
4842			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
4843			    i2c.dev_addr, &i2c.data[i]);
4844		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
4845		break;
4846	}
4847#endif
4848	default:
4849		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
4850		error = ether_ioctl(ifp, command, data);
4851		break;
4852	}
4853
4854	return (error);
4855} /* ixgbe_ioctl */
4856
4857/************************************************************************
4858 * ixgbe_check_fan_failure
4859 ************************************************************************/
4860static void
4861ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4862{
4863	u32 mask;
4864
4865	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4866	    IXGBE_ESDP_SDP1;
4867
4868	if (reg & mask)
4869		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4870} /* ixgbe_check_fan_failure */
4871
4872/************************************************************************
4873 * ixgbe_handle_que
4874 ************************************************************************/
4875static void
4876ixgbe_handle_que(void *context, int pending)
4877{
4878	struct ix_queue *que = context;
4879	struct adapter  *adapter = que->adapter;
4880	struct tx_ring  *txr = que->txr;
4881	struct ifnet    *ifp = adapter->ifp;
4882
4883	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4884		ixgbe_rxeof(que);
4885		IXGBE_TX_LOCK(txr);
4886		ixgbe_txeof(txr);
4887		if (!ixgbe_ring_empty(ifp, txr->br))
4888			ixgbe_start_locked(ifp, txr);
4889		IXGBE_TX_UNLOCK(txr);
4890	}
4891
4892	/* Re-enable this interrupt */
4893	if (que->res != NULL)
4894		ixgbe_enable_queue(adapter, que->msix);
4895	else
4896		ixgbe_enable_intr(adapter);
4897
4898	return;
4899} /* ixgbe_handle_que */
4900
4901
4902
4903/************************************************************************
4904 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
4905 ************************************************************************/
4906static int
4907ixgbe_allocate_legacy(struct adapter *adapter)
4908{
4909	device_t        dev = adapter->dev;
4910	struct ix_queue *que = adapter->queues;
4911	struct tx_ring  *txr = adapter->tx_rings;
4912	int             error;
4913
4914	/* We allocate a single interrupt resource */
4915	adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
4916	    &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
4917	if (adapter->res == NULL) {
4918		device_printf(dev,
4919		    "Unable to allocate bus resource: interrupt\n");
4920		return (ENXIO);
4921	}
4922
4923	/*
4924	 * Try allocating a fast interrupt and the associated deferred
4925	 * processing contexts.
4926	 */
4927	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
4928		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
4929	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
4930	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
4931	    taskqueue_thread_enqueue, &que->tq);
4932	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
4933	    device_get_nameunit(adapter->dev));
4934
4935	/* Tasklets for Link, SFP and Multispeed Fiber */
4936	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
4937	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
4938	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
4939	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
4940	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
4941		TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
4942	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
4943	    taskqueue_thread_enqueue, &adapter->tq);
4944	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
4945	    device_get_nameunit(adapter->dev));
4946
4947	if ((error = bus_setup_intr(dev, adapter->res,
4948	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
4949	    &adapter->tag)) != 0) {
4950		device_printf(dev,
4951		    "Failed to register fast interrupt handler: %d\n", error);
4952		taskqueue_free(que->tq);
4953		taskqueue_free(adapter->tq);
4954		que->tq = NULL;
4955		adapter->tq = NULL;
4956
4957		return (error);
4958	}
4959	/* For simplicity in the handlers */
4960	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
4961
4962	return (0);
4963} /* ixgbe_allocate_legacy */
4964
4965
4966/************************************************************************
4967 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
4968 ************************************************************************/
4969static int
4970ixgbe_allocate_msix(struct adapter *adapter)
4971{
4972	device_t        dev = adapter->dev;
4973	struct ix_queue *que = adapter->queues;
4974	struct tx_ring  *txr = adapter->tx_rings;
4975	int             error, rid, vector = 0;
4976	int             cpu_id = 0;
4977	unsigned int    rss_buckets = 0;
4978	cpuset_t        cpu_mask;
4979
4980	/*
4981	 * If we're doing RSS, the number of queues needs to
4982	 * match the number of RSS buckets that are configured.
4983	 *
4984	 * + If there's more queues than RSS buckets, we'll end
4985	 *   up with queues that get no traffic.
4986	 *
4987	 * + If there's more RSS buckets than queues, we'll end
4988	 *   up having multiple RSS buckets map to the same queue,
4989	 *   so there'll be some contention.
4990	 */
4991	rss_buckets = rss_getnumbuckets();
4992	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
4993	    (adapter->num_queues != rss_buckets)) {
4994		device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
4995		    __func__, adapter->num_queues, rss_buckets);
4996	}
4997
4998	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
4999		rid = vector + 1;
5000		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
5001		    RF_SHAREABLE | RF_ACTIVE);
5002		if (que->res == NULL) {
5003			device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
5004			    vector);
5005			return (ENXIO);
5006		}
5007		/* Set the handler function */
5008		error = bus_setup_intr(dev, que->res,
5009		    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
5010		    &que->tag);
5011		if (error) {
5012			que->res = NULL;
5013			device_printf(dev, "Failed to register QUE handler");
5014			return (error);
5015		}
5016#if __FreeBSD_version >= 800504
5017		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
5018#endif
5019		que->msix = vector;
5020		adapter->active_queues |= (u64)(1 << que->msix);
5021
5022		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
5023			/*
5024			 * The queue ID is used as the RSS layer bucket ID.
5025			 * We look up the queue ID -> RSS CPU ID and select
5026			 * that.
5027			 */
5028			cpu_id = rss_getcpu(i % rss_buckets);
5029			CPU_SETOF(cpu_id, &cpu_mask);
5030		} else {
5031			/*
5032			 * Bind the MSI-X vector, and thus the
5033			 * rings to the corresponding CPU.
5034			 *
5035			 * This just happens to match the default RSS
5036			 * round-robin bucket -> queue -> CPU allocation.
5037			 */
5038			if (adapter->num_queues > 1)
5039				cpu_id = i;
5040		}
5041		if (adapter->num_queues > 1)
5042			bus_bind_intr(dev, que->res, cpu_id);
5043#ifdef IXGBE_DEBUG
5044		if (adapter->feat_en & IXGBE_FEATURE_RSS)
5045			device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
5046			    cpu_id);
5047		else
5048			device_printf(dev, "Bound queue %d to cpu %d\n", i,
5049			    cpu_id);
5050#endif /* IXGBE_DEBUG */
5051
5052
5053		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5054			TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
5055			    txr);
5056		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5057		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5058		    taskqueue_thread_enqueue, &que->tq);
5059#if __FreeBSD_version < 1100000
5060		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
5061		    device_get_nameunit(adapter->dev), i);
5062#else
5063		if (adapter->feat_en & IXGBE_FEATURE_RSS)
5064			taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5065			    &cpu_mask, "%s (bucket %d)",
5066			    device_get_nameunit(adapter->dev), cpu_id);
5067		else
5068			taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5069			    NULL, "%s:q%d", device_get_nameunit(adapter->dev),
5070			    i);
5071#endif
5072	}
5073
5074	/* and Link */
5075	adapter->link_rid = vector + 1;
5076	adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
5077	    &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
5078	if (!adapter->res) {
5079		device_printf(dev,
5080		    "Unable to allocate bus resource: Link interrupt [%d]\n",
5081		    adapter->link_rid);
5082		return (ENXIO);
5083	}
5084	/* Set the link handler function */
5085	error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
5086	    NULL, ixgbe_msix_link, adapter, &adapter->tag);
5087	if (error) {
5088		adapter->res = NULL;
5089		device_printf(dev, "Failed to register LINK handler");
5090		return (error);
5091	}
5092#if __FreeBSD_version >= 800504
5093	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
5094#endif
5095	adapter->vector = vector;
5096	/* Tasklets for Link, SFP and Multispeed Fiber */
5097	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
5098	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
5099	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
5100	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5101		TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
5102	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
5103	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5104		TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
5105	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
5106	    taskqueue_thread_enqueue, &adapter->tq);
5107	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
5108	    device_get_nameunit(adapter->dev));
5109
5110	return (0);
5111} /* ixgbe_allocate_msix */
5112
5113/************************************************************************
5114 * ixgbe_configure_interrupts
5115 *
5116 *   Setup MSI-X, MSI, or legacy interrupts (in that order).
5117 *   This will also depend on user settings.
5118 ************************************************************************/
5119static int
5120ixgbe_configure_interrupts(struct adapter *adapter)
5121{
5122	device_t dev = adapter->dev;
5123	int      rid, want, queues, msgs;
5124
5125	/* Default to 1 queue if MSI-X setup fails */
5126	adapter->num_queues = 1;
5127
5128	/* Override by tuneable */
5129	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
5130		goto msi;
5131
5132	/* First try MSI-X */
5133	msgs = pci_msix_count(dev);
5134	if (msgs == 0)
5135		goto msi;
5136	rid = PCIR_BAR(MSIX_82598_BAR);
5137	adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5138	    RF_ACTIVE);
5139	if (adapter->msix_mem == NULL) {
5140		rid += 4;  /* 82599 maps in higher BAR */
5141		adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
5142		    &rid, RF_ACTIVE);
5143	}
5144	if (adapter->msix_mem == NULL) {
5145		/* May not be enabled */
5146		device_printf(adapter->dev, "Unable to map MSI-X table.\n");
5147		goto msi;
5148	}
5149
5150	/* Figure out a reasonable auto config value */
5151	queues = min(mp_ncpus, msgs - 1);
5152	/* If we're doing RSS, clamp at the number of RSS buckets */
5153	if (adapter->feat_en & IXGBE_FEATURE_RSS)
5154		queues = min(queues, rss_getnumbuckets());
5155	if (ixgbe_num_queues > queues) {
5156		device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
5157		ixgbe_num_queues = queues;
5158	}
5159
5160	if (ixgbe_num_queues != 0)
5161		queues = ixgbe_num_queues;
5162	/* Set max queues to 8 when autoconfiguring */
5163	else
5164		queues = min(queues, 8);
5165
5166	/* reflect correct sysctl value */
5167	ixgbe_num_queues = queues;
5168
5169	/*
5170	 * Want one vector (RX/TX pair) per queue
5171	 * plus an additional for Link.
5172	 */
5173	want = queues + 1;
5174	if (msgs >= want)
5175		msgs = want;
5176	else {
5177		device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
5178		    msgs, want);
5179		goto msi;
5180	}
5181	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
5182		device_printf(adapter->dev,
5183		    "Using MSI-X interrupts with %d vectors\n", msgs);
5184		adapter->num_queues = queues;
5185		adapter->feat_en |= IXGBE_FEATURE_MSIX;
5186		return (0);
5187	}
5188	/*
5189	 * MSI-X allocation failed or provided us with
5190	 * less vectors than needed. Free MSI-X resources
5191	 * and we'll try enabling MSI.
5192	 */
5193	pci_release_msi(dev);
5194
5195msi:
5196	/* Without MSI-X, some features are no longer supported */
5197	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5198	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
5199	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5200	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
5201
5202	if (adapter->msix_mem != NULL) {
5203		bus_release_resource(dev, SYS_RES_MEMORY, rid,
5204		    adapter->msix_mem);
5205		adapter->msix_mem = NULL;
5206	}
5207	msgs = 1;
5208	if (pci_alloc_msi(dev, &msgs) == 0) {
5209		adapter->feat_en |= IXGBE_FEATURE_MSI;
5210		adapter->link_rid = 1;
5211		device_printf(adapter->dev, "Using an MSI interrupt\n");
5212		return (0);
5213	}
5214
5215	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
5216		device_printf(adapter->dev,
5217		    "Device does not support legacy interrupts.\n");
5218		return 1;
5219	}
5220
5221	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
5222	adapter->link_rid = 0;
5223	device_printf(adapter->dev, "Using a Legacy interrupt\n");
5224
5225	return (0);
5226} /* ixgbe_configure_interrupts */
5227
5228
5229/************************************************************************
5230 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
5231 *
5232 *   Done outside of interrupt context since the driver might sleep
5233 ************************************************************************/
5234static void
5235ixgbe_handle_link(void *context, int pending)
5236{
5237	struct adapter  *adapter = context;
5238	struct ixgbe_hw *hw = &adapter->hw;
5239
5240	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
5241	ixgbe_update_link_status(adapter);
5242
5243	/* Re-enable link interrupts */
5244	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
5245} /* ixgbe_handle_link */
5246
5247/************************************************************************
5248 * ixgbe_rearm_queues
5249 ************************************************************************/
5250static void
5251ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5252{
5253	u32 mask;
5254
5255	switch (adapter->hw.mac.type) {
5256	case ixgbe_mac_82598EB:
5257		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5258		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5259		break;
5260	case ixgbe_mac_82599EB:
5261	case ixgbe_mac_X540:
5262	case ixgbe_mac_X550:
5263	case ixgbe_mac_X550EM_x:
5264	case ixgbe_mac_X550EM_a:
5265		mask = (queues & 0xFFFFFFFF);
5266		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5267		mask = (queues >> 32);
5268		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5269		break;
5270	default:
5271		break;
5272	}
5273} /* ixgbe_rearm_queues */
5274
5275