/* if_ix.c, revision 315333 */
1/******************************************************************************
2
3  Copyright (c) 2001-2017, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: stable/10/sys/dev/ixgbe/if_ix.c 315333 2017-03-15 21:20:17Z erj $*/
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#endif
40
41#include "ixgbe.h"
42
43/************************************************************************
44 * Driver version
45 ************************************************************************/
46char ixgbe_driver_version[] = "3.2.11-k";
47
48
49/************************************************************************
50 * PCI Device ID Table
51 *
52 *   Used by probe to select devices to load on
53 *   Last field stores an index into ixgbe_strings
54 *   Last entry must be all 0s
55 *
56 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
57 ************************************************************************/
/* Consumed by ixgbe_probe(); scanned linearly until the all-zero
 * terminator entry.  Subvendor/subdevice of 0 act as wildcards. */
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
106
107/************************************************************************
108 * Table of branding strings
109 ************************************************************************/
110static char    *ixgbe_strings[] = {
111	"Intel(R) PRO/10GbE PCI-Express Network Driver"
112};
113
114/************************************************************************
115 * Function prototypes
116 ************************************************************************/
117static int      ixgbe_probe(device_t);
118static int      ixgbe_attach(device_t);
119static int      ixgbe_detach(device_t);
120static int      ixgbe_shutdown(device_t);
121static int      ixgbe_suspend(device_t);
122static int      ixgbe_resume(device_t);
123static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
124static void     ixgbe_init(void *);
125static void     ixgbe_stop(void *);
126#if __FreeBSD_version >= 1100036
127static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
128#endif
129static void     ixgbe_init_device_features(struct adapter *);
130static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
131static void     ixgbe_add_media_types(struct adapter *);
132static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
133static int      ixgbe_media_change(struct ifnet *);
134static int      ixgbe_allocate_pci_resources(struct adapter *);
135static void     ixgbe_get_slot_info(struct adapter *);
136static int      ixgbe_allocate_msix(struct adapter *);
137static int      ixgbe_allocate_legacy(struct adapter *);
138static int      ixgbe_configure_interrupts(struct adapter *);
139static void     ixgbe_free_pci_resources(struct adapter *);
140static void     ixgbe_local_timer(void *);
141static int      ixgbe_setup_interface(device_t, struct adapter *);
142static void     ixgbe_config_gpie(struct adapter *);
143static void     ixgbe_config_dmac(struct adapter *);
144static void     ixgbe_config_delay_values(struct adapter *);
145static void     ixgbe_config_link(struct adapter *);
146static void     ixgbe_check_wol_support(struct adapter *);
147static int      ixgbe_setup_low_power_mode(struct adapter *);
148static void     ixgbe_rearm_queues(struct adapter *, u64);
149
150static void     ixgbe_initialize_transmit_units(struct adapter *);
151static void     ixgbe_initialize_receive_units(struct adapter *);
152static void     ixgbe_enable_rx_drop(struct adapter *);
153static void     ixgbe_disable_rx_drop(struct adapter *);
154static void     ixgbe_initialize_rss_mapping(struct adapter *);
155
156static void     ixgbe_enable_intr(struct adapter *);
157static void     ixgbe_disable_intr(struct adapter *);
158static void     ixgbe_update_stats_counters(struct adapter *);
159static void     ixgbe_set_promisc(struct adapter *);
160static void     ixgbe_set_multi(struct adapter *);
161static void     ixgbe_update_link_status(struct adapter *);
162static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
163static void     ixgbe_configure_ivars(struct adapter *);
164static u8       *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
165
166static void     ixgbe_setup_vlan_hw_support(struct adapter *);
167static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
168static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
169
170static void     ixgbe_add_device_sysctls(struct adapter *);
171static void     ixgbe_add_hw_stats(struct adapter *);
172static int      ixgbe_set_flowcntl(struct adapter *, int);
173static int      ixgbe_set_advertise(struct adapter *, int);
174static int      ixgbe_get_advertise(struct adapter *);
175
176/* Sysctl handlers */
177static void     ixgbe_set_sysctl_value(struct adapter *, const char *,
178                                       const char *, int *, int);
179static int      ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
180static int      ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
181static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
182static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
183static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
184#ifdef IXGBE_DEBUG
185static int      ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
186static int      ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
187#endif
188static int      ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
189static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
190static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
191
192/* Support for pluggable optic modules */
193static bool     ixgbe_sfp_probe(struct adapter *);
194
195/* Legacy (single vector) interrupt handler */
196static void     ixgbe_legacy_irq(void *);
197
198/* The MSI/MSI-X Interrupt handlers */
199static void     ixgbe_msix_que(void *);
200static void     ixgbe_msix_link(void *);
201
202/* Deferred interrupt tasklets */
203static void     ixgbe_handle_que(void *, int);
204static void     ixgbe_handle_link(void *, int);
205static void     ixgbe_handle_msf(void *, int);
206static void     ixgbe_handle_mod(void *, int);
207static void     ixgbe_handle_phy(void *, int);
208
209
210/************************************************************************
211 *  FreeBSD Device Interface Entry Points
212 ************************************************************************/
213static device_method_t ix_methods[] = {
214	/* Device interface */
215	DEVMETHOD(device_probe, ixgbe_probe),
216	DEVMETHOD(device_attach, ixgbe_attach),
217	DEVMETHOD(device_detach, ixgbe_detach),
218	DEVMETHOD(device_shutdown, ixgbe_shutdown),
219	DEVMETHOD(device_suspend, ixgbe_suspend),
220	DEVMETHOD(device_resume, ixgbe_resume),
221#ifdef PCI_IOV
222	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
223	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
224	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
225#endif /* PCI_IOV */
226	DEVMETHOD_END
227};
228
229static driver_t ix_driver = {
230	"ix", ix_methods, sizeof(struct adapter),
231};
232
233devclass_t ix_devclass;
234DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
235
236MODULE_DEPEND(ix, pci, 1, 1, 1);
237MODULE_DEPEND(ix, ether, 1, 1, 1);
238#if __FreeBSD_version >= 1100000
239MODULE_DEPEND(ix, netmap, 1, 1, 1);
240#endif
241
242/*
243 * TUNEABLE PARAMETERS:
244 */
245
246static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
247
248/*
249 * AIM: Adaptive Interrupt Moderation
250 * which means that the interrupt rate
251 * is varied over time based on the
252 * traffic for that interrupt vector
253 */
254static int ixgbe_enable_aim = TRUE;
255TUNABLE_INT("hw.ix.enable_aim", &ixgbe_enable_aim);
256SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
257    "Enable adaptive interrupt moderation");
258
259static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
260TUNABLE_INT("hw.ix.max_interrupt_rate", &ixgbe_max_interrupt_rate);
261SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
262    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
263
264/* How many packets rxeof tries to clean at a time */
265static int ixgbe_rx_process_limit = 256;
266TUNABLE_INT("hw.ix.rx_process_limit", &ixgbe_rx_process_limit);
267SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
268    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
269
270/* How many packets txeof tries to clean at a time */
271static int ixgbe_tx_process_limit = 256;
272TUNABLE_INT("hw.ix.tx_process_limit", &ixgbe_tx_process_limit);
273SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
274    &ixgbe_tx_process_limit, 0,
275    "Maximum number of sent packets to process at a time, -1 means unlimited");
276
277/* Flow control setting, default to full */
278static int ixgbe_flow_control = ixgbe_fc_full;
279TUNABLE_INT("hw.ix.flow_control", &ixgbe_flow_control);
280SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
281    &ixgbe_flow_control, 0, "Default flow control used for all adapters");
282
283/* Advertise Speed, default to 0 (auto) */
284static int ixgbe_advertise_speed = 0;
285TUNABLE_INT("hw.ix.advertise_speed", &ixgbe_advertise_speed);
286SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
287    &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
288
289/*
290 * Smart speed setting, default to on
291 * this only works as a compile option
292 * right now as its during attach, set
293 * this to 'ixgbe_smart_speed_off' to
294 * disable.
295 */
296static int ixgbe_smart_speed = ixgbe_smart_speed_on;
297
298/*
299 * MSI-X should be the default for best performance,
300 * but this allows it to be forced off for testing.
301 */
302static int ixgbe_enable_msix = 1;
303TUNABLE_INT("hw.ix.enable_msix", &ixgbe_enable_msix);
304SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
305    "Enable MSI-X interrupts");
306
307/*
308 * Number of Queues, can be set to 0,
309 * it then autoconfigures based on the
310 * number of cpus with a max of 8. This
311 * can be overriden manually here.
312 */
313static int ixgbe_num_queues = 0;
314TUNABLE_INT("hw.ix.num_queues", &ixgbe_num_queues);
315SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
316    "Number of queues to configure, 0 indicates autoconfigure");
317
318/*
319 * Number of TX descriptors per ring,
320 * setting higher than RX as this seems
321 * the better performing choice.
322 */
323static int ixgbe_txd = PERFORM_TXD;
324TUNABLE_INT("hw.ix.txd", &ixgbe_txd);
325SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
326    "Number of transmit descriptors per queue");
327
328/* Number of RX descriptors per ring */
329static int ixgbe_rxd = PERFORM_RXD;
330TUNABLE_INT("hw.ix.rxd", &ixgbe_rxd);
331SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
332    "Number of receive descriptors per queue");
333
334/*
335 * Defining this on will allow the use
336 * of unsupported SFP+ modules, note that
337 * doing so you are on your own :)
338 */
339static int allow_unsupported_sfp = FALSE;
340TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
341SYSCTL_INT(_hw_ix, OID_AUTO, allow_unsupported_sfp, CTLFLAG_RDTUN,
342    &allow_unsupported_sfp, 0,
343    "Allow unsupported SFP modules...use at your own risk");
344
345/*
346 * Not sure if Flow Director is fully baked,
347 * so we'll default to turning it off.
348 */
349static int ixgbe_enable_fdir = 0;
350TUNABLE_INT("hw.ix.enable_fdir", &ixgbe_enable_fdir);
351SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
352    "Enable Flow Director");
353
354/* Legacy Transmit (single queue) */
355static int ixgbe_enable_legacy_tx = 0;
356TUNABLE_INT("hw.ix.enable_legacy_tx", &ixgbe_enable_legacy_tx);
357SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
358    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
359
360/* Receive-Side Scaling */
361static int ixgbe_enable_rss = 1;
362TUNABLE_INT("hw.ix.enable_rss", &ixgbe_enable_rss);
363SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
364    "Enable Receive-Side Scaling (RSS)");
365
366/* Keep running tab on them for sanity check */
367static int ixgbe_total_ports;
368
369static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
370static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
371
372MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
373
374/************************************************************************
375 * ixgbe_probe - Device identification routine
376 *
377 *   Determines if the driver should be loaded on
378 *   adapter based on its PCI vendor/device ID.
379 *
380 *   return BUS_PROBE_DEFAULT on success, positive on failure
381 ************************************************************************/
382static int
383ixgbe_probe(device_t dev)
384{
385	ixgbe_vendor_info_t *ent;
386
387	u16  pci_vendor_id = 0;
388	u16  pci_device_id = 0;
389	u16  pci_subvendor_id = 0;
390	u16  pci_subdevice_id = 0;
391	char adapter_name[256];
392
393	INIT_DEBUGOUT("ixgbe_probe: begin");
394
395	pci_vendor_id = pci_get_vendor(dev);
396	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
397		return (ENXIO);
398
399	pci_device_id = pci_get_device(dev);
400	pci_subvendor_id = pci_get_subvendor(dev);
401	pci_subdevice_id = pci_get_subdevice(dev);
402
403	ent = ixgbe_vendor_info_array;
404	while (ent->vendor_id != 0) {
405		if ((pci_vendor_id == ent->vendor_id) &&
406		    (pci_device_id == ent->device_id) &&
407		    ((pci_subvendor_id == ent->subvendor_id) ||
408		     (ent->subvendor_id == 0)) &&
409		    ((pci_subdevice_id == ent->subdevice_id) ||
410		     (ent->subdevice_id == 0))) {
411			sprintf(adapter_name, "%s, Version - %s",
412				ixgbe_strings[ent->index],
413				ixgbe_driver_version);
414			device_set_desc_copy(dev, adapter_name);
415			++ixgbe_total_ports;
416			return (BUS_PROBE_DEFAULT);
417		}
418		ent++;
419	}
420
421	return (ENXIO);
422} /* ixgbe_probe */
423
424/************************************************************************
425 * ixgbe_attach - Device initialization routine
426 *
427 *   Called when the driver is being loaded.
428 *   Identifies the type of hardware, allocates all resources
429 *   and initializes the hardware.
430 *
431 *   return 0 on success, positive on failure
432 ************************************************************************/
433static int
434ixgbe_attach(device_t dev)
435{
436	struct adapter  *adapter;
437	struct ixgbe_hw *hw;
438	int             error = 0;
439	u32             ctrl_ext;
440
441	INIT_DEBUGOUT("ixgbe_attach: begin");
442
443	/* Allocate, clear, and link in our adapter structure */
444	adapter = device_get_softc(dev);
445	adapter->hw.back = adapter;
446	adapter->dev = dev;
447	hw = &adapter->hw;
448
449	/* Core Lock Init*/
450	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
451
452	/* Set up the timer callout */
453	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
454
455	/* Determine hardware revision */
456	hw->vendor_id = pci_get_vendor(dev);
457	hw->device_id = pci_get_device(dev);
458	hw->revision_id = pci_get_revid(dev);
459	hw->subsystem_vendor_id = pci_get_subvendor(dev);
460	hw->subsystem_device_id = pci_get_subdevice(dev);
461
462	/*
463	 * Make sure BUSMASTER is set
464	 */
465	pci_enable_busmaster(dev);
466
467	/* Do base PCI setup - map BAR0 */
468	if (ixgbe_allocate_pci_resources(adapter)) {
469		device_printf(dev, "Allocation of PCI resources failed\n");
470		error = ENXIO;
471		goto err_out;
472	}
473
474	/* let hardware know driver is loaded */
475	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
476	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
477	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
478
479	/*
480	 * Initialize the shared code
481	 */
482	if (ixgbe_init_shared_code(hw)) {
483		device_printf(dev, "Unable to initialize the shared code\n");
484		error = ENXIO;
485		goto err_out;
486	}
487
488	if (hw->mbx.ops.init_params)
489		hw->mbx.ops.init_params(hw);
490
491	hw->allow_unsupported_sfp = allow_unsupported_sfp;
492
493	/* Pick up the 82599 settings */
494	if (hw->mac.type != ixgbe_mac_82598EB) {
495		hw->phy.smart_speed = ixgbe_smart_speed;
496		adapter->num_segs = IXGBE_82599_SCATTER;
497	} else
498		adapter->num_segs = IXGBE_82598_SCATTER;
499
500	ixgbe_init_device_features(adapter);
501
502	if (ixgbe_configure_interrupts(adapter)) {
503		error = ENXIO;
504		goto err_out;
505	}
506
507	/* Allocate multicast array memory. */
508	adapter->mta = malloc(sizeof(*adapter->mta) *
509	    MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
510	if (adapter->mta == NULL) {
511		device_printf(dev, "Can not allocate multicast setup array\n");
512		error = ENOMEM;
513		goto err_out;
514	}
515
516	/* Enable WoL (if supported) */
517	ixgbe_check_wol_support(adapter);
518
519	/* Register for VLAN events */
520	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
521	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
522	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
523	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
524
525	/* Verify adapter fan is still functional (if applicable) */
526	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
527		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
528		ixgbe_check_fan_failure(adapter, esdp, false);
529	}
530
531	/* Enable EEE power saving */
532	if (adapter->feat_en & IXGBE_FEATURE_EEE)
533		hw->mac.ops.setup_eee(hw, true);
534
535	/* Set an initial default flow control value */
536	hw->fc.requested_mode = ixgbe_flow_control;
537
538	/* Put the semaphore in a known state (released) */
539	ixgbe_init_swfw_semaphore(hw);
540
541	/* Sysctls for limiting the amount of work done in the taskqueues */
542	ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
543	    "max number of rx packets to process",
544	    &adapter->rx_process_limit, ixgbe_rx_process_limit);
545
546	ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
547	    "max number of tx packets to process",
548	    &adapter->tx_process_limit, ixgbe_tx_process_limit);
549
550	/* Do descriptor calc and sanity checks */
551	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
552	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
553		device_printf(dev, "TXD config issue, using default!\n");
554		adapter->num_tx_desc = DEFAULT_TXD;
555	} else
556		adapter->num_tx_desc = ixgbe_txd;
557
558	/*
559	 * With many RX rings it is easy to exceed the
560	 * system mbuf allocation. Tuning nmbclusters
561	 * can alleviate this.
562	 */
563	if (nmbclusters > 0) {
564		int s;
565		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
566		if (s > nmbclusters) {
567			device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
568			ixgbe_rxd = DEFAULT_RXD;
569		}
570	}
571
572	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
573	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
574		device_printf(dev, "RXD config issue, using default!\n");
575		adapter->num_rx_desc = DEFAULT_RXD;
576	} else
577		adapter->num_rx_desc = ixgbe_rxd;
578
579	/* Allocate our TX/RX Queues */
580	if (ixgbe_allocate_queues(adapter)) {
581		error = ENOMEM;
582		goto err_out;
583	}
584
585	hw->phy.reset_if_overtemp = TRUE;
586	error = ixgbe_reset_hw(hw);
587	hw->phy.reset_if_overtemp = FALSE;
588	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
589		/*
590		 * No optics in this port, set up
591		 * so the timer routine will probe
592		 * for later insertion.
593		 */
594		adapter->sfp_probe = TRUE;
595		error = IXGBE_SUCCESS;
596	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
597		device_printf(dev, "Unsupported SFP+ module detected!\n");
598		error = EIO;
599		goto err_late;
600	} else if (error) {
601		device_printf(dev, "Hardware initialization failed\n");
602		error = EIO;
603		goto err_late;
604	}
605
606	/* Make sure we have a good EEPROM before we read from it */
607	if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
608		device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
609		error = EIO;
610		goto err_late;
611	}
612
613	/* Setup OS specific network interface */
614	if (ixgbe_setup_interface(dev, adapter) != 0)
615		goto err_late;
616
617	if (adapter->feat_en & IXGBE_FEATURE_MSIX)
618		error = ixgbe_allocate_msix(adapter);
619	else
620		error = ixgbe_allocate_legacy(adapter);
621	if (error)
622		goto err_late;
623
624	error = ixgbe_start_hw(hw);
625	switch (error) {
626	case IXGBE_ERR_EEPROM_VERSION:
627		device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
628		break;
629	case IXGBE_ERR_SFP_NOT_SUPPORTED:
630		device_printf(dev, "Unsupported SFP+ Module\n");
631		error = EIO;
632		goto err_late;
633	case IXGBE_ERR_SFP_NOT_PRESENT:
634		device_printf(dev, "No SFP+ Module found\n");
635		/* falls thru */
636	default:
637		break;
638	}
639
640	/* Enable the optics for 82599 SFP+ fiber */
641	ixgbe_enable_tx_laser(hw);
642
643	/* Enable power to the phy. */
644	ixgbe_set_phy_power(hw, TRUE);
645
646	/* Initialize statistics */
647	ixgbe_update_stats_counters(adapter);
648
649	/* Check PCIE slot type/speed/width */
650	ixgbe_get_slot_info(adapter);
651
652	/*
653	 * Do time init and sysctl init here, but
654	 * only on the first port of a bypass adapter.
655	 */
656	ixgbe_bypass_init(adapter);
657
658	/* Set an initial dmac value */
659	adapter->dmac = 0;
660	/* Set initial advertised speeds (if applicable) */
661	adapter->advertise = ixgbe_get_advertise(adapter);
662
663	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
664		ixgbe_define_iov_schemas(dev, &error);
665
666	/* Add sysctls */
667	ixgbe_add_device_sysctls(adapter);
668	ixgbe_add_hw_stats(adapter);
669
670	/* For Netmap */
671	adapter->init_locked = ixgbe_init_locked;
672	adapter->stop_locked = ixgbe_stop;
673
674	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
675		ixgbe_netmap_attach(adapter);
676
677	INIT_DEBUGOUT("ixgbe_attach: end");
678
679	return (0);
680
681err_late:
682	ixgbe_free_transmit_structures(adapter);
683	ixgbe_free_receive_structures(adapter);
684	free(adapter->queues, M_IXGBE);
685err_out:
686	if (adapter->ifp != NULL)
687		if_free(adapter->ifp);
688	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
689	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
690	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
691	ixgbe_free_pci_resources(adapter);
692	free(adapter->mta, M_IXGBE);
693	IXGBE_CORE_LOCK_DESTROY(adapter);
694
695	return (error);
696} /* ixgbe_attach */
697
698/************************************************************************
699 * ixgbe_detach - Device removal routine
700 *
701 *   Called when the driver is being removed.
702 *   Stops the adapter and deallocates all the resources
703 *   that were allocated for driver operation.
704 *
705 *   return 0 on success, positive on failure
706 ************************************************************************/
707static int
708ixgbe_detach(device_t dev)
709{
710	struct adapter  *adapter = device_get_softc(dev);
711	struct ix_queue *que = adapter->queues;
712	struct tx_ring  *txr = adapter->tx_rings;
713	u32             ctrl_ext;
714
715	INIT_DEBUGOUT("ixgbe_detach: begin");
716
717	/* Make sure VLANS are not using driver */
718	if (adapter->ifp->if_vlantrunk != NULL) {
719		device_printf(dev, "Vlan in use, detach first\n");
720		return (EBUSY);
721	}
722
723	if (ixgbe_pci_iov_detach(dev) != 0) {
724		device_printf(dev, "SR-IOV in use; detach first.\n");
725		return (EBUSY);
726	}
727
728	ether_ifdetach(adapter->ifp);
729	/* Stop the adapter */
730	IXGBE_CORE_LOCK(adapter);
731	ixgbe_setup_low_power_mode(adapter);
732	IXGBE_CORE_UNLOCK(adapter);
733
734	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
735		if (que->tq) {
736			if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
737				taskqueue_drain(que->tq, &txr->txq_task);
738			taskqueue_drain(que->tq, &que->que_task);
739			taskqueue_free(que->tq);
740		}
741	}
742
743	/* Drain the Link queue */
744	if (adapter->tq) {
745		taskqueue_drain(adapter->tq, &adapter->link_task);
746		taskqueue_drain(adapter->tq, &adapter->mod_task);
747		taskqueue_drain(adapter->tq, &adapter->msf_task);
748		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
749			taskqueue_drain(adapter->tq, &adapter->mbx_task);
750		taskqueue_drain(adapter->tq, &adapter->phy_task);
751		if (adapter->feat_en & IXGBE_FEATURE_FDIR)
752			taskqueue_drain(adapter->tq, &adapter->fdir_task);
753		taskqueue_free(adapter->tq);
754	}
755
756	/* let hardware know driver is unloading */
757	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
758	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
759	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
760
761	/* Unregister VLAN events */
762	if (adapter->vlan_attach != NULL)
763		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
764	if (adapter->vlan_detach != NULL)
765		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
766
767	callout_drain(&adapter->timer);
768
769	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
770		netmap_detach(adapter->ifp);
771
772	ixgbe_free_pci_resources(adapter);
773	bus_generic_detach(dev);
774	if_free(adapter->ifp);
775
776	ixgbe_free_transmit_structures(adapter);
777	ixgbe_free_receive_structures(adapter);
778	free(adapter->queues, M_IXGBE);
779	free(adapter->mta, M_IXGBE);
780
781	IXGBE_CORE_LOCK_DESTROY(adapter);
782
783	return (0);
784} /* ixgbe_detach */
785
786/************************************************************************
787 * ixgbe_shutdown - Shutdown entry point
788 ************************************************************************/
789static int
790ixgbe_shutdown(device_t dev)
791{
792	struct adapter *adapter = device_get_softc(dev);
793	int            error = 0;
794
795	INIT_DEBUGOUT("ixgbe_shutdown: begin");
796
797	IXGBE_CORE_LOCK(adapter);
798	error = ixgbe_setup_low_power_mode(adapter);
799	IXGBE_CORE_UNLOCK(adapter);
800
801	return (error);
802} /* ixgbe_shutdown */
803
804/************************************************************************
805 * ixgbe_suspend
806 *
807 *   From D0 to D3
808 ************************************************************************/
809static int
810ixgbe_suspend(device_t dev)
811{
812	struct adapter *adapter = device_get_softc(dev);
813	int            error = 0;
814
815	INIT_DEBUGOUT("ixgbe_suspend: begin");
816
817	IXGBE_CORE_LOCK(adapter);
818
819	error = ixgbe_setup_low_power_mode(adapter);
820
821	IXGBE_CORE_UNLOCK(adapter);
822
823	return (error);
824} /* ixgbe_suspend */
825
826/************************************************************************
827 * ixgbe_resume
828 *
829 *   From D3 to D0
830 ************************************************************************/
831static int
832ixgbe_resume(device_t dev)
833{
834	struct adapter  *adapter = device_get_softc(dev);
835	struct ifnet    *ifp = adapter->ifp;
836	struct ixgbe_hw *hw = &adapter->hw;
837	u32             wus;
838
839	INIT_DEBUGOUT("ixgbe_resume: begin");
840
841	IXGBE_CORE_LOCK(adapter);
842
843	/* Read & clear WUS register */
844	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
845	if (wus)
846		device_printf(dev, "Woken up by (WUS): %#010x\n",
847		    IXGBE_READ_REG(hw, IXGBE_WUS));
848	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
849	/* And clear WUFC until next low-power transition */
850	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
851
852	/*
853	 * Required after D3->D0 transition;
854	 * will re-advertise all previous advertised speeds
855	 */
856	if (ifp->if_flags & IFF_UP)
857		ixgbe_init_locked(adapter);
858
859	IXGBE_CORE_UNLOCK(adapter);
860
861	return (0);
862} /* ixgbe_resume */
863
864
/************************************************************************
 * ixgbe_ioctl - Ioctl entry point
 *
 *   Called when the user wants to configure the interface
 *   (address, MTU, flags, multicast, media, capabilities, I2C).
 *   Anything not handled here is passed down to ether_ioctl().
 *
 *   return 0 on success, positive errno on failure
 ************************************************************************/
static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq   *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr  *ifa = (struct ifaddr *)data;
#endif
	int            error = 0;
	bool           avoid_reset = FALSE;

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		 * Calling init results in link renegotiation,
		 * so we avoid doing it when possible.
		 */
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			/* Only (re)start the hardware if it is not running */
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			/* MTU change requires a full re-init of the rings */
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
			ixgbe_init_locked(adapter);
			ixgbe_recalculate_max_frame(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only reprogram the filters
				 * if PROMISC/ALLMULTI actually changed.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
				}
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		/* Remember flags so the XOR above works next time */
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Quiesce interrupts while the MTA is rewritten */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		/* Bits that differ between requested and current caps */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (!mask)
			break;

		/* HW cannot turn these on/off separately */
		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		}
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		/* Re-init so the new offload settings take effect in HW */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1002500
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;

		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the standard SFP EEPROM/diagnostics addresses */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		/* Reject user-supplied lengths larger than the buffer */
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
} /* ixgbe_ioctl */
1028
1029/************************************************************************
1030 * ixgbe_init_device_features
1031 ************************************************************************/
1032static void
1033ixgbe_init_device_features(struct adapter *adapter)
1034{
1035	adapter->feat_cap = IXGBE_FEATURE_NETMAP
1036	                  | IXGBE_FEATURE_RSS
1037	                  | IXGBE_FEATURE_MSI
1038	                  | IXGBE_FEATURE_MSIX
1039	                  | IXGBE_FEATURE_LEGACY_IRQ
1040	                  | IXGBE_FEATURE_LEGACY_TX;
1041
1042	/* Set capabilities first... */
1043	switch (adapter->hw.mac.type) {
1044	case ixgbe_mac_82598EB:
1045		if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
1046			adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
1047		break;
1048	case ixgbe_mac_X540:
1049		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1050		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1051		if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
1052		    (adapter->hw.bus.func == 0))
1053			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
1054		break;
1055	case ixgbe_mac_X550:
1056		adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
1057		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1058		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1059		break;
1060	case ixgbe_mac_X550EM_x:
1061		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1062		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1063		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
1064			adapter->feat_cap |= IXGBE_FEATURE_EEE;
1065		break;
1066	case ixgbe_mac_X550EM_a:
1067		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1068		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1069		adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
1070		if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
1071		    (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
1072			adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
1073			adapter->feat_cap |= IXGBE_FEATURE_EEE;
1074		}
1075		break;
1076	case ixgbe_mac_82599EB:
1077		adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
1078		adapter->feat_cap |= IXGBE_FEATURE_FDIR;
1079		if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
1080		    (adapter->hw.bus.func == 0))
1081			adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
1082		if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
1083			adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
1084		break;
1085	default:
1086		break;
1087	}
1088
1089	/* Enabled by default... */
1090	/* Fan failure detection */
1091	if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
1092		adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
1093	/* Netmap */
1094	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1095		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1096	/* EEE */
1097	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1098		adapter->feat_en |= IXGBE_FEATURE_EEE;
1099	/* Thermal Sensor */
1100	if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
1101		adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
1102
1103	/* Enabled via global sysctl... */
1104	/* Flow Director */
1105	if (ixgbe_enable_fdir) {
1106		if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
1107			adapter->feat_en |= IXGBE_FEATURE_FDIR;
1108		else
1109			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.");
1110	}
1111	/* Legacy (single queue) transmit */
1112	if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
1113	    ixgbe_enable_legacy_tx)
1114		adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
1115	/*
1116	 * Message Signal Interrupts - Extended (MSI-X)
1117	 * Normal MSI is only enabled if MSI-X calls fail.
1118	 */
1119	if (!ixgbe_enable_msix)
1120		adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
1121	/* Receive-Side Scaling (RSS) */
1122	if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
1123		adapter->feat_en |= IXGBE_FEATURE_RSS;
1124
1125	/* Disable features with unmet dependencies... */
1126	/* No MSI-X */
1127	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
1128		adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
1129		adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
1130		adapter->feat_en &= ~IXGBE_FEATURE_RSS;
1131		adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
1132	}
1133} /* ixgbe_init_device_features */
1134
1135/************************************************************************
1136 * ixgbe_check_fan_failure
1137 ************************************************************************/
1138static void
1139ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
1140{
1141	u32 mask;
1142
1143	mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
1144	    IXGBE_ESDP_SDP1;
1145
1146	if (reg & mask)
1147		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
1148} /* ixgbe_check_fan_failure */
1149
1150/************************************************************************
1151 * ixgbe_is_sfp
1152 ************************************************************************/
1153static inline bool
1154ixgbe_is_sfp(struct ixgbe_hw *hw)
1155{
1156	switch (hw->mac.type) {
1157	case ixgbe_mac_82598EB:
1158		if (hw->phy.type == ixgbe_phy_nl)
1159			return TRUE;
1160		return FALSE;
1161	case ixgbe_mac_82599EB:
1162		switch (hw->mac.ops.get_media_type(hw)) {
1163		case ixgbe_media_type_fiber:
1164		case ixgbe_media_type_fiber_qsfp:
1165			return TRUE;
1166		default:
1167			return FALSE;
1168		}
1169	case ixgbe_mac_X550EM_x:
1170	case ixgbe_mac_X550EM_a:
1171		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1172			return TRUE;
1173		return FALSE;
1174	default:
1175		return FALSE;
1176	}
1177} /* ixgbe_is_sfp */
1178
1179/************************************************************************
1180 * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
1181 *
1182 *   Takes the ifnet's if_capenable flags (e.g. set by the user using
1183 *   ifconfig) and indicates to the OS via the ifnet's if_hwassist
1184 *   field what mbuf offload flags the driver will understand.
1185 ************************************************************************/
1186static void
1187ixgbe_set_if_hwassist(struct adapter *adapter)
1188{
1189	struct ifnet *ifp = adapter->ifp;
1190
1191	ifp->if_hwassist = 0;
1192#if __FreeBSD_version >= 1000000
1193	if (ifp->if_capenable & IFCAP_TSO4)
1194		ifp->if_hwassist |= CSUM_IP_TSO;
1195	if (ifp->if_capenable & IFCAP_TSO6)
1196		ifp->if_hwassist |= CSUM_IP6_TSO;
1197	if (ifp->if_capenable & IFCAP_TXCSUM) {
1198		ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
1199		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
1200			ifp->if_hwassist |= CSUM_IP_SCTP;
1201	}
1202	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
1203		ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
1204		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
1205			ifp->if_hwassist |= CSUM_IP6_SCTP;
1206	}
1207#else
1208	if (ifp->if_capenable & IFCAP_TSO)
1209		ifp->if_hwassist |= CSUM_TSO;
1210	if (ifp->if_capenable & IFCAP_TXCSUM) {
1211		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1212		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
1213			ifp->if_hwassist |= CSUM_SCTP;
1214	}
1215#endif
1216} /* ixgbe_set_if_hwassist */
1217
/************************************************************************
 * ixgbe_init_locked - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init
 *   entry point in network interface structure. It is also
 *   used by the driver as a hw/sw initialization routine to
 *   get to a consistent state.
 *
 *   Caller must hold the core lock.  Ordering below (stop adapter,
 *   TX setup, HW init, RX setup, interrupt routing, link config)
 *   is deliberate; do not reorder casually.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet    *ifp = adapter->ifp;
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring  *txr;
	struct rx_ring  *rxr;
	u32             txdctl, mhadd;
	u32             rxdctl, rxctrl;
	u32             ctrl_ext;
	int             err = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the hardware and the watchdog before reconfiguring */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_iov(adapter);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/*
		 * Poll (up to ~10ms total across all rings; j is not
		 * reset per ring) for the enable bit to take effect.
		 */
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the per-second local timer */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VF's */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
} /* ixgbe_init_locked */
1448
/************************************************************************
 * ixgbe_init
 *
 *   Lock-acquiring wrapper around ixgbe_init_locked(); this is the
 *   if_init entry point handed to the network stack.
 ************************************************************************/
static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_init */
1463
1464/************************************************************************
1465 * ixgbe_config_gpie
1466 ************************************************************************/
1467static void
1468ixgbe_config_gpie(struct adapter *adapter)
1469{
1470	struct ixgbe_hw *hw = &adapter->hw;
1471	u32             gpie;
1472
1473	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1474
1475	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1476		/* Enable Enhanced MSI-X mode */
1477		gpie |= IXGBE_GPIE_MSIX_MODE
1478		     |  IXGBE_GPIE_EIAME
1479		     |  IXGBE_GPIE_PBA_SUPPORT
1480		     |  IXGBE_GPIE_OCD;
1481	}
1482
1483	/* Fan Failure Interrupt */
1484	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
1485		gpie |= IXGBE_SDP1_GPIEN;
1486
1487	/* Thermal Sensor Interrupt */
1488	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
1489		gpie |= IXGBE_SDP0_GPIEN_X540;
1490
1491	/* Link detection */
1492	switch (hw->mac.type) {
1493	case ixgbe_mac_82599EB:
1494		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
1495		break;
1496	case ixgbe_mac_X550EM_x:
1497	case ixgbe_mac_X550EM_a:
1498		gpie |= IXGBE_SDP0_GPIEN_X540;
1499		break;
1500	default:
1501		break;
1502	}
1503
1504	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1505
1506	return;
1507} /* ixgbe_config_gpie */
1508
1509/************************************************************************
1510 * ixgbe_config_delay_values
1511 *
1512 *   Requires adapter->max_frame_size to be set.
1513 ************************************************************************/
1514static void
1515ixgbe_config_delay_values(struct adapter *adapter)
1516{
1517	struct ixgbe_hw *hw = &adapter->hw;
1518	u32             rxpb, frame, size, tmp;
1519
1520	frame = adapter->max_frame_size;
1521
1522	/* Calculate High Water */
1523	switch (hw->mac.type) {
1524	case ixgbe_mac_X540:
1525	case ixgbe_mac_X550:
1526	case ixgbe_mac_X550EM_x:
1527	case ixgbe_mac_X550EM_a:
1528		tmp = IXGBE_DV_X540(frame, frame);
1529		break;
1530	default:
1531		tmp = IXGBE_DV(frame, frame);
1532		break;
1533	}
1534	size = IXGBE_BT2KB(tmp);
1535	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1536	hw->fc.high_water[0] = rxpb - size;
1537
1538	/* Now calculate Low Water */
1539	switch (hw->mac.type) {
1540	case ixgbe_mac_X540:
1541	case ixgbe_mac_X550:
1542	case ixgbe_mac_X550EM_x:
1543	case ixgbe_mac_X550EM_a:
1544		tmp = IXGBE_LOW_DV_X540(frame);
1545		break;
1546	default:
1547		tmp = IXGBE_LOW_DV(frame);
1548		break;
1549	}
1550	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1551
1552	hw->fc.pause_time = IXGBE_FC_PAUSE;
1553	hw->fc.send_xon = TRUE;
1554} /* ixgbe_config_delay_values */
1555
1556/************************************************************************
1557 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1558 ************************************************************************/
1559static inline void
1560ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1561{
1562	struct ixgbe_hw *hw = &adapter->hw;
1563	u64             queue = (u64)(1 << vector);
1564	u32             mask;
1565
1566	if (hw->mac.type == ixgbe_mac_82598EB) {
1567		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1568		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1569	} else {
1570		mask = (queue & 0xFFFFFFFF);
1571		if (mask)
1572			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1573		mask = (queue >> 32);
1574		if (mask)
1575			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1576	}
1577} /* ixgbe_enable_queue */
1578
1579/************************************************************************
1580 * ixgbe_disable_queue
1581 ************************************************************************/
1582static inline void
1583ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1584{
1585	struct ixgbe_hw *hw = &adapter->hw;
1586	u64             queue = (u64)(1 << vector);
1587	u32             mask;
1588
1589	if (hw->mac.type == ixgbe_mac_82598EB) {
1590		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1591		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1592	} else {
1593		mask = (queue & 0xFFFFFFFF);
1594		if (mask)
1595			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1596		mask = (queue >> 32);
1597		if (mask)
1598			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1599	}
1600} /* ixgbe_disable_queue */
1601
1602/************************************************************************
1603 * ixgbe_handle_que
1604 ************************************************************************/
1605static void
1606ixgbe_handle_que(void *context, int pending)
1607{
1608	struct ix_queue *que = context;
1609	struct adapter  *adapter = que->adapter;
1610	struct tx_ring  *txr = que->txr;
1611	struct ifnet    *ifp = adapter->ifp;
1612
1613	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1614		ixgbe_rxeof(que);
1615		IXGBE_TX_LOCK(txr);
1616		ixgbe_txeof(txr);
1617		if (!ixgbe_ring_empty(ifp, txr->br))
1618			ixgbe_start_locked(ifp, txr);
1619		IXGBE_TX_UNLOCK(txr);
1620	}
1621
1622	/* Reenable this interrupt */
1623	if (que->res != NULL)
1624		ixgbe_enable_queue(adapter, que->msix);
1625	else
1626		ixgbe_enable_intr(adapter);
1627
1628	return;
1629} /* ixgbe_handle_que */
1630
1631
/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 *
 *   Single shared handler used when neither MSI-X nor MSI is in use.
 *   Services queue 0's RX/TX, then demultiplexes the misc causes
 *   (fan failure, link, SFP, external PHY) to their taskqueues.
 ************************************************************************/
static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter  *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring  *txr = adapter->tx_rings;
	bool            more;
	u32             eicr, eicr_mask;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Reading EICR also clears the latched causes */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (!eicr) {
		/* Not ours (shared line) — just re-enable and leave */
		ixgbe_enable_intr(adapter);
		return;
	}

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	if (!ixgbe_ring_empty(ifp, txr->br))
		ixgbe_start_locked(ifp, txr);
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}

		/* 82599: multi-speed fiber needs separate handling */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	/* More RX work pending: defer to the task, else re-arm now */
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);

	return;
} /* ixgbe_legacy_irq */
1707
1708
1709/************************************************************************
1710 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
1711 ************************************************************************/
1712void
1713ixgbe_msix_que(void *arg)
1714{
1715	struct ix_queue *que = arg;
1716	struct adapter  *adapter = que->adapter;
1717	struct ifnet    *ifp = adapter->ifp;
1718	struct tx_ring  *txr = que->txr;
1719	struct rx_ring  *rxr = que->rxr;
1720	bool            more;
1721	u32             newitr = 0;
1722
1723
1724	/* Protect against spurious interrupts */
1725	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1726		return;
1727
1728	ixgbe_disable_queue(adapter, que->msix);
1729	++que->irqs;
1730
1731	more = ixgbe_rxeof(que);
1732
1733	IXGBE_TX_LOCK(txr);
1734	ixgbe_txeof(txr);
1735	if (!ixgbe_ring_empty(ifp, txr->br))
1736		ixgbe_start_locked(ifp, txr);
1737	IXGBE_TX_UNLOCK(txr);
1738
1739	/* Do AIM now? */
1740
1741	if (adapter->enable_aim == FALSE)
1742		goto no_calc;
1743	/*
1744	 * Do Adaptive Interrupt Moderation:
1745	 *  - Write out last calculated setting
1746	 *  - Calculate based on average size over
1747	 *    the last interval.
1748	 */
1749	if (que->eitr_setting)
1750		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
1751		    que->eitr_setting);
1752
1753	que->eitr_setting = 0;
1754
1755	/* Idle, do nothing */
1756	if ((txr->bytes == 0) && (rxr->bytes == 0))
1757		goto no_calc;
1758
1759	if ((txr->bytes) && (txr->packets))
1760		newitr = txr->bytes/txr->packets;
1761	if ((rxr->bytes) && (rxr->packets))
1762		newitr = max(newitr, (rxr->bytes / rxr->packets));
1763	newitr += 24; /* account for hardware frame, crc */
1764
1765	/* set an upper boundary */
1766	newitr = min(newitr, 3000);
1767
1768	/* Be nice to the mid range */
1769	if ((newitr > 300) && (newitr < 1200))
1770		newitr = (newitr / 3);
1771	else
1772		newitr = (newitr / 2);
1773
1774	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1775		newitr |= newitr << 16;
1776	else
1777		newitr |= IXGBE_EITR_CNT_WDIS;
1778
1779	/* save for next interrupt */
1780	que->eitr_setting = newitr;
1781
1782	/* Reset state */
1783	txr->bytes = 0;
1784	txr->packets = 0;
1785	rxr->bytes = 0;
1786	rxr->packets = 0;
1787
1788no_calc:
1789	if (more)
1790		taskqueue_enqueue(que->tq, &que->que_task);
1791	else
1792		ixgbe_enable_queue(adapter, que->msix);
1793
1794	return;
1795} /* ixgbe_msix_que */
1796
1797
1798/************************************************************************
1799 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
1800 ************************************************************************/
/*
 * Interrupt handler for the "other"/link MSI-X vector.  Decodes the
 * non-queue interrupt causes (link change, Flow Director, ECC, over-temp,
 * VF mailbox, SFP module/multispeed-fiber, fan failure, external PHY)
 * and defers the heavy lifting to the corresponding taskqueue tasks.
 */
static void
ixgbe_msix_link(void *arg)
{
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             eicr, eicr_mask;
	s32             retval;

	/* Statistics: count link/other interrupts */
	++adapter->link_irq;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/*
	 * First get the cause.
	 * NOTE(review): this reads EICS, not EICR; presumably a read of
	 * EICS returns the current EICR contents on this hardware --
	 * confirm against the 82599/X540/X550 datasheets.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change: mask LSC until the link task handles it */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		taskqueue_enqueue(adapter->tq, &adapter->link_task);
	}

	/* The causes below do not exist on 82598 parts */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Flow Director signature-table reinit request */
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
			/*
			 * NOTE(review): the early return above leaves
			 * IXGBE_EIMS_OTHER masked (the re-enable at the
			 * bottom of this function is skipped); verify the
			 * FDIR reinit path restores it.
			 */
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		}

		/* Uncorrectable ECC error: no recovery, operator must act */
		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!!  Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals over-temp via GPI SDP0 */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other parts use the thermal sensor bit */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt: pick per-MAC GPI bit */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}

		/* Multispeed fiber renegotiation (82599 only) */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return;
} /* ixgbe_msix_link */
1917
1918/************************************************************************
1919 * ixgbe_media_status - Media Ioctl callback
1920 *
1921 *   Called whenever the user queries the status of
1922 *   the interface using ifconfig.
1923 ************************************************************************/
/*
 * ifmedia status callback: refresh the cached link state under the core
 * lock, then translate (physical layer, link speed) into the ifmedia
 * subtype reported to userland (ifconfig).
 */
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter  *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int             layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Link down: report valid-but-inactive media and bail */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/* Copper (BASE-T) layers: subtype follows the negotiated speed */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* Direct-attach copper (twinax) */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 *
	 * Backplane (KR/KX4/KX): on older ifmedia without IFM_ETH_XTYPE
	 * the closest existing subtypes are substituted.
	 */
#ifndef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
#else
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
#endif

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
#endif

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2081
2082/************************************************************************
2083 * ixgbe_media_change - Media Ioctl callback
2084 *
2085 *   Called when the user changes speed/duplex using
2086 *   media/mediopt option with ifconfig.
2087 ************************************************************************/
/*
 * ifmedia change callback: translate the requested ifmedia subtype into
 * an ixgbe_link_speed mask, program the link via the shared code, and
 * record the result in adapter->advertise.
 *
 * Returns 0 on success, EINVAL for a non-Ethernet or unknown media type,
 * or ENODEV for backplane media (not user-configurable).
 */
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter   *adapter = ifp->if_softc;
	struct ifmedia   *ifm = &adapter->media;
	struct ixgbe_hw  *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane link parameters are fixed by the platform */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
		case IFM_10G_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_10G_LRM:
		case IFM_10G_LR:
#ifndef IFM_ETH_XTYPE
		case IFM_10G_SR: /* KR, too */
		case IFM_10G_CX4: /* KX4 */
#else
		case IFM_10G_KR:
		case IFM_10G_KX4:
#endif
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
#ifndef IFM_ETH_XTYPE
		case IFM_1000_CX: /* KX */
#else
		case IFM_1000_KX:
#endif
		case IFM_1000_LX:
		case IFM_1000_SX:
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_1000_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_10G_TWINAX:
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_100_TX:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			break;
		case IFM_10_T:
			speed |= IXGBE_LINK_SPEED_10_FULL;
			break;
		default:
			goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* advertise bitmask: 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M */
	adapter->advertise =
	    ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
	    ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
	    ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
	    ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");

	return (EINVAL);
} /* ixgbe_media_change */
2169
2170/************************************************************************
2171 * ixgbe_set_promisc
2172 ************************************************************************/
/*
 * Program the FCTRL unicast/multicast promiscuous bits (UPE/MPE) from
 * the interface flags.  Counts the multicast list first so MPE is only
 * left set when the list overflows the hardware filter or the user
 * asked for ALLMULTI/PROMISC.
 */
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int          mcnt = 0;
	u32          rctl;

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count AF_LINK entries, capped at the hardware limit */
		struct ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
		IF_ADDR_LOCK(ifp);
#else
		if_maddr_rlock(ifp);
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
#else
		if_maddr_runlock(ifp);
#endif
	}
	/* List fits in the MTA: no need for multicast promiscuous */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	/*
	 * The write above clears promiscuous state; re-apply (and
	 * re-write FCTRL) when PROMISC or ALLMULTI is requested.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}

	return;
} /* ixgbe_set_promisc */
2219
2220
2221/************************************************************************
2222 * ixgbe_set_multi - Multicast Update
2223 *
2224 *   Called whenever multicast address list is updated.
2225 ************************************************************************/
2226static void
2227ixgbe_set_multi(struct adapter *adapter)
2228{
2229	struct ifmultiaddr   *ifma;
2230	struct ixgbe_mc_addr *mta;
2231	struct ifnet         *ifp = adapter->ifp;
2232	u8                   *update_ptr;
2233	int                  mcnt = 0;
2234	u32                  fctrl;
2235
2236	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
2237
2238	mta = adapter->mta;
2239	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
2240
2241#if __FreeBSD_version < 800000
2242	IF_ADDR_LOCK(ifp);
2243#else
2244	if_maddr_rlock(ifp);
2245#endif
2246	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2247		if (ifma->ifma_addr->sa_family != AF_LINK)
2248			continue;
2249		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2250			break;
2251		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2252		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2253		mta[mcnt].vmdq = adapter->pool;
2254		mcnt++;
2255	}
2256#if __FreeBSD_version < 800000
2257	IF_ADDR_UNLOCK(ifp);
2258#else
2259	if_maddr_runlock(ifp);
2260#endif
2261
2262	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2263	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2264	if (ifp->if_flags & IFF_PROMISC)
2265		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2266	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
2267	    ifp->if_flags & IFF_ALLMULTI) {
2268		fctrl |= IXGBE_FCTRL_MPE;
2269		fctrl &= ~IXGBE_FCTRL_UPE;
2270	} else
2271		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2272
2273	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2274
2275	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
2276		update_ptr = (u8 *)mta;
2277		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
2278		    ixgbe_mc_array_itr, TRUE);
2279	}
2280
2281	return;
2282} /* ixgbe_set_multi */
2283
2284/************************************************************************
2285 * ixgbe_mc_array_itr
2286 *
2287 *   An iterator function needed by the multicast shared code.
2288 *   It feeds the shared code routine the addresses in the
2289 *   array of ixgbe_set_multi() one by one.
2290 ************************************************************************/
2291static u8 *
2292ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
2293{
2294	struct ixgbe_mc_addr *mta;
2295
2296	mta = (struct ixgbe_mc_addr *)*update_ptr;
2297	*vmdq = mta->vmdq;
2298
2299	*update_ptr = (u8*)(mta + 1);
2300
2301	return (mta->addr);
2302} /* ixgbe_mc_array_itr */
2303
2304
2305/************************************************************************
2306 * ixgbe_local_timer - Timer routine
2307 *
2308 *   Checks for link status, updates statistics,
2309 *   and runs the watchdog check.
2310 ************************************************************************/
/*
 * Once-per-second callout (core lock held by the callout framework's
 * mtx-protected callout): probes pluggable optics, refreshes link state
 * and statistics, and runs the TX watchdog.  Only resets the adapter
 * when EVERY queue reports hung.
 */
static void
ixgbe_local_timer(void *arg)
{
	struct adapter  *adapter = arg;
	device_t        dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64             queues = 0;	/* bitmask of queues with pending work */
	int             hung = 0;	/* number of queues declared hung */

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 *
	 * NOTE(review): que->busy and que->txr->busy are distinct
	 * fields; the hung test reads que->busy while the marker sets
	 * que->txr->busy -- confirm against ixgbe.h/ix_txrx.c that both
	 * are maintained as intended.
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);
	}

out:
	/* Reschedule ourselves in one second */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: count the event and reinitialize the adapter */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
2379
2380
2381/************************************************************************
2382 * ixgbe_update_link_status - Update OS on link state
2383 *
2384 * Note: Only updates the OS on the cached link state.
2385 *       The real check of the hardware only happens with
2386 *       a link interrupt.
2387 ************************************************************************/
2388static void
2389ixgbe_update_link_status(struct adapter *adapter)
2390{
2391	struct ifnet *ifp = adapter->ifp;
2392	device_t     dev = adapter->dev;
2393
2394	if (adapter->link_up) {
2395		if (adapter->link_active == FALSE) {
2396			if (bootverbose)
2397				device_printf(dev, "Link is up %d Gbps %s \n",
2398				    ((adapter->link_speed == 128) ? 10 : 1),
2399				    "Full Duplex");
2400			adapter->link_active = TRUE;
2401			/* Update any Flow Control changes */
2402			ixgbe_fc_enable(&adapter->hw);
2403			/* Update DMA coalescing config */
2404			ixgbe_config_dmac(adapter);
2405			if_link_state_change(ifp, LINK_STATE_UP);
2406			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
2407				ixgbe_ping_all_vfs(adapter);
2408		}
2409	} else { /* Link down */
2410		if (adapter->link_active == TRUE) {
2411			if (bootverbose)
2412				device_printf(dev, "Link is Down\n");
2413			if_link_state_change(ifp, LINK_STATE_DOWN);
2414			adapter->link_active = FALSE;
2415			if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
2416				ixgbe_ping_all_vfs(adapter);
2417		}
2418	}
2419
2420	return;
2421} /* ixgbe_update_link_status */
2422
2423
2424/************************************************************************
2425 * ixgbe_stop - Stop the hardware
2426 *
2427 *   Disables all traffic on the adapter by issuing a
2428 *   global reset on the MAC and deallocates TX/RX buffers.
2429 ************************************************************************/
/*
 * Quiesce the adapter: disable interrupts, stop the watchdog callout,
 * reset and stop the MAC, turn off optics, and tell the stack the link
 * is down.  Caller must hold the core lock.
 */
static void
ixgbe_stop(void *arg)
{
	struct ifnet    *ifp;
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped so ixgbe_stop_adapter() actually runs --
	 * presumably ixgbe_reset_hw() set it; confirm in the shared code.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */
2465
2466
2467/************************************************************************
2468 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
2469 ************************************************************************/
/*
 * Set up a single shared legacy/MSI interrupt: allocate the IRQ
 * resource, create the queue and link taskqueues and their tasks, and
 * register ixgbe_legacy_irq as the fast handler.
 *
 * Returns 0 on success, ENXIO if the IRQ resource cannot be allocated,
 * or the bus_setup_intr() error code.
 */
static int
ixgbe_allocate_legacy(struct adapter *adapter)
{
	device_t        dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring  *txr = adapter->tx_rings;
	int             error;

	/* We allocate a single interrupt resource */
	adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev,
		    "Unable to allocate bus resource: interrupt\n");
		return (ENXIO);
	}

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 *
	 * NOTE(review): the taskqueue_create_fast() results below are
	 * not checked for NULL (M_NOWAIT can fail) -- verify callers
	 * tolerate this or add checks.
	 */
	if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
	    device_get_nameunit(adapter->dev));

	/* Tasklets for Link, SFP and Multispeed Fiber */
	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
	    device_get_nameunit(adapter->dev));

	/* Register the fast handler; on failure tear down the taskqueues */
	if ((error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
	    &adapter->tag)) != 0) {
		device_printf(dev,
		    "Failed to register fast interrupt handler: %d\n", error);
		taskqueue_free(que->tq);
		taskqueue_free(adapter->tq);
		que->tq = NULL;
		adapter->tq = NULL;

		return (error);
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
} /* ixgbe_allocate_legacy */
2528
2529
2530/************************************************************************
2531 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
2532 ************************************************************************/
2533static int
2534ixgbe_allocate_msix(struct adapter *adapter)
2535{
2536	device_t        dev = adapter->dev;
2537	struct ix_queue *que = adapter->queues;
2538	struct tx_ring  *txr = adapter->tx_rings;
2539	int             error, rid, vector = 0;
2540	int             cpu_id = 0;
2541	unsigned int    rss_buckets = 0;
2542	cpuset_t        cpu_mask;
2543
2544	/*
2545	 * If we're doing RSS, the number of queues needs to
2546	 * match the number of RSS buckets that are configured.
2547	 *
2548	 * + If there's more queues than RSS buckets, we'll end
2549	 *   up with queues that get no traffic.
2550	 *
2551	 * + If there's more RSS buckets than queues, we'll end
2552	 *   up having multiple RSS buckets map to the same queue,
2553	 *   so there'll be some contention.
2554	 */
2555	rss_buckets = rss_getnumbuckets();
2556	if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
2557	    (adapter->num_queues != rss_buckets)) {
2558		device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
2559		    __func__, adapter->num_queues, rss_buckets);
2560	}
2561
2562	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2563		rid = vector + 1;
2564		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2565		    RF_SHAREABLE | RF_ACTIVE);
2566		if (que->res == NULL) {
2567			device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
2568			    vector);
2569			return (ENXIO);
2570		}
2571		/* Set the handler function */
2572		error = bus_setup_intr(dev, que->res,
2573		    INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
2574		    &que->tag);
2575		if (error) {
2576			que->res = NULL;
2577			device_printf(dev, "Failed to register QUE handler");
2578			return (error);
2579		}
2580#if __FreeBSD_version >= 800504
2581		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
2582#endif
2583		que->msix = vector;
2584		adapter->active_queues |= (u64)(1 << que->msix);
2585
2586		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
2587			/*
2588			 * The queue ID is used as the RSS layer bucket ID.
2589			 * We look up the queue ID -> RSS CPU ID and select
2590			 * that.
2591			 */
2592			cpu_id = rss_getcpu(i % rss_buckets);
2593			CPU_SETOF(cpu_id, &cpu_mask);
2594		} else {
2595			/*
2596			 * Bind the msix vector, and thus the
2597			 * rings to the corresponding cpu.
2598			 *
2599			 * This just happens to match the default RSS
2600			 * round-robin bucket -> queue -> CPU allocation.
2601			 */
2602			if (adapter->num_queues > 1)
2603				cpu_id = i;
2604		}
2605		if (adapter->num_queues > 1)
2606			bus_bind_intr(dev, que->res, cpu_id);
2607#ifdef IXGBE_DEBUG
2608		if (adapter->feat_en & IXGBE_FEATURE_RSS)
2609			device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
2610			    cpu_id);
2611		else
2612			device_printf(dev, "Bound queue %d to cpu %d\n", i,
2613			    cpu_id);
2614#endif /* IXGBE_DEBUG */
2615
2616
2617		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2618			TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
2619			    txr);
2620		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2621		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2622		    taskqueue_thread_enqueue, &que->tq);
2623#if __FreeBSD_version < 1100000
2624		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
2625		    device_get_nameunit(adapter->dev), i);
2626#else
2627		if (adapter->feat_en & IXGBE_FEATURE_RSS)
2628			taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2629			    &cpu_mask, "%s (bucket %d)",
2630			    device_get_nameunit(adapter->dev), cpu_id);
2631		else
2632			taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2633			    NULL, "%s:q%d", device_get_nameunit(adapter->dev),
2634			    i);
2635#endif
2636	}
2637
2638	/* and Link */
2639	adapter->link_rid = vector + 1;
2640	adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2641	    &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
2642	if (!adapter->res) {
2643		device_printf(dev,
2644		    "Unable to allocate bus resource: Link interrupt [%d]\n",
2645		    adapter->link_rid);
2646		return (ENXIO);
2647	}
2648	/* Set the link handler function */
2649	error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
2650	    NULL, ixgbe_msix_link, adapter, &adapter->tag);
2651	if (error) {
2652		adapter->res = NULL;
2653		device_printf(dev, "Failed to register LINK handler");
2654		return (error);
2655	}
2656#if __FreeBSD_version >= 800504
2657	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2658#endif
2659	adapter->vector = vector;
2660	/* Tasklets for Link, SFP and Multispeed Fiber */
2661	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2662	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2663	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2664	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2665		TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2666	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2667	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
2668		TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2669	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2670	    taskqueue_thread_enqueue, &adapter->tq);
2671	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2672	    device_get_nameunit(adapter->dev));
2673
2674	return (0);
2675} /* ixgbe_allocate_msix */
2676
2677/************************************************************************
2678 * ixgbe_configure_interrupts
2679 *
2680 *   Setup MSI-X, MSI, or legacy interrupts (in that order).
2681 *   This will also depend on user settings.
2682 ************************************************************************/
/*
 * Choose the interrupt mechanism in order of preference: MSI-X (with a
 * queue count derived from CPUs/RSS buckets/tunable), then MSI, then a
 * shared legacy IRQ.  Sets adapter->num_queues, adapter->link_rid, and
 * the feat_en/feat_cap bits to match what was obtained.
 *
 * Returns 0 on success, nonzero when no usable interrupt could be
 * configured.
 */
static int
ixgbe_configure_interrupts(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int      rid, want, queues, msgs;

	/* Default to 1 queue if MSI-X setup fails */
	adapter->num_queues = 1;

	/* Override by tuneable */
	if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
		goto msi;

	/* First try MSI-X */
	msgs = pci_msix_count(dev);
	if (msgs == 0)
		goto msi;
	/* Map the MSI-X table BAR (82598 location first) */
	rid = PCIR_BAR(MSIX_82598_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		rid += 4;  /* 82599 maps in higher BAR */
		adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
	}
	if (adapter->msix_mem == NULL) {
		/* May not be enabled */
		device_printf(adapter->dev, "Unable to map MSI-X table.\n");
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	queues = min(mp_ncpus, msgs - 1);
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		queues = min(queues, rss_getnumbuckets());
	/* Clamp the tunable to what the hardware/CPUs allow */
	if (ixgbe_num_queues > queues) {
		device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
		ixgbe_num_queues = queues;
	}

	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;
	/* Set max queues to 8 when autoconfiguring */
	else
		queues = min(queues, 8);

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	 * Want one vector (RX/TX pair) per queue
	 * plus an additional for Link.
	 */
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
		device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSI-X interrupts with %d vectors\n", msgs);
		adapter->num_queues = queues;
		adapter->feat_en |= IXGBE_FEATURE_MSIX;
		return 0;
	}
	/*
	 * MSI-X allocation failed or provided us with
	 * less vectors than needed. Free MSI-X resources
	 * and we'll try enabling MSI.
	 */
	pci_release_msi(dev);

msi:
	/* Without MSI-X, some features are no longer supported */
	adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
	adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
	adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
	adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;

	/* msix_mem non-NULL implies rid was set above, so rid is valid here */
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid,
		    adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	msgs = 1;
	if (pci_alloc_msi(dev, &msgs) == 0) {
		adapter->feat_en |= IXGBE_FEATURE_MSI;
		adapter->link_rid = 1;
		device_printf(adapter->dev, "Using an MSI interrupt\n");
		return 0;
	}

	if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
		device_printf(adapter->dev,
		    "Device does not support legacy interrupts.\n");
		return 1;
	}

	adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
	adapter->link_rid = 0;
	device_printf(adapter->dev, "Using a Legacy interrupt\n");

	return 0;
} /* ixgbe_configure_interrupts */
2791
2792
2793/************************************************************************
2794 * ixgbe_allocate_pci_resources
2795 ************************************************************************/
2796static int
2797ixgbe_allocate_pci_resources(struct adapter *adapter)
2798{
2799	device_t dev = adapter->dev;
2800	int      rid;
2801
2802	rid = PCIR_BAR(0);
2803	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2804	    RF_ACTIVE);
2805
2806	if (!(adapter->pci_mem)) {
2807		device_printf(dev, "Unable to allocate bus resource: memory\n");
2808		return (ENXIO);
2809	}
2810
2811	/* Save bus_space values for READ/WRITE_REG macros */
2812	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2813	adapter->osdep.mem_bus_space_handle =
2814	    rman_get_bushandle(adapter->pci_mem);
2815	/* Set hw values for shared code */
2816	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2817
2818	return (0);
2819} /* ixgbe_allocate_pci_resources */
2820
/************************************************************************
 * ixgbe_free_pci_resources
 *
 *   Tear down interrupt resources (per-queue MSI-X vectors first,
 *   then the link/legacy interrupt), release any MSI/MSI-X message
 *   allocation, and finally release the MSI-X table BAR and BAR 0.
 *   Also safe on a partially failed attach; see the adapter->res
 *   check below.
 ************************************************************************/
static void
ixgbe_free_pci_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	device_t        dev = adapter->dev;
	int             rid, memrid;

	/* The MSI-X table lives in a different BAR on 82598 vs later MACs */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		memrid = PCIR_BAR(MSIX_82598_BAR);
	else
		memrid = PCIR_BAR(MSIX_82599_BAR);

	/*
	 * There is a slight possibility of a failure mode
	 * in attach that will result in entering this function
	 * before interrupt resources have been initialized, and
	 * in that case we do not want to execute the loops below
	 * We can detect this reliably by the state of the adapter
	 * res pointer.
	 */
	if (adapter->res == NULL)
		goto mem;

	/*
	 * Release all msix queue resources:
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* IRQ rid is offset by one from the queue's MSI-X index */
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
		    adapter->res);

mem:
	/* Give back the message vectors before releasing the table BAR */
	if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
	    (adapter->feat_en & IXGBE_FEATURE_MSIX))
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, memrid,
		    adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    adapter->pci_mem);

	return;
} /* ixgbe_free_pci_resources */
2885
/************************************************************************
 * ixgbe_setup_interface
 *
 *   Setup networking device structure and register an interface:
 *   allocates the ifnet, wires up init/ioctl/transmit entry points,
 *   selects legacy vs multiqueue TX, attaches to ether, advertises
 *   capabilities and registers the supported media types.
 *
 *   Returns 0 on success, -1 if the ifnet cannot be allocated.
 ************************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_initbaudrate(ifp, IF_Gbps(10));
	ifp->if_init = ixgbe_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgbe_ioctl;
#if __FreeBSD_version >= 1100036
	if_setgetcounterfn(ifp, ixgbe_get_counter);
#endif
#if __FreeBSD_version >= 1100045
	/* TSO parameters */
	ifp->if_hw_tsomax = 65518;
	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
	ifp->if_hw_tsomaxsegsize = 2048;
#endif
	/*
	 * Choose the transmit path: the legacy if_start queue, or the
	 * multiqueue if_transmit path.  The driver-global start/empty
	 * function pointers are switched to match.
	 */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
		ifp->if_start = ixgbe_legacy_start;
		IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
		ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
		IFQ_SET_READY(&ifp->if_snd);
		ixgbe_start_locked = ixgbe_legacy_start_locked;
		ixgbe_ring_empty = ixgbe_legacy_ring_empty;
	} else {
		ifp->if_transmit = ixgbe_mq_start;
		ifp->if_qflush = ixgbe_qflush;
		ixgbe_start_locked = ixgbe_mq_start_locked;
		ixgbe_ring_empty = drbr_empty;
	}

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
	                     |  IFCAP_HWCSUM_IPV6
	                     |  IFCAP_TSO
	                     |  IFCAP_LRO
	                     |  IFCAP_VLAN_HWTAGGING
	                     |  IFCAP_VLAN_HWTSO
	                     |  IFCAP_VLAN_HWCSUM
	                     |  IFCAP_JUMBO_MTU
	                     |  IFCAP_VLAN_MTU
	                     |  IFCAP_HWSTATS;

	/* Enable the above capabilities by default */
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Don't turn this on by default, if vlans are
	 * created on another pseudo device (eg. lagg)
	 * then vlan events are not passed thru, breaking
	 * operation, but with HW FILTER off it works. If
	 * using vlans directly on the ixgbe driver you can
	 * enable this and get full hardware tag filtering.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
	    ixgbe_media_status);

	adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
	ixgbe_add_media_types(adapter);

	/* Set autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
} /* ixgbe_setup_interface */
2981
/************************************************************************
 * ixgbe_add_media_types
 *
 *   Translate the PHY-layer capability bits cached in
 *   adapter->phy_layer into ifmedia entries.  Backplane (KR/KX4/KX)
 *   types are mapped to the nearest defined media word on kernels
 *   that lack IFM_ETH_XTYPE.  Always ends with IFM_AUTO.
 ************************************************************************/
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	int             layer;

	layer = adapter->phy_layer;

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);

	/* Direct-attach copper SFP+ variants all report as TWINAX */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
		    NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
		/* Multispeed modules can also run at 1G */
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
			    NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
		if (hw->phy.multispeed_fiber)
			ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
			    NULL);
	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);

#ifdef IFM_ETH_XTYPE
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
#else
	/* Older kernels: approximate backplane types with legacy media words */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
#endif
	/* 1000baseBX has no media word; only report it */
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
		device_printf(dev, "Media supported: 1000baseBX\n");

	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
} /* ixgbe_add_media_types */
3060
/************************************************************************
 * ixgbe_config_link
 *
 *   Kick off link bring-up.  SFP modules are handled asynchronously
 *   via the mod/msf taskqueues (the module may not be ready yet);
 *   all other PHYs negotiate directly through the shared-code
 *   setup_link path.  Errors silently abort the direct path.
 ************************************************************************/
static void
ixgbe_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             autoneg, err = 0;
	bool            sfp, negotiate;

	sfp = ixgbe_is_sfp(hw);

	if (sfp) {
		if (hw->phy.multispeed_fiber) {
			hw->mac.ops.setup_sfp(hw);
			ixgbe_enable_tx_laser(hw);
			/* Multispeed fiber: the MSF task selects the speed */
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		} else
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
	} else {
		if (hw->mac.ops.check_link)
			err = ixgbe_check_link(hw, &adapter->link_speed,
			    &adapter->link_up, FALSE);
		if (err)
			goto out;
		autoneg = hw->phy.autoneg_advertised;
		/* Nothing advertised: fall back to what the HW can do */
		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
			err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
			    &negotiate);
		if (err)
			goto out;
		if (hw->mac.ops.setup_link)
			err = hw->mac.ops.setup_link(hw, autoneg,
			    adapter->link_up);
	}
out:

	return;
} /* ixgbe_config_link */
3100
3101
/************************************************************************
 * ixgbe_initialize_transmit_units - Enable transmit units.
 *
 *   Program the TX descriptor ring base/length/head/tail registers
 *   for every queue, disable descriptor head write-back relaxed
 *   ordering, and (on 82599 and later) enable DMA TX and set the
 *   MTQC queueing mode for the current IOV configuration.
 ************************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring  *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl = 0;
		int j = txr->me;	/* hardware ring index, not loop index */

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(j);

		/* Disable Head Writeback */
		/*
		 * Note: for X550 series devices, these registers are actually
		 * prefixed with TPH_ instead of DCA_, but the addresses and
		 * fields remain the same.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}

	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;

		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
		    ixgbe_get_mtqc(adapter->iov_mode));
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
} /* ixgbe_initialize_transmit_units */
3174
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the RETA redirection table, the RSS hash key (RSSRK) and
 *   the MRQC hash-type/enable register.  When the kernel RSS option
 *   is enabled the key and bucket layout come from the network
 *   stack; otherwise a random key and a simple round-robin queue
 *   mapping are used.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id, table_size, index_mult;
	int             i, j;
	u32             rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* X550 family has an extended (ERETA) table */
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j walks the queues round-robin */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			/* Four entries accumulated: flush this table word */
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6
		                | RSS_HASHTYPE_RSS_IPV6_EX
		                | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the stack's hash-type flags into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
3290
3291/************************************************************************
3292 * ixgbe_initialize_receive_units - Setup receive registers and features.
3293 ************************************************************************/
3294#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
3295
3296static void
3297ixgbe_initialize_receive_units(struct adapter *adapter)
3298{
3299	struct rx_ring  *rxr = adapter->rx_rings;
3300	struct ixgbe_hw *hw = &adapter->hw;
3301	struct ifnet    *ifp = adapter->ifp;
3302	int             i, j;
3303	u32             bufsz, fctrl, srrctl, rxcsum;
3304	u32             hlreg;
3305
3306	/*
3307	 * Make sure receives are disabled while
3308	 * setting up the descriptor ring
3309	 */
3310	ixgbe_disable_rx(hw);
3311
3312	/* Enable broadcasts */
3313	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3314	fctrl |= IXGBE_FCTRL_BAM;
3315	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3316		fctrl |= IXGBE_FCTRL_DPF;
3317		fctrl |= IXGBE_FCTRL_PMCF;
3318	}
3319	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3320
3321	/* Set for Jumbo Frames? */
3322	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3323	if (ifp->if_mtu > ETHERMTU)
3324		hlreg |= IXGBE_HLREG0_JUMBOEN;
3325	else
3326		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3327
3328#ifdef DEV_NETMAP
3329	/* CRC stripping is conditional in Netmap */
3330	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
3331	    (ifp->if_capenable & IFCAP_NETMAP) &&
3332	    !ix_crcstrip)
3333		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
3334	else
3335#endif /* DEV_NETMAP */
3336		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
3337
3338	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3339
3340	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
3341	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3342
3343	for (i = 0; i < adapter->num_queues; i++, rxr++) {
3344		u64 rdba = rxr->rxdma.dma_paddr;
3345		j = rxr->me;
3346
3347		/* Setup the Base and Length of the Rx Descriptor Ring */
3348		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
3349		    (rdba & 0x00000000ffffffffULL));
3350		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
3351		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
3352		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3353
3354		/* Set up the SRRCTL register */
3355		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
3356		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3357		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3358		srrctl |= bufsz;
3359		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3360
3361		/*
3362		 * Set DROP_EN iff we have no flow control and >1 queue.
3363		 * Note that srrctl was cleared shortly before during reset,
3364		 * so we do not need to clear the bit, but do it just in case
3365		 * this code is moved elsewhere.
3366		 */
3367		if (adapter->num_queues > 1 &&
3368		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
3369			srrctl |= IXGBE_SRRCTL_DROP_EN;
3370		} else {
3371			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3372		}
3373
3374		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
3375
3376		/* Setup the HW Rx Head and Tail Descriptor Pointers */
3377		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
3378		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
3379
3380		/* Set the driver rx tail address */
3381		rxr->tail =  IXGBE_RDT(rxr->me);
3382	}
3383
3384	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3385		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
3386		            | IXGBE_PSRTYPE_UDPHDR
3387		            | IXGBE_PSRTYPE_IPV4HDR
3388		            | IXGBE_PSRTYPE_IPV6HDR;
3389		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3390	}
3391
3392	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3393
3394	ixgbe_initialize_rss_mapping(adapter);
3395
3396	if (adapter->num_queues > 1) {
3397		/* RSS and RX IPP Checksum are mutually exclusive */
3398		rxcsum |= IXGBE_RXCSUM_PCSD;
3399	}
3400
3401	if (ifp->if_capenable & IFCAP_RXCSUM)
3402		rxcsum |= IXGBE_RXCSUM_PCSD;
3403
3404	/* This is useful for calculating UDP/IP fragment checksums */
3405	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3406		rxcsum |= IXGBE_RXCSUM_IPPCSE;
3407
3408	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3409
3410	return;
3411} /* ixgbe_initialize_receive_units */
3412
3413
3414/************************************************************************
3415 * ixgbe_register_vlan
3416 *
3417 *   Run via vlan config EVENT, it enables us to use the
3418 *   HW Filter table since we can get the vlan id. This
3419 *   just creates the entry in the soft version of the
3420 *   VFTA, init will repopulate the real table.
3421 ************************************************************************/
3422static void
3423ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3424{
3425	struct adapter *adapter = ifp->if_softc;
3426	u16            index, bit;
3427
3428	if (ifp->if_softc !=  arg)   /* Not our event */
3429		return;
3430
3431	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
3432		return;
3433
3434	IXGBE_CORE_LOCK(adapter);
3435	index = (vtag >> 5) & 0x7F;
3436	bit = vtag & 0x1F;
3437	adapter->shadow_vfta[index] |= (1 << bit);
3438	++adapter->num_vlans;
3439	ixgbe_setup_vlan_hw_support(adapter);
3440	IXGBE_CORE_UNLOCK(adapter);
3441} /* ixgbe_register_vlan */
3442
3443/************************************************************************
3444 * ixgbe_unregister_vlan
3445 *
3446 *   Run via vlan unconfig EVENT, remove our entry in the soft vfta.
3447 ************************************************************************/
3448static void
3449ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3450{
3451	struct adapter *adapter = ifp->if_softc;
3452	u16            index, bit;
3453
3454	if (ifp->if_softc != arg)
3455		return;
3456
3457	if ((vtag == 0) || (vtag > 4095))  /* Invalid */
3458		return;
3459
3460	IXGBE_CORE_LOCK(adapter);
3461	index = (vtag >> 5) & 0x7F;
3462	bit = vtag & 0x1F;
3463	adapter->shadow_vfta[index] &= ~(1 << bit);
3464	--adapter->num_vlans;
3465	/* Re-init to load the changes */
3466	ixgbe_setup_vlan_hw_support(adapter);
3467	IXGBE_CORE_UNLOCK(adapter);
3468} /* ixgbe_unregister_vlan */
3469
/************************************************************************
 * ixgbe_setup_vlan_hw_support
 *
 *   Re-program hardware VLAN state after a reset: per-queue tag
 *   stripping, the VFTA filter table (from the shadow copy), and
 *   the global VLNCTRL filtering/tagging bits.
 ************************************************************************/
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ifnet    *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring  *rxr;
	int             i;
	u32             ctrl;


	/*
	 * We get here thru init_locked, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (adapter->num_vlans == 0)
		return;

	/* Setup the queues for vlans */
	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* On 82599 the VLAN enable is per/queue in RXDCTL */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		}
		rxr->vtag_strip = TRUE;
	}

	/* Without HW filtering there is nothing more to program */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	/* 82598 enables tag stripping globally rather than per queue */
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */
3525
/************************************************************************
 * ixgbe_enable_intr
 *
 *   Build and write the EIMS interrupt-enable mask (base causes plus
 *   per-MAC and per-feature extras), configure MSI-X auto-clear via
 *   EIAC, then enable each queue's interrupt individually.
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32             mask, fwsm;

	/* Start from all enable bits minus the queue causes */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues, this is done separately to
	 * allow for handling the extended (beyond 32) MSI-X
	 * vectors that can be used by 82599
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
} /* ixgbe_enable_intr */
3610
3611/************************************************************************
3612 * ixgbe_disable_intr
3613 ************************************************************************/
3614static void
3615ixgbe_disable_intr(struct adapter *adapter)
3616{
3617	if (adapter->msix_mem)
3618		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3619	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3620		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3621	} else {
3622		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3623		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3624		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3625	}
3626	IXGBE_WRITE_FLUSH(&adapter->hw);
3627
3628	return;
3629} /* ixgbe_disable_intr */
3630
/************************************************************************
 * ixgbe_get_slot_info
 *
 *   Get the width and transaction speed of
 *   the slot this adapter is plugged into.
 *
 *   Quad-port parts sit behind an internal PCIe switch, so for those
 *   the slot speed/width is read from the grandparent bridge's link
 *   status register instead of the device itself.  Prints a warning
 *   when the slot provides less bandwidth than the card can use.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t              dev = adapter->dev;
	struct ixgbe_hw       *hw = &adapter->hw;
	u32                   offset;
	u16                   link;
	int                   bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, but there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged. A bit more work.
	 */
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
	     (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	     "Unknown"));

	if (bus_info_valid) {
		/* Warn if the slot limits the card's throughput */
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");

	return;
} /* ixgbe_get_slot_info */
3728
3729
3730/************************************************************************
3731 * ixgbe_set_ivar
3732 *
3733 *   Setup the correct IVAR register for a particular MSI-X interrupt
3734 *     (yes this is all very magic and confusing :)
3735 *    - entry is the register array entry
3736 *    - vector is the MSI-X vector for this queue
3737 *    - type is RX/TX/MISC
3738 ************************************************************************/
3739static void
3740ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3741{
3742	struct ixgbe_hw *hw = &adapter->hw;
3743	u32 ivar, index;
3744
3745	vector |= IXGBE_IVAR_ALLOC_VAL;
3746
3747	switch (hw->mac.type) {
3748
3749	case ixgbe_mac_82598EB:
3750		if (type == -1)
3751			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3752		else
3753			entry += (type * 64);
3754		index = (entry >> 2) & 0x1F;
3755		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3756		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3757		ivar |= (vector << (8 * (entry & 0x3)));
3758		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3759		break;
3760
3761	case ixgbe_mac_82599EB:
3762	case ixgbe_mac_X540:
3763	case ixgbe_mac_X550:
3764	case ixgbe_mac_X550EM_x:
3765	case ixgbe_mac_X550EM_a:
3766		if (type == -1) { /* MISC IVAR */
3767			index = (entry & 1) * 8;
3768			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3769			ivar &= ~(0xFF << index);
3770			ivar |= (vector << index);
3771			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3772		} else {          /* RX/TX IVARS */
3773			index = (16 * (entry & 1)) + (8 * type);
3774			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3775			ivar &= ~(0xFF << index);
3776			ivar |= (vector << index);
3777			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3778		}
3779
3780	default:
3781		break;
3782	}
3783} /* ixgbe_set_ivar */
3784
3785/************************************************************************
3786 * ixgbe_configure_ivars
3787 ************************************************************************/
3788static void
3789ixgbe_configure_ivars(struct adapter *adapter)
3790{
3791	struct ix_queue *que = adapter->queues;
3792	u32             newitr;
3793
3794	if (ixgbe_max_interrupt_rate > 0)
3795		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3796	else {
3797		/*
3798		 * Disable DMA coalescing if interrupt moderation is
3799		 * disabled.
3800		 */
3801		adapter->dmac = 0;
3802		newitr = 0;
3803	}
3804
3805	for (int i = 0; i < adapter->num_queues; i++, que++) {
3806		struct rx_ring *rxr = &adapter->rx_rings[i];
3807		struct tx_ring *txr = &adapter->tx_rings[i];
3808		/* First the RX queue entry */
3809		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3810		/* ... and the TX */
3811		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3812		/* Set an Initial EITR value */
3813		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3814	}
3815
3816	/* For the Link interrupt */
3817	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3818} /* ixgbe_configure_ivars */
3819
3820/************************************************************************
3821 * ixgbe_sfp_probe
3822 *
3823 *   Determine if a port had optics inserted.
3824 ************************************************************************/
3825static bool
3826ixgbe_sfp_probe(struct adapter *adapter)
3827{
3828	struct ixgbe_hw *hw = &adapter->hw;
3829	device_t        dev = adapter->dev;
3830	bool            result = FALSE;
3831
3832	if ((hw->phy.type == ixgbe_phy_nl) &&
3833	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3834		s32 ret = hw->phy.ops.identify_sfp(hw);
3835		if (ret)
3836			goto out;
3837		ret = hw->phy.ops.reset(hw);
3838		adapter->sfp_probe = FALSE;
3839		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3840			device_printf(dev, "Unsupported SFP+ module detected!");
3841			device_printf(dev,
3842			    "Reload driver with supported module.\n");
3843			goto out;
3844		} else
3845			device_printf(dev, "SFP+ module detected!\n");
3846		/* We now have supported optics */
3847		result = TRUE;
3848	}
3849out:
3850
3851	return (result);
3852} /* ixgbe_sfp_probe */
3853
3854/************************************************************************
3855 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
3856 *
3857 *   Done outside of interrupt context since the driver might sleep
3858 ************************************************************************/
3859static void
3860ixgbe_handle_link(void *context, int pending)
3861{
3862	struct adapter  *adapter = context;
3863	struct ixgbe_hw *hw = &adapter->hw;
3864
3865	ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
3866	ixgbe_update_link_status(adapter);
3867
3868	/* Re-enable link interrupts */
3869	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
3870} /* ixgbe_handle_link */
3871
3872/************************************************************************
3873 * ixgbe_handle_mod - Tasklet for SFP module interrupts
3874 ************************************************************************/
3875static void
3876ixgbe_handle_mod(void *context, int pending)
3877{
3878	struct adapter  *adapter = context;
3879	struct ixgbe_hw *hw = &adapter->hw;
3880	device_t        dev = adapter->dev;
3881	u32             err, cage_full = 0;
3882
3883	if (adapter->hw.need_crosstalk_fix) {
3884		switch (hw->mac.type) {
3885		case ixgbe_mac_82599EB:
3886			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3887			    IXGBE_ESDP_SDP2;
3888			break;
3889		case ixgbe_mac_X550EM_x:
3890		case ixgbe_mac_X550EM_a:
3891			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3892			    IXGBE_ESDP_SDP0;
3893			break;
3894		default:
3895			break;
3896		}
3897
3898		if (!cage_full)
3899			return;
3900	}
3901
3902	err = hw->phy.ops.identify_sfp(hw);
3903	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3904		device_printf(dev,
3905		    "Unsupported SFP+ module type was detected.\n");
3906		return;
3907	}
3908
3909	err = hw->mac.ops.setup_sfp(hw);
3910	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3911		device_printf(dev,
3912		    "Setup failure - unsupported SFP+ module type.\n");
3913		return;
3914	}
3915	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3916
3917	return;
3918} /* ixgbe_handle_mod */
3919
3920
3921/************************************************************************
3922 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3923 ************************************************************************/
3924static void
3925ixgbe_handle_msf(void *context, int pending)
3926{
3927	struct adapter  *adapter = context;
3928	struct ixgbe_hw *hw = &adapter->hw;
3929	u32             autoneg;
3930	bool            negotiate;
3931
3932	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3933	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3934
3935	autoneg = hw->phy.autoneg_advertised;
3936	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3937		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3938	if (hw->mac.ops.setup_link)
3939		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3940
3941	/* Adjust media types shown in ifconfig */
3942	ifmedia_removeall(&adapter->media);
3943	ixgbe_add_media_types(adapter);
3944	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3945	IXGBE_CORE_UNLOCK(adapter);
3946	return;
3947} /* ixgbe_handle_msf */
3948
3949/************************************************************************
3950 * ixgbe_handle_phy - Tasklet for external PHY interrupts
3951 ************************************************************************/
3952static void
3953ixgbe_handle_phy(void *context, int pending)
3954{
3955	struct adapter  *adapter = context;
3956	struct ixgbe_hw *hw = &adapter->hw;
3957	int             error;
3958
3959	error = hw->phy.ops.handle_lasi(hw);
3960	if (error == IXGBE_ERR_OVERTEMP)
3961		device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3962	else if (error)
3963		device_printf(adapter->dev,
3964		    "Error handling LASI interrupt: %d\n", error);
3965
3966	return;
3967} /* ixgbe_handle_phy */
3968
3969/************************************************************************
3970 * ixgbe_config_dmac - Configure DMA Coalescing
3971 ************************************************************************/
3972static void
3973ixgbe_config_dmac(struct adapter *adapter)
3974{
3975	struct ixgbe_hw          *hw = &adapter->hw;
3976	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3977
3978	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3979		return;
3980
3981	if (dcfg->watchdog_timer ^ adapter->dmac ||
3982	    dcfg->link_speed ^ adapter->link_speed) {
3983		dcfg->watchdog_timer = adapter->dmac;
3984		dcfg->fcoe_en = false;
3985		dcfg->link_speed = adapter->link_speed;
3986		dcfg->num_tcs = 1;
3987
3988		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3989		    dcfg->watchdog_timer, dcfg->link_speed);
3990
3991		hw->mac.ops.dmac_config(hw);
3992	}
3993} /* ixgbe_config_dmac */
3994
3995/************************************************************************
3996 * ixgbe_check_wol_support
3997 *
3998 *   Checks whether the adapter's ports are capable of
3999 *   Wake On LAN by reading the adapter's NVM.
4000 *
4001 *   Sets each port's hw->wol_enabled value depending
4002 *   on the value read here.
4003 ************************************************************************/
4004static void
4005ixgbe_check_wol_support(struct adapter *adapter)
4006{
4007	struct ixgbe_hw *hw = &adapter->hw;
4008	u16             dev_caps = 0;
4009
4010	/* Find out WoL support for port */
4011	adapter->wol_support = hw->wol_enabled = 0;
4012	ixgbe_get_device_caps(hw, &dev_caps);
4013	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
4014	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
4015	     hw->bus.func == 0))
4016		adapter->wol_support = hw->wol_enabled = 1;
4017
4018	/* Save initial wake up filter configuration */
4019	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
4020
4021	return;
4022} /* ixgbe_check_wol_support */
4023
4024/************************************************************************
4025 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
4026 *
4027 *   Prepare the adapter/port for LPLU and/or WoL
4028 ************************************************************************/
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t        dev = adapter->dev;
	s32             error = 0;

	/* Caller must hold the core lock; ixgbe_stop() below requires it */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		/* Hold off PHY resets while stopping and entering LPLU */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev, "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	/* 0 on success, otherwise the LPLU entry error code */
	return error;
} /* ixgbe_setup_low_power_mode */
4075
4076/************************************************************************
4077 * ixgbe_update_stats_counters - Update board statistics counters.
4078 ************************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ixgbe_hw       *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats_pf;
	u32                   missed_rx = 0, bprc, lxon, lxoff, total;
	u64                   total_missed_rx = 0;

	/*
	 * NOTE(review): missed_rx and total_missed_rx are never
	 * incremented in this revision, so the gprc adjustment and the
	 * IQDROPS value below are effectively zero — presumably per-queue
	 * MPC accumulation was removed or is pending; confirm against
	 * other driver revisions.
	 */

	/* Error counters */
	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
	stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));

	/* Per-queue packet counters (first 16 queues) */
	for (int i = 0; i < 16; i++) {
		stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	stats->gprc -= missed_rx;

	/* Octet counters: 82598 lacks 64-bit low/high register pairs */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	stats->bprc += bprc;
	stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		stats->mprc -= bprc;

	/* RX size-bucket counters */
	stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	/* Flow-control frame counters */
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	/* TX counters; pause frames are counted by hardware, back them out */
	stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	stats->gptc -= total;
	stats->mptc -= total;
	stats->ptc64 -= total;
	stats->gotc -= total * ETHER_MIN_LEN;

	stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(adapter, stats->gprc);
	IXGBE_SET_OPACKETS(adapter, stats->gptc);
	IXGBE_SET_IBYTES(adapter, stats->gorc);
	IXGBE_SET_OBYTES(adapter, stats->gotc);
	IXGBE_SET_IMCASTS(adapter, stats->mprc);
	IXGBE_SET_OMCASTS(adapter, stats->mptc);
	IXGBE_SET_COLLISIONS(adapter, 0);
	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
	IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
} /* ixgbe_update_stats_counters */
4193
4194#if __FreeBSD_version >= 1100036
4195/************************************************************************
4196 * ixgbe_get_counter
4197 ************************************************************************/
4198static uint64_t
4199ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
4200{
4201	struct adapter *adapter;
4202	struct tx_ring *txr;
4203	uint64_t       rv;
4204
4205	adapter = if_getsoftc(ifp);
4206
4207	switch (cnt) {
4208	case IFCOUNTER_IPACKETS:
4209		return (adapter->ipackets);
4210	case IFCOUNTER_OPACKETS:
4211		return (adapter->opackets);
4212	case IFCOUNTER_IBYTES:
4213		return (adapter->ibytes);
4214	case IFCOUNTER_OBYTES:
4215		return (adapter->obytes);
4216	case IFCOUNTER_IMCASTS:
4217		return (adapter->imcasts);
4218	case IFCOUNTER_OMCASTS:
4219		return (adapter->omcasts);
4220	case IFCOUNTER_COLLISIONS:
4221		return (0);
4222	case IFCOUNTER_IQDROPS:
4223		return (adapter->iqdrops);
4224	case IFCOUNTER_OQDROPS:
4225		rv = 0;
4226		txr = adapter->tx_rings;
4227		for (int i = 0; i < adapter->num_queues; i++, txr++)
4228			rv += txr->br->br_drops;
4229		return (rv);
4230	case IFCOUNTER_IERRORS:
4231		return (adapter->ierrors);
4232	default:
4233		return (if_get_counter_default(ifp, cnt));
4234	}
4235} /* ixgbe_get_counter */
4236#endif
4237
4238/************************************************************************
4239 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
4240 *
4241 *   Retrieves the TDH value from the hardware
4242 ************************************************************************/
4243static int
4244ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4245{
4246	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4247	int            error;
4248	unsigned int   val;
4249
4250	if (!txr)
4251		return 0;
4252
4253	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4254	error = sysctl_handle_int(oidp, &val, 0, req);
4255	if (error || !req->newptr)
4256		return error;
4257
4258	return 0;
4259} /* ixgbe_sysctl_tdh_handler */
4260
4261/************************************************************************
4262 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
4263 *
4264 *   Retrieves the TDT value from the hardware
4265 ************************************************************************/
4266static int
4267ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4268{
4269	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4270	int            error;
4271	unsigned int   val;
4272
4273	if (!txr)
4274		return 0;
4275
4276	val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4277	error = sysctl_handle_int(oidp, &val, 0, req);
4278	if (error || !req->newptr)
4279		return error;
4280
4281	return 0;
4282} /* ixgbe_sysctl_tdt_handler */
4283
4284/************************************************************************
4285 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
4286 *
4287 *   Retrieves the RDH value from the hardware
4288 ************************************************************************/
4289static int
4290ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4291{
4292	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4293	int            error;
4294	unsigned int   val;
4295
4296	if (!rxr)
4297		return 0;
4298
4299	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4300	error = sysctl_handle_int(oidp, &val, 0, req);
4301	if (error || !req->newptr)
4302		return error;
4303
4304	return 0;
4305} /* ixgbe_sysctl_rdh_handler */
4306
4307/************************************************************************
4308 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
4309 *
4310 *   Retrieves the RDT value from the hardware
4311 ************************************************************************/
4312static int
4313ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4314{
4315	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4316	int            error;
4317	unsigned int   val;
4318
4319	if (!rxr)
4320		return 0;
4321
4322	val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4323	error = sysctl_handle_int(oidp, &val, 0, req);
4324	if (error || !req->newptr)
4325		return error;
4326
4327	return 0;
4328} /* ixgbe_sysctl_rdt_handler */
4329
4330/************************************************************************
4331 * ixgbe_sysctl_interrupt_rate_handler
4332 ************************************************************************/
4333static int
4334ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
4335{
4336	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
4337	int             error;
4338	unsigned int    reg, usec, rate;
4339
4340	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
4341	usec = ((reg & 0x0FF8) >> 3);
4342	if (usec > 0)
4343		rate = 500000 / usec;
4344	else
4345		rate = 0;
4346	error = sysctl_handle_int(oidp, &rate, 0, req);
4347	if (error || !req->newptr)
4348		return error;
4349	reg &= ~0xfff; /* default, no limitation */
4350	ixgbe_max_interrupt_rate = 0;
4351	if (rate > 0 && rate < 500000) {
4352		if (rate < 1000)
4353			rate = 1000;
4354		ixgbe_max_interrupt_rate = rate;
4355		reg |= ((4000000/rate) & 0xff8);
4356	}
4357	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
4358
4359	return 0;
4360} /* ixgbe_sysctl_interrupt_rate_handler */
4361
4362/************************************************************************
4363 * ixgbe_add_device_sysctls
4364 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t               dev = adapter->dev;
	struct ixgbe_hw        *hw = &adapter->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;

	/* Register everything under the device's own sysctl subtree */
	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
	    &ixgbe_enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
	    IXGBE_SYSCTL_DESC_ADV_SPEED);

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
	    "I", "PCI Power State");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
	    CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
	    ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
		    "I", "DMA Coalesce");

	/* for WoL-capable devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
		    "I", "Enable/Disable Wake Up Filters");
	}

	/* for X552/X557-AT devices */
	/* NOTE(review): same device-ID check as the WoL branch above;
	 * presumably intentional since X552/X557-AT shares that ID, but
	 * the two branches could be merged — confirm before changing. */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
		    CTLFLAG_RD, NULL, "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
		    "I", "Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
		    CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
		    ixgbe_sysctl_phy_overtemp_occurred, "I",
		    "External PHY High Temperature Event Occurred");
	}

	/* EEE is only exposed when the feature capability bit is set */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
		    CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
		    ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
	}
} /* ixgbe_add_device_sysctls */
4439
4440/************************************************************************
4441 * ixgbe_add_hw_stats
4442 *
4443 *   Add sysctl variables, one per statistic, to the system.
4444 ************************************************************************/
4445static void
4446ixgbe_add_hw_stats(struct adapter *adapter)
4447{
4448	device_t               dev = adapter->dev;
4449	struct tx_ring         *txr = adapter->tx_rings;
4450	struct rx_ring         *rxr = adapter->rx_rings;
4451	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4452	struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
4453	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4454	struct ixgbe_hw_stats  *stats = &adapter->stats_pf;
4455	struct sysctl_oid      *stat_node, *queue_node;
4456	struct sysctl_oid_list *stat_list, *queue_list;
4457
4458#define QUEUE_NAME_LEN 32
4459	char                   namebuf[QUEUE_NAME_LEN];
4460
4461	/* Driver Statistics */
4462	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4463	    CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
4464	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4465	    CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
4466	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4467	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
4468	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4469	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
4470
4471	for (int i = 0; i < adapter->num_queues; i++, txr++) {
4472		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4473		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4474		    CTLFLAG_RD, NULL, "Queue Name");
4475		queue_list = SYSCTL_CHILDREN(queue_node);
4476
4477		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4478		    CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4479		    sizeof(&adapter->queues[i]),
4480		    ixgbe_sysctl_interrupt_rate_handler, "IU",
4481		    "Interrupt Rate");
4482		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4483		    CTLFLAG_RD, &(adapter->queues[i].irqs),
4484		    "irqs on this queue");
4485		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4486		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4487		    ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
4488		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4489		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4490		    ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
4491		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4492		    CTLFLAG_RD, &txr->tso_tx, "TSO");
4493		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4494		    CTLFLAG_RD, &txr->no_tx_dma_setup,
4495		    "Driver tx dma failure in xmit");
4496		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4497		    CTLFLAG_RD, &txr->no_desc_avail,
4498		    "Queue No Descriptor Available");
4499		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4500		    CTLFLAG_RD, &txr->total_packets,
4501		    "Queue Packets Transmitted");
4502		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4503		    CTLFLAG_RD, &txr->br->br_drops,
4504		    "Packets dropped in buf_ring");
4505	}
4506
4507	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4508		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4509		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4510		    CTLFLAG_RD, NULL, "Queue Name");
4511		queue_list = SYSCTL_CHILDREN(queue_node);
4512
4513		struct lro_ctrl *lro = &rxr->lro;
4514
4515		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4516		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4517		    ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
4518		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4519		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4520		    ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
4521		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4522		    CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
4523		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4524		    CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
4525		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4526		    CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
4527		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
4528		    CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
4529#if __FreeBSD_version < 1100000
4530		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4531		    CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
4532		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4533		    CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
4534#else
4535		SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
4536		    CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
4537		SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
4538		    CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
4539#endif
4540	}
4541
4542	/* MAC stats get their own sub node */
4543
4544	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4545	    CTLFLAG_RD, NULL, "MAC Statistics");
4546	stat_list = SYSCTL_CHILDREN(stat_node);
4547
4548	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4549	    CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
4550	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4551	    CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
4552	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4553	    CTLFLAG_RD, &stats->errbc, "Byte Errors");
4554	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4555	    CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
4556	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4557	    CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
4558	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4559	    CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
4560	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4561	    CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
4562	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
4563	    CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
4564
4565	/* Flow Control stats */
4566	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4567	    CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
4568	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4569	    CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
4570	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4571	    CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
4572	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4573	    CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
4574
4575	/* Packet Reception Stats */
4576	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4577	    CTLFLAG_RD, &stats->tor, "Total Octets Received");
4578	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4579	    CTLFLAG_RD, &stats->gorc, "Good Octets Received");
4580	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4581	    CTLFLAG_RD, &stats->tpr, "Total Packets Received");
4582	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4583	    CTLFLAG_RD, &stats->gprc, "Good Packets Received");
4584	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4585	    CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
4586	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4587	    CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
4588	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4589	    CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
4590	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4591	    CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
4592	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4593	    CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
4594	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4595	    CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
4596	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4597	    CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
4598	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4599	    CTLFLAG_RD, &stats->prc1522, "1023-1522 byte frames received");
4600	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4601	    CTLFLAG_RD, &stats->ruc, "Receive Undersized");
4602	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4603	    CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
4604	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4605	    CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
4606	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4607	    CTLFLAG_RD, &stats->rjc, "Received Jabber");
4608	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4609	    CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
4610	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4611	    CTLFLAG_RD, &stats->mngptc, "Management Packets Dropped");
4612	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4613	    CTLFLAG_RD, &stats->xec, "Checksum Errors");
4614
4615	/* Packet Transmission Stats */
4616	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4617	    CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
4618	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4619	    CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
4620	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4621	    CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
4622	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4623	    CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
4624	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4625	    CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
4626	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4627	    CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
4628	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4629	    CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
4630	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4631	    CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
4632	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4633	    CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
4634	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4635	    CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
4636	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4637	    CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
4638	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4639	    CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
4640} /* ixgbe_add_hw_stats */
4641
4642/************************************************************************
4643 * ixgbe_set_sysctl_value
4644 ************************************************************************/
4645static void
4646ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
4647    const char *description, int *limit, int value)
4648{
4649	*limit = value;
4650	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4651	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4652	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4653} /* ixgbe_set_sysctl_value */
4654
4655/************************************************************************
4656 * ixgbe_sysctl_flowcntl
4657 *
4658 *   SYSCTL wrapper around setting Flow Control
4659 ************************************************************************/
4660static int
4661ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4662{
4663	struct adapter *adapter;
4664	int            error, fc;
4665
4666	adapter = (struct adapter *)arg1;
4667	fc = adapter->hw.fc.current_mode;
4668
4669	error = sysctl_handle_int(oidp, &fc, 0, req);
4670	if ((error) || (req->newptr == NULL))
4671		return (error);
4672
4673	/* Don't bother if it's not changed */
4674	if (fc == adapter->hw.fc.current_mode)
4675		return (0);
4676
4677	return ixgbe_set_flowcntl(adapter, fc);
4678} /* ixgbe_sysctl_flowcntl */
4679
4680/************************************************************************
4681 * ixgbe_set_flowcntl - Set flow control
4682 *
4683 *   Flow control values:
4684 *     0 - off
4685 *     1 - rx pause
4686 *     2 - tx pause
4687 *     3 - full
4688 ************************************************************************/
4689static int
4690ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4691{
4692	switch (fc) {
4693	case ixgbe_fc_rx_pause:
4694	case ixgbe_fc_tx_pause:
4695	case ixgbe_fc_full:
4696		adapter->hw.fc.requested_mode = fc;
4697		if (adapter->num_queues > 1)
4698			ixgbe_disable_rx_drop(adapter);
4699		break;
4700	case ixgbe_fc_none:
4701		adapter->hw.fc.requested_mode = ixgbe_fc_none;
4702		if (adapter->num_queues > 1)
4703			ixgbe_enable_rx_drop(adapter);
4704		break;
4705	default:
4706		return (EINVAL);
4707	}
4708
4709	/* Don't autoneg if forcing a value */
4710	adapter->hw.fc.disable_fc_autoneg = TRUE;
4711	ixgbe_fc_enable(&adapter->hw);
4712
4713	return (0);
4714} /* ixgbe_set_flowcntl */
4715
4716/************************************************************************
4717 * ixgbe_sysctl_advertise
4718 *
4719 *   SYSCTL wrapper around setting advertised speed
4720 ************************************************************************/
4721static int
4722ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4723{
4724	struct adapter *adapter;
4725	int            error, advertise;
4726
4727	adapter = (struct adapter *)arg1;
4728	advertise = adapter->advertise;
4729
4730	error = sysctl_handle_int(oidp, &advertise, 0, req);
4731	if ((error) || (req->newptr == NULL))
4732		return (error);
4733
4734	return ixgbe_set_advertise(adapter, advertise);
4735} /* ixgbe_sysctl_advertise */
4736
4737/************************************************************************
4738 * ixgbe_set_advertise - Control advertised link speed
4739 *
4740 *   Flags:
4741 *     0x1 - advertise 100 Mb
4742 *     0x2 - advertise 1G
4743 *     0x4 - advertise 10G
4744 *     0x8 - advertise 10 Mb (yes, Mb)
4745 ************************************************************************/
static int
ixgbe_set_advertise(struct adapter *adapter, int advertise)
{
	device_t         dev;
	struct ixgbe_hw  *hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	s32              err = IXGBE_NOT_IMPLEMENTED;
	bool             negotiate = FALSE;

	/* Checks to validate new value */
	if (adapter->advertise == advertise) /* no change */
		return (0);

	dev = adapter->dev;
	hw = &adapter->hw;

	/* No speed changes for backplane media */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (ENODEV);

	/* Speed selection is only meaningful on copper or multispeed fiber */
	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	      (hw->phy.multispeed_fiber))) {
		device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
		return (EINVAL);
	}

	/* Reject an empty mask or bits outside the four defined flags */
	if (advertise < 0x1 || advertise > 0xF) {
		device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
		return (EINVAL);
	}

	/*
	 * Query the MAC for its supported speeds so each requested flag can
	 * be validated below.  If the op is absent, link_caps stays 0 and
	 * any requested flag will fail its capability check.
	 */
	if (hw->mac.ops.get_link_capabilities) {
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(dev, "Unable to determine supported advertise speeds\n");
			return (ENODEV);
		}
	}

	/* Set new value and report new advertised mode */
	if (advertise & 0x1) {
		if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
			device_printf(dev, "Interface does not support 100Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_100_FULL;
	}
	if (advertise & 0x2) {
		if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
			device_printf(dev, "Interface does not support 1Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
	}
	if (advertise & 0x4) {
		if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
			device_printf(dev, "Interface does not support 10Gb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
	}
	if (advertise & 0x8) {
		if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
			device_printf(dev, "Interface does not support 10Mb advertised speed\n");
			return (EINVAL);
		}
		speed |= IXGBE_LINK_SPEED_10_FULL;
	}

	/* All flags validated: reprogram the link and cache the new mask */
	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	adapter->advertise = advertise;

	return (0);
} /* ixgbe_set_advertise */
4823
4824/************************************************************************
4825 * ixgbe_get_advertise - Get current advertised speed settings
4826 *
4827 *   Formatted for sysctl usage.
4828 *   Flags:
4829 *     0x1 - advertise 100 Mb
4830 *     0x2 - advertise 1G
4831 *     0x4 - advertise 10G
4832 *     0x8 - advertise 10 Mb (yes, Mb)
4833 ************************************************************************/
4834static int
4835ixgbe_get_advertise(struct adapter *adapter)
4836{
4837	struct ixgbe_hw  *hw = &adapter->hw;
4838	int              speed;
4839	ixgbe_link_speed link_caps = 0;
4840	s32              err;
4841	bool             negotiate = FALSE;
4842
4843	/*
4844	 * Advertised speed means nothing unless it's copper or
4845	 * multi-speed fiber
4846	 */
4847	if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4848	    !(hw->phy.multispeed_fiber))
4849		return 0;
4850
4851	err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4852	if (err != IXGBE_SUCCESS)
4853		return 0;
4854
4855	speed =
4856	    ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4857	    ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4858	    ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4859	    ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4860
4861	return speed;
4862} /* ixgbe_get_advertise */
4863
4864/************************************************************************
4865 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4866 *
4867 *   For X552/X557-AT devices using an external PHY
4868 ************************************************************************/
4869static int
4870ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4871{
4872	struct adapter  *adapter = (struct adapter *)arg1;
4873	struct ixgbe_hw *hw = &adapter->hw;
4874	u16             reg;
4875
4876	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4877		device_printf(adapter->dev,
4878		    "Device has no supported external thermal sensor.\n");
4879		return (ENODEV);
4880	}
4881
4882	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4883	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4884		device_printf(adapter->dev,
4885		    "Error reading from PHY's current temperature register\n");
4886		return (EAGAIN);
4887	}
4888
4889	/* Shift temp for output */
4890	reg = reg >> 8;
4891
4892	return (sysctl_handle_int(oidp, NULL, reg, req));
4893} /* ixgbe_sysctl_phy_temp */
4894
4895/************************************************************************
4896 * ixgbe_sysctl_phy_overtemp_occurred
4897 *
4898 *   Reports (directly from the PHY) whether the current PHY
4899 *   temperature is over the overtemp threshold.
4900 ************************************************************************/
4901static int
4902ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4903{
4904	struct adapter  *adapter = (struct adapter *)arg1;
4905	struct ixgbe_hw *hw = &adapter->hw;
4906	u16             reg;
4907
4908	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4909		device_printf(adapter->dev,
4910		    "Device has no supported external thermal sensor.\n");
4911		return (ENODEV);
4912	}
4913
4914	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4915	    IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4916		device_printf(adapter->dev,
4917		    "Error reading from PHY's temperature status register\n");
4918		return (EAGAIN);
4919	}
4920
4921	/* Get occurrence bit */
4922	reg = !!(reg & 0x4000);
4923
4924	return (sysctl_handle_int(oidp, 0, reg, req));
4925} /* ixgbe_sysctl_phy_overtemp_occurred */
4926
4927/************************************************************************
4928 * ixgbe_sysctl_dmac - Manage DMA Coalescing
4929 *
4930 *   Control values:
4931 *     0/1 - off / on (use default value of 1000)
4932 *
4933 *     Legal timer values are:
4934 *     50,100,250,500,1000,2000,5000,10000
4935 *
4936 *     Turning off interrupt moderation will also turn this off.
4937 ************************************************************************/
4938static int
4939ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4940{
4941	struct adapter *adapter = (struct adapter *)arg1;
4942	struct ifnet   *ifp = adapter->ifp;
4943	int            error;
4944	u32            newval;
4945
4946	newval = adapter->dmac;
4947	error = sysctl_handle_int(oidp, &newval, 0, req);
4948	if ((error) || (req->newptr == NULL))
4949		return (error);
4950
4951	switch (newval) {
4952	case 0:
4953		/* Disabled */
4954		adapter->dmac = 0;
4955		break;
4956	case 1:
4957		/* Enable and use default */
4958		adapter->dmac = 1000;
4959		break;
4960	case 50:
4961	case 100:
4962	case 250:
4963	case 500:
4964	case 1000:
4965	case 2000:
4966	case 5000:
4967	case 10000:
4968		/* Legal values - allow */
4969		adapter->dmac = newval;
4970		break;
4971	default:
4972		/* Do nothing, illegal value */
4973		return (EINVAL);
4974	}
4975
4976	/* Re-initialize hardware if it's already running */
4977	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4978		ixgbe_init(adapter);
4979
4980	return (0);
4981} /* ixgbe_sysctl_dmac */
4982
4983#ifdef IXGBE_DEBUG
4984/************************************************************************
4985 * ixgbe_sysctl_power_state
4986 *
4987 *   Sysctl to test power states
4988 *   Values:
4989 *     0      - set device to D0
4990 *     3      - set device to D3
4991 *     (none) - get current device power state
4992 ************************************************************************/
4993static int
4994ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4995{
4996	struct adapter *adapter = (struct adapter *)arg1;
4997	device_t       dev = adapter->dev;
4998	int            curr_ps, new_ps, error = 0;
4999
5000	curr_ps = new_ps = pci_get_powerstate(dev);
5001
5002	error = sysctl_handle_int(oidp, &new_ps, 0, req);
5003	if ((error) || (req->newptr == NULL))
5004		return (error);
5005
5006	if (new_ps == curr_ps)
5007		return (0);
5008
5009	if (new_ps == 3 && curr_ps == 0)
5010		error = DEVICE_SUSPEND(dev);
5011	else if (new_ps == 0 && curr_ps == 3)
5012		error = DEVICE_RESUME(dev);
5013	else
5014		return (EINVAL);
5015
5016	device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5017
5018	return (error);
5019} /* ixgbe_sysctl_power_state */
5020#endif
5021
5022/************************************************************************
5023 * ixgbe_sysctl_eee_state
5024 *
5025 *   Sysctl to set EEE power saving feature
5026 *   Values:
5027 *     0      - disable EEE
5028 *     1      - enable EEE
5029 *     (none) - get current device EEE state
5030 ************************************************************************/
5031static int
5032ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
5033{
5034	struct adapter *adapter = (struct adapter *)arg1;
5035	device_t       dev = adapter->dev;
5036	int            curr_eee, new_eee, error = 0;
5037	s32            retval;
5038
5039	curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5040
5041	error = sysctl_handle_int(oidp, &new_eee, 0, req);
5042	if ((error) || (req->newptr == NULL))
5043		return (error);
5044
5045	/* Nothing to do */
5046	if (new_eee == curr_eee)
5047		return (0);
5048
5049	/* Not supported */
5050	if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5051		return (EINVAL);
5052
5053	/* Bounds checking */
5054	if ((new_eee < 0) || (new_eee > 1))
5055		return (EINVAL);
5056
5057	retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
5058	if (retval) {
5059		device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5060		return (EINVAL);
5061	}
5062
5063	/* Restart auto-neg */
5064	ixgbe_init(adapter);
5065
5066	device_printf(dev, "New EEE state: %d\n", new_eee);
5067
5068	/* Cache new value */
5069	if (new_eee)
5070		adapter->feat_en |= IXGBE_FEATURE_EEE;
5071	else
5072		adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5073
5074	return (error);
5075} /* ixgbe_sysctl_eee_state */
5076
5077/************************************************************************
5078 * ixgbe_sysctl_wol_enable
5079 *
5080 *   Sysctl to enable/disable the WoL capability,
5081 *   if supported by the adapter.
5082 *
5083 *   Values:
5084 *     0 - disabled
5085 *     1 - enabled
5086 ************************************************************************/
5087static int
5088ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
5089{
5090	struct adapter  *adapter = (struct adapter *)arg1;
5091	struct ixgbe_hw *hw = &adapter->hw;
5092	int             new_wol_enabled;
5093	int             error = 0;
5094
5095	new_wol_enabled = hw->wol_enabled;
5096	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
5097	if ((error) || (req->newptr == NULL))
5098		return (error);
5099	new_wol_enabled = !!(new_wol_enabled);
5100	if (new_wol_enabled == hw->wol_enabled)
5101		return (0);
5102
5103	if (new_wol_enabled > 0 && !adapter->wol_support)
5104		return (ENODEV);
5105	else
5106		hw->wol_enabled = new_wol_enabled;
5107
5108	return (0);
5109} /* ixgbe_sysctl_wol_enable */
5110
5111/************************************************************************
5112 * ixgbe_sysctl_wufc - Wake Up Filter Control
5113 *
5114 *   Sysctl to enable/disable the types of packets that the
5115 *   adapter will wake up on upon receipt.
5116 *   Flags:
5117 *     0x1  - Link Status Change
5118 *     0x2  - Magic Packet
5119 *     0x4  - Direct Exact
5120 *     0x8  - Directed Multicast
5121 *     0x10 - Broadcast
5122 *     0x20 - ARP/IPv4 Request Packet
5123 *     0x40 - Direct IPv4 Packet
5124 *     0x80 - Direct IPv6 Packet
5125 *
5126 *   Settings not listed above will cause the sysctl to return an error.
5127 ************************************************************************/
5128static int
5129ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
5130{
5131	struct adapter *adapter = (struct adapter *)arg1;
5132	int            error = 0;
5133	u32            new_wufc;
5134
5135	new_wufc = adapter->wufc;
5136
5137	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
5138	if ((error) || (req->newptr == NULL))
5139		return (error);
5140	if (new_wufc == adapter->wufc)
5141		return (0);
5142
5143	if (new_wufc & 0xffffff00)
5144		return (EINVAL);
5145
5146	new_wufc &= 0xff;
5147	new_wufc |= (0xffffff & adapter->wufc);
5148	adapter->wufc = new_wufc;
5149
5150	return (0);
5151} /* ixgbe_sysctl_wufc */
5152
5153#ifdef IXGBE_DEBUG
5154/************************************************************************
5155 * ixgbe_sysctl_print_rss_config
5156 ************************************************************************/
5157static int
5158ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
5159{
5160	struct adapter  *adapter = (struct adapter *)arg1;
5161	struct ixgbe_hw *hw = &adapter->hw;
5162	device_t        dev = adapter->dev;
5163	struct sbuf     *buf;
5164	int             error = 0, reta_size;
5165	u32             reg;
5166
5167	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5168	if (!buf) {
5169		device_printf(dev, "Could not allocate sbuf for output.\n");
5170		return (ENOMEM);
5171	}
5172
5173	// TODO: use sbufs to make a string to print out
5174	/* Set multiplier for RETA setup and table size based on MAC */
5175	switch (adapter->hw.mac.type) {
5176	case ixgbe_mac_X550:
5177	case ixgbe_mac_X550EM_x:
5178	case ixgbe_mac_X550EM_a:
5179		reta_size = 128;
5180		break;
5181	default:
5182		reta_size = 32;
5183		break;
5184	}
5185
5186	/* Print out the redirection table */
5187	sbuf_cat(buf, "\n");
5188	for (int i = 0; i < reta_size; i++) {
5189		if (i < 32) {
5190			reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5191			sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5192		} else {
5193			reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5194			sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5195		}
5196	}
5197
5198	// TODO: print more config
5199
5200	error = sbuf_finish(buf);
5201	if (error)
5202		device_printf(dev, "Error finishing sbuf: %d\n", error);
5203
5204	sbuf_delete(buf);
5205
5206	return (0);
5207} /* ixgbe_sysctl_print_rss_config */
5208#endif /* IXGBE_DEBUG */
5209
5210/************************************************************************
5211 * ixgbe_enable_rx_drop
5212 *
5213 *   Enable the hardware to drop packets when the buffer is
5214 *   full. This is useful with multiqueue, so that no single
5215 *   queue being full stalls the entire RX engine. We only
5216 *   enable this when Multiqueue is enabled AND Flow Control
5217 *   is disabled.
5218 ************************************************************************/
5219static void
5220ixgbe_enable_rx_drop(struct adapter *adapter)
5221{
5222	struct ixgbe_hw *hw = &adapter->hw;
5223	struct rx_ring  *rxr;
5224	u32             srrctl;
5225
5226	for (int i = 0; i < adapter->num_queues; i++) {
5227		rxr = &adapter->rx_rings[i];
5228		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5229		srrctl |= IXGBE_SRRCTL_DROP_EN;
5230		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5231	}
5232
5233	/* enable drop for each vf */
5234	for (int i = 0; i < adapter->num_vfs; i++) {
5235		IXGBE_WRITE_REG(hw, IXGBE_QDE,
5236		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5237		    IXGBE_QDE_ENABLE));
5238	}
5239} /* ixgbe_enable_rx_drop */
5240
5241/************************************************************************
5242 * ixgbe_disable_rx_drop
5243 ************************************************************************/
5244static void
5245ixgbe_disable_rx_drop(struct adapter *adapter)
5246{
5247	struct ixgbe_hw *hw = &adapter->hw;
5248	struct rx_ring  *rxr;
5249	u32             srrctl;
5250
5251	for (int i = 0; i < adapter->num_queues; i++) {
5252		rxr = &adapter->rx_rings[i];
5253		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5254		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5255		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5256	}
5257
5258	/* disable drop for each vf */
5259	for (int i = 0; i < adapter->num_vfs; i++) {
5260		IXGBE_WRITE_REG(hw, IXGBE_QDE,
5261		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5262	}
5263} /* ixgbe_disable_rx_drop */
5264
5265/************************************************************************
5266 * ixgbe_rearm_queues
5267 ************************************************************************/
5268static void
5269ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5270{
5271	u32 mask;
5272
5273	switch (adapter->hw.mac.type) {
5274	case ixgbe_mac_82598EB:
5275		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5276		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5277		break;
5278	case ixgbe_mac_82599EB:
5279	case ixgbe_mac_X540:
5280	case ixgbe_mac_X550:
5281	case ixgbe_mac_X550EM_x:
5282	case ixgbe_mac_X550EM_a:
5283		mask = (queues & 0xFFFFFFFF);
5284		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5285		mask = (queues >> 32);
5286		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5287		break;
5288	default:
5289		break;
5290	}
5291} /* ixgbe_rearm_queues */
5292
5293