if_ix.c revision 285590
1/******************************************************************************
2
3  Copyright (c) 2001-2015, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 285590 2015-07-15 00:35:50Z pkelsey $*/
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#include "opt_rss.h"
40#endif
41
42#include "ixgbe.h"
43
44#ifdef	RSS
45#include <net/rss_config.h>
46#include <netinet/in_rss.h>
47#endif
48
49/*********************************************************************
50 *  Set this to one to display debug statistics
51 *********************************************************************/
52int             ixgbe_display_debug_stats = 0;
53
54/*********************************************************************
55 *  Driver version
56 *********************************************************************/
57char ixgbe_driver_version[] = "3.1.0";
58
59/*********************************************************************
60 *  PCI Device ID Table
61 *
62 *  Used by probe to select devices to load on
63 *  Last field stores an index into ixgbe_strings
64 *  Last entry must be all 0s
65 *
66 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
67 *********************************************************************/
68
/*
 * Device match table walked by ixgbe_probe().  A zero subvendor or
 * subdevice field acts as a wildcard (see the match loop in
 * ixgbe_probe(), which accepts an entry when the field is 0).
 */
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
104
105/*********************************************************************
106 *  Table of branding strings
107 *********************************************************************/
108
109static char    *ixgbe_strings[] = {
110	"Intel(R) PRO/10GbE PCI-Express Network Driver"
111};
112
113/*********************************************************************
114 *  Function prototypes
115 *********************************************************************/
116static int      ixgbe_probe(device_t);
117static int      ixgbe_attach(device_t);
118static int      ixgbe_detach(device_t);
119static int      ixgbe_shutdown(device_t);
120static int	ixgbe_suspend(device_t);
121static int	ixgbe_resume(device_t);
122static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
123static void	ixgbe_init(void *);
124static void	ixgbe_init_locked(struct adapter *);
125static void     ixgbe_stop(void *);
126#if __FreeBSD_version >= 1100036
127static uint64_t	ixgbe_get_counter(struct ifnet *, ift_counter);
128#endif
129static void	ixgbe_add_media_types(struct adapter *);
130static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
131static int      ixgbe_media_change(struct ifnet *);
132static void     ixgbe_identify_hardware(struct adapter *);
133static int      ixgbe_allocate_pci_resources(struct adapter *);
134static void	ixgbe_get_slot_info(struct ixgbe_hw *);
135static int      ixgbe_allocate_msix(struct adapter *);
136static int      ixgbe_allocate_legacy(struct adapter *);
137static int	ixgbe_setup_msix(struct adapter *);
138static void	ixgbe_free_pci_resources(struct adapter *);
139static void	ixgbe_local_timer(void *);
140static int	ixgbe_setup_interface(device_t, struct adapter *);
141static void	ixgbe_config_gpie(struct adapter *);
142static void	ixgbe_config_dmac(struct adapter *);
143static void	ixgbe_config_delay_values(struct adapter *);
144static void	ixgbe_config_link(struct adapter *);
145static void	ixgbe_check_eee_support(struct adapter *);
146static void	ixgbe_check_wol_support(struct adapter *);
147static int	ixgbe_setup_low_power_mode(struct adapter *);
148static void	ixgbe_rearm_queues(struct adapter *, u64);
149
150static void     ixgbe_initialize_transmit_units(struct adapter *);
151static void     ixgbe_initialize_receive_units(struct adapter *);
152static void	ixgbe_enable_rx_drop(struct adapter *);
153static void	ixgbe_disable_rx_drop(struct adapter *);
154
155static void     ixgbe_enable_intr(struct adapter *);
156static void     ixgbe_disable_intr(struct adapter *);
157static void     ixgbe_update_stats_counters(struct adapter *);
158static void     ixgbe_set_promisc(struct adapter *);
159static void     ixgbe_set_multi(struct adapter *);
160static void     ixgbe_update_link_status(struct adapter *);
161static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
162static void	ixgbe_configure_ivars(struct adapter *);
163static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
164
165static void	ixgbe_setup_vlan_hw_support(struct adapter *);
166static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
167static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
168
169static void	ixgbe_add_device_sysctls(struct adapter *);
170static void     ixgbe_add_hw_stats(struct adapter *);
171
172/* Sysctl handlers */
173static int	ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
174static int	ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
175static int	ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
176static int	ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
177static int	ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
178static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
179static int	ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
180static int	ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
181static int	ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
182static int	ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
183static int	ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
184static int	ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
185
186/* Support for pluggable optic modules */
187static bool	ixgbe_sfp_probe(struct adapter *);
188static void	ixgbe_setup_optics(struct adapter *);
189
190/* Legacy (single vector interrupt handler */
191static void	ixgbe_legacy_irq(void *);
192
193/* The MSI/X Interrupt handlers */
194static void	ixgbe_msix_que(void *);
195static void	ixgbe_msix_link(void *);
196
197/* Deferred interrupt tasklets */
198static void	ixgbe_handle_que(void *, int);
199static void	ixgbe_handle_link(void *, int);
200static void	ixgbe_handle_msf(void *, int);
201static void	ixgbe_handle_mod(void *, int);
202static void	ixgbe_handle_phy(void *, int);
203
204#ifdef IXGBE_FDIR
205static void	ixgbe_reinit_fdir(void *, int);
206#endif
207
208#ifdef PCI_IOV
209static void	ixgbe_ping_all_vfs(struct adapter *);
210static void	ixgbe_handle_mbx(void *, int);
211static int	ixgbe_init_iov(device_t, u16, const nvlist_t *);
212static void	ixgbe_uninit_iov(device_t);
213static int	ixgbe_add_vf(device_t, u16, const nvlist_t *);
214static void	ixgbe_initialize_iov(struct adapter *);
215static void	ixgbe_recalculate_max_frame(struct adapter *);
216static void	ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
217#endif /* PCI_IOV */
218
219
220/*********************************************************************
221 *  FreeBSD Device Interface Entry Points
222 *********************************************************************/
223
/* Newbus device method dispatch table for the "ix" driver. */
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixgbe_init_iov),
	DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
	DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
#endif /* PCI_IOV */
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

/* Module dependencies: load after pci and ether (and netmap if enabled). */
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
252
253/*
254** TUNEABLE PARAMETERS:
255*/
256
257static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
258		   "IXGBE driver parameters");
259
260/*
261** AIM: Adaptive Interrupt Moderation
262** which means that the interrupt rate
263** is varied over time based on the
264** traffic for that interrupt vector
265*/
266static int ixgbe_enable_aim = TRUE;
267SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
268    "Enable adaptive interrupt moderation");
269
270static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
271SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
272    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
273
274/* How many packets rxeof tries to clean at a time */
275static int ixgbe_rx_process_limit = 256;
276TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
277SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
278    &ixgbe_rx_process_limit, 0,
279    "Maximum number of received packets to process at a time,"
280    "-1 means unlimited");
281
282/* How many packets txeof tries to clean at a time */
283static int ixgbe_tx_process_limit = 256;
284TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
285SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
286    &ixgbe_tx_process_limit, 0,
287    "Maximum number of sent packets to process at a time,"
288    "-1 means unlimited");
289
290/*
291** Smart speed setting, default to on
292** this only works as a compile option
293** right now as its during attach, set
294** this to 'ixgbe_smart_speed_off' to
295** disable.
296*/
297static int ixgbe_smart_speed = ixgbe_smart_speed_on;
298
299/*
300 * MSIX should be the default for best performance,
301 * but this allows it to be forced off for testing.
302 */
303static int ixgbe_enable_msix = 1;
304SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
305    "Enable MSI-X interrupts");
306
307/*
308 * Number of Queues, can be set to 0,
309 * it then autoconfigures based on the
310 * number of cpus with a max of 8. This
311 * can be overriden manually here.
312 */
313static int ixgbe_num_queues = 0;
314SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
315    "Number of queues to configure, 0 indicates autoconfigure");
316
317/*
318** Number of TX descriptors per ring,
319** setting higher than RX as this seems
320** the better performing choice.
321*/
322static int ixgbe_txd = PERFORM_TXD;
323SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
324    "Number of transmit descriptors per queue");
325
326/* Number of RX descriptors per ring */
327static int ixgbe_rxd = PERFORM_RXD;
328SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
329    "Number of receive descriptors per queue");
330
331/*
332** Defining this on will allow the use
333** of unsupported SFP+ modules, note that
334** doing so you are on your own :)
335*/
336static int allow_unsupported_sfp = FALSE;
337TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
338
339/* Keep running tab on them for sanity check */
340static int ixgbe_total_ports;
341
342#ifdef IXGBE_FDIR
343/*
344** Flow Director actually 'steals'
345** part of the packet buffer as its
346** filter pool, this variable controls
347** how much it uses:
348**  0 = 64K, 1 = 128K, 2 = 256K
349*/
350static int fdir_pballoc = 1;
351#endif
352
353#ifdef DEV_NETMAP
354/*
355 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
356 * be a reference on how to implement netmap support in a driver.
357 * Additional comments are in ixgbe_netmap.h .
358 *
359 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
360 * that extend the standard driver.
361 */
362#include <dev/netmap/ixgbe_netmap.h>
363#endif /* DEV_NETMAP */
364
365static MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
366
367/*********************************************************************
368 *  Device identification routine
369 *
370 *  ixgbe_probe determines if the driver should be loaded on
371 *  adapter based on PCI vendor/device id of the adapter.
372 *
373 *  return BUS_PROBE_DEFAULT on success, positive on failure
374 *********************************************************************/
375
376static int
377ixgbe_probe(device_t dev)
378{
379	ixgbe_vendor_info_t *ent;
380
381	u16	pci_vendor_id = 0;
382	u16	pci_device_id = 0;
383	u16	pci_subvendor_id = 0;
384	u16	pci_subdevice_id = 0;
385	char	adapter_name[256];
386
387	INIT_DEBUGOUT("ixgbe_probe: begin");
388
389	pci_vendor_id = pci_get_vendor(dev);
390	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
391		return (ENXIO);
392
393	pci_device_id = pci_get_device(dev);
394	pci_subvendor_id = pci_get_subvendor(dev);
395	pci_subdevice_id = pci_get_subdevice(dev);
396
397	ent = ixgbe_vendor_info_array;
398	while (ent->vendor_id != 0) {
399		if ((pci_vendor_id == ent->vendor_id) &&
400		    (pci_device_id == ent->device_id) &&
401
402		    ((pci_subvendor_id == ent->subvendor_id) ||
403		     (ent->subvendor_id == 0)) &&
404
405		    ((pci_subdevice_id == ent->subdevice_id) ||
406		     (ent->subdevice_id == 0))) {
407			sprintf(adapter_name, "%s, Version - %s",
408				ixgbe_strings[ent->index],
409				ixgbe_driver_version);
410			device_set_desc_copy(dev, adapter_name);
411			++ixgbe_total_ports;
412			return (BUS_PROBE_DEFAULT);
413		}
414		ent++;
415	}
416	return (ENXIO);
417}
418
419/*********************************************************************
420 *  Device initialization routine
421 *
422 *  The attach entry point is called when the driver is being loaded.
423 *  This routine identifies the type of hardware, allocates all resources
424 *  and initializes the hardware.
425 *
426 *  return 0 on success, positive on failure
427 *********************************************************************/
428
static int
ixgbe_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u16		csum;	/* scratch for EEPROM checksum validation */
	u32		ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/*
	 * Do descriptor calc and sanity checks.
	 * Bad tunable values fall back to defaults rather than failing
	 * the attach.
	 */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(*adapter->mta) *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;	/* not fatal: module may be inserted later */
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,"Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	/* Bring the MAC up; some errors are warnings, some are fatal */
	error = ixgbe_init_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\n If you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev,"Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev,"No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Detect and set physical type */
	ixgbe_setup_optics(adapter);

	/* Interrupt setup: MSI-X when available, otherwise legacy/MSI */
	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
	else
		error = ixgbe_allocate_legacy(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

        /* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(hw);


	/* Set an initial default flow control value */
	adapter->fc = ixgbe_fc_full;

#ifdef PCI_IOV
	/* SR-IOV is only available with MSI-X on non-82598 MACs */
	if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
		nvlist_t *pf_schema, *vf_schema;

		hw->mbx.ops.init_params(hw);
		pf_schema = pci_iov_schema_alloc_node();
		vf_schema = pci_iov_schema_alloc_node();
		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
		    IOV_SCHEMA_HASDEFAULT, TRUE);
		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
		    IOV_SCHEMA_HASDEFAULT, FALSE);
		error = pci_iov_attach(dev, pf_schema, vf_schema);
		if (error != 0) {
			/* SR-IOV failure is non-fatal to the attach */
			device_printf(dev,
			    "Error %d setting up SR-IOV\n", error);
		}
	}
#endif /* PCI_IOV */

	/* Check for certain supported features */
	ixgbe_check_wol_support(adapter);
	ixgbe_check_eee_support(adapter);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");
	return (0);

	/* Unwind: err_late also frees queue structures; err_out does not */
err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	return (error);
}
635
636/*********************************************************************
637 *  Device removal routine
638 *
639 *  The detach entry point is called when the driver is being removed.
640 *  This routine stops the adapter and deallocates all the resources
641 *  that were allocated for driver operation.
642 *
643 *  return 0 on success, positive on failure
644 *********************************************************************/
645
static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	if (pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}
#endif /* PCI_IOV */

	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/*
	 * Drain and free the per-queue taskqueues before any queue
	 * memory is released below.
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
#ifndef IXGBE_LEGACY_TX
			taskqueue_drain(que->tq, &txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
#ifdef PCI_IOV
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
#endif
		taskqueue_drain(adapter->tq, &adapter->phy_task);
#ifdef IXGBE_FDIR
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the stack, then release driver resources */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}
726
727/*********************************************************************
728 *
729 *  Shutdown entry point
730 *
731 **********************************************************************/
732
733static int
734ixgbe_shutdown(device_t dev)
735{
736	struct adapter *adapter = device_get_softc(dev);
737	int error = 0;
738
739	INIT_DEBUGOUT("ixgbe_shutdown: begin");
740
741	IXGBE_CORE_LOCK(adapter);
742	error = ixgbe_setup_low_power_mode(adapter);
743	IXGBE_CORE_UNLOCK(adapter);
744
745	return (error);
746}
747
748/**
749 * Methods for going from:
750 * D0 -> D3: ixgbe_suspend
751 * D3 -> D0: ixgbe_resume
752 */
753static int
754ixgbe_suspend(device_t dev)
755{
756	struct adapter *adapter = device_get_softc(dev);
757	int error = 0;
758
759	INIT_DEBUGOUT("ixgbe_suspend: begin");
760
761	IXGBE_CORE_LOCK(adapter);
762
763	error = ixgbe_setup_low_power_mode(adapter);
764
765	/* Save state and power down */
766	pci_save_state(dev);
767	pci_set_powerstate(dev, PCI_POWERSTATE_D3);
768
769	IXGBE_CORE_UNLOCK(adapter);
770
771	return (error);
772}
773
774static int
775ixgbe_resume(device_t dev)
776{
777	struct adapter *adapter = device_get_softc(dev);
778	struct ifnet *ifp = adapter->ifp;
779	struct ixgbe_hw *hw = &adapter->hw;
780	u32 wus;
781
782	INIT_DEBUGOUT("ixgbe_resume: begin");
783
784	IXGBE_CORE_LOCK(adapter);
785
786	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
787	pci_restore_state(dev);
788
789	/* Read & clear WUS register */
790	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
791	if (wus)
792		device_printf(dev, "Woken up by (WUS): %#010x\n",
793		    IXGBE_READ_REG(hw, IXGBE_WUS));
794	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
795	/* And clear WUFC until next low-power transition */
796	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
797
798	/*
799	 * Required after D3->D0 transition;
800	 * will re-advertise all previous advertised speeds
801	 */
802	if (ifp->if_flags & IFF_UP)
803		ixgbe_init_locked(adapter);
804
805	IXGBE_CORE_UNLOCK(adapter);
806
807	INIT_DEBUGOUT("ixgbe_resume: end");
808	return (0);
809}
810
811
812/*********************************************************************
813 *  Ioctl entry point
814 *
815 *  ixgbe_ioctl is called when the user wants to configure the
816 *  interface.
817 *
818 *  return 0 on success, positive on failure
819 **********************************************************************/
820
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

        case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
#endif
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			/* A new MTU requires a full reinit of the rings */
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
				ifp->if_mtu + IXGBE_MTU_HDR;
			ixgbe_init_locked(adapter);
#ifdef PCI_IOV
			ixgbe_recalculate_max_frame(adapter);
#endif
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Only promisc/allmulti changes need
				 * action while already running.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
                                }
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		adapter->if_flags = ifp->if_flags;	/* remember for next delta */
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Quiesce interrupts while the filter is rewritten */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* mask holds only the capability bits being toggled */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Reinit so the hardware picks up new offload flags */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the SFP module EEPROM/diag addresses are allowed */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		/* Validate user-supplied length before the read loop */
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
967
968/*********************************************************************
969 *  Init entry point
970 *
971 *  This routine is used in two ways. It is used by the stack as
972 *  init entry point in network interface structure. It is also used
973 *  by the driver as a hw/sw initialization routine to get to a
974 *  consistent state.
975 *
976 *  return 0 on success, positive on failure
977 **********************************************************************/
978#define IXGBE_MHADD_MFS_SHIFT 16
979
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring  *txr;
	struct rx_ring  *rxr;
	u32		txdctl, mhadd;
	u32		rxdctl, rxctrl;
#ifdef PCI_IOV
	enum ixgbe_iov_mode mode;
#endif

	/* Caller must hold the core lock (see ixgbe_init()) */
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	/* Quiesce the adapter and stop the watchdog before reprogramming */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
        callout_stop(&adapter->timer);

#ifdef PCI_IOV
	mode = ixgbe_get_iov_mode(adapter);
	adapter->pool = ixgbe_max_vfs(mode);
	/* Queue indices may change with IOV mode */
	for (int i = 0; i < adapter->num_queues; i++) {
		adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
		adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
	}
#endif
        /* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		/* SCTP checksum offload not advertised on 82598 */
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
#ifdef PCI_IOV
	ixgbe_initialize_iov(adapter);
#endif
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo frames
	*/
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSIX interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}

	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		/*
		 * Poll for the enable bit to latch; note j is shared
		 * across rings, so total delay is capped at ~10ms for
		 * the whole loop, not per ring.
		 */
		for (; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the per-second local timer (watchdog/stats) */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix)  {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
                ixgbe_set_ivar(adapter, 0, 0, 0);
                ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}
#endif

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		int err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                	device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			/* NOTE: returns without setting IFF_DRV_RUNNING */
			return;
        	}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Configure Energy Efficient Ethernet for supported devices */
	ixgbe_setup_eee(hw, adapter->eee_enabled);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

#ifdef PCI_IOV
	/* Enable the use of the MBX by the VF's */
	{
		u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		reg |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
	}
#endif

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
}
1231
/*
 * if_init entry point: take the core lock and run the locked
 * init path.  Callers pass the softc (struct adapter *) as arg.
 */
static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = (struct adapter *)arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1242
1243static void
1244ixgbe_config_gpie(struct adapter *adapter)
1245{
1246	struct ixgbe_hw *hw = &adapter->hw;
1247	u32 gpie;
1248
1249	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
1250
1251	/* Fan Failure Interrupt */
1252	if (hw->device_id == IXGBE_DEV_ID_82598AT)
1253		gpie |= IXGBE_SDP1_GPIEN;
1254
1255	/*
1256	 * Module detection (SDP2)
1257	 * Media ready (SDP1)
1258	 */
1259	if (hw->mac.type == ixgbe_mac_82599EB) {
1260		gpie |= IXGBE_SDP2_GPIEN;
1261		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
1262			gpie |= IXGBE_SDP1_GPIEN;
1263	}
1264
1265	/*
1266	 * Thermal Failure Detection (X540)
1267	 * Link Detection (X557)
1268	 */
1269	if (hw->mac.type == ixgbe_mac_X540 ||
1270	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1271	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1272		gpie |= IXGBE_SDP0_GPIEN_X540;
1273
1274	if (adapter->msix > 1) {
1275		/* Enable Enhanced MSIX mode */
1276		gpie |= IXGBE_GPIE_MSIX_MODE;
1277		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1278		    IXGBE_GPIE_OCD;
1279	}
1280
1281	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1282	return;
1283}
1284
1285/*
1286 * Requires adapter->max_frame_size to be set.
1287 */
1288static void
1289ixgbe_config_delay_values(struct adapter *adapter)
1290{
1291	struct ixgbe_hw *hw = &adapter->hw;
1292	u32 rxpb, frame, size, tmp;
1293
1294	frame = adapter->max_frame_size;
1295
1296	/* Calculate High Water */
1297	switch (hw->mac.type) {
1298	case ixgbe_mac_X540:
1299	case ixgbe_mac_X550:
1300	case ixgbe_mac_X550EM_x:
1301		tmp = IXGBE_DV_X540(frame, frame);
1302		break;
1303	default:
1304		tmp = IXGBE_DV(frame, frame);
1305		break;
1306	}
1307	size = IXGBE_BT2KB(tmp);
1308	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1309	hw->fc.high_water[0] = rxpb - size;
1310
1311	/* Now calculate Low Water */
1312	switch (hw->mac.type) {
1313	case ixgbe_mac_X540:
1314	case ixgbe_mac_X550:
1315	case ixgbe_mac_X550EM_x:
1316		tmp = IXGBE_LOW_DV_X540(frame);
1317		break;
1318	default:
1319		tmp = IXGBE_LOW_DV(frame);
1320		break;
1321	}
1322	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1323
1324	hw->fc.requested_mode = adapter->fc;
1325	hw->fc.pause_time = IXGBE_FC_PAUSE;
1326	hw->fc.send_xon = TRUE;
1327}
1328
1329/*
1330**
1331** MSIX Interrupt Handlers and Tasklets
1332**
1333*/
1334
1335static inline void
1336ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1337{
1338	struct ixgbe_hw *hw = &adapter->hw;
1339	u64	queue = (u64)(1 << vector);
1340	u32	mask;
1341
1342	if (hw->mac.type == ixgbe_mac_82598EB) {
1343                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1344                IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1345	} else {
1346                mask = (queue & 0xFFFFFFFF);
1347                if (mask)
1348                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1349                mask = (queue >> 32);
1350                if (mask)
1351                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1352	}
1353}
1354
1355static inline void
1356ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1357{
1358	struct ixgbe_hw *hw = &adapter->hw;
1359	u64	queue = (u64)(1 << vector);
1360	u32	mask;
1361
1362	if (hw->mac.type == ixgbe_mac_82598EB) {
1363                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1364                IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1365	} else {
1366                mask = (queue & 0xFFFFFFFF);
1367                if (mask)
1368                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1369                mask = (queue >> 32);
1370                if (mask)
1371                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1372	}
1373}
1374
/*
 * Deferred (taskqueue) per-queue service routine: clean the RX
 * ring, then the TX ring, then restart transmission if packets
 * are waiting, and finally re-enable the queue's interrupt.
 */
static void
ixgbe_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifndef IXGBE_LEGACY_TX
		/* Multiqueue: drain the ring's buf_ring */
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		/* Legacy: drain the interface send queue */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
	else
		ixgbe_enable_intr(adapter);
	return;
}
1404
1405
1406/*********************************************************************
1407 *
1408 *  Legacy Interrupt Service routine
1409 *
1410 **********************************************************************/
1411
static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct 		tx_ring *txr = adapter->tx_rings;
	bool		more;
	u32       	reg_eicr;


	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	/* No cause bits latched: nothing for us, unmask and leave */
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	/* Clean RX; "more" requests a deferred follow-up pass */
	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	/* More RX work: defer to the queue task; else unmask */
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);
	return;
}
1468
1469
1470/*********************************************************************
1471 *
1472 *  MSIX Queue Interrupt Service routine
1473 *
1474 **********************************************************************/
1475void
1476ixgbe_msix_que(void *arg)
1477{
1478	struct ix_queue	*que = arg;
1479	struct adapter  *adapter = que->adapter;
1480	struct ifnet    *ifp = adapter->ifp;
1481	struct tx_ring	*txr = que->txr;
1482	struct rx_ring	*rxr = que->rxr;
1483	bool		more;
1484	u32		newitr = 0;
1485
1486
1487	/* Protect against spurious interrupts */
1488	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1489		return;
1490
1491	ixgbe_disable_queue(adapter, que->msix);
1492	++que->irqs;
1493
1494	more = ixgbe_rxeof(que);
1495
1496	IXGBE_TX_LOCK(txr);
1497	ixgbe_txeof(txr);
1498#ifdef IXGBE_LEGACY_TX
1499	if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1500		ixgbe_start_locked(txr, ifp);
1501#else
1502	if (!drbr_empty(ifp, txr->br))
1503		ixgbe_mq_start_locked(ifp, txr);
1504#endif
1505	IXGBE_TX_UNLOCK(txr);
1506
1507	/* Do AIM now? */
1508
1509	if (ixgbe_enable_aim == FALSE)
1510		goto no_calc;
1511	/*
1512	** Do Adaptive Interrupt Moderation:
1513        **  - Write out last calculated setting
1514	**  - Calculate based on average size over
1515	**    the last interval.
1516	*/
1517        if (que->eitr_setting)
1518                IXGBE_WRITE_REG(&adapter->hw,
1519                    IXGBE_EITR(que->msix), que->eitr_setting);
1520
1521        que->eitr_setting = 0;
1522
1523        /* Idle, do nothing */
1524        if ((txr->bytes == 0) && (rxr->bytes == 0))
1525                goto no_calc;
1526
1527	if ((txr->bytes) && (txr->packets))
1528               	newitr = txr->bytes/txr->packets;
1529	if ((rxr->bytes) && (rxr->packets))
1530		newitr = max(newitr,
1531		    (rxr->bytes / rxr->packets));
1532	newitr += 24; /* account for hardware frame, crc */
1533
1534	/* set an upper boundary */
1535	newitr = min(newitr, 3000);
1536
1537	/* Be nice to the mid range */
1538	if ((newitr > 300) && (newitr < 1200))
1539		newitr = (newitr / 3);
1540	else
1541		newitr = (newitr / 2);
1542
1543        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1544                newitr |= newitr << 16;
1545        else
1546                newitr |= IXGBE_EITR_CNT_WDIS;
1547
1548        /* save for next interrupt */
1549        que->eitr_setting = newitr;
1550
1551        /* Reset state */
1552        txr->bytes = 0;
1553        txr->packets = 0;
1554        rxr->bytes = 0;
1555        rxr->packets = 0;
1556
1557no_calc:
1558	if (more)
1559		taskqueue_enqueue(que->tq, &que->que_task);
1560	else
1561		ixgbe_enable_queue(adapter, que->msix);
1562	return;
1563}
1564
1565
/*
 * MSI-X link/"other" vector handler: reads the latched cause
 * bits, dispatches the matching deferred tasks (link, FDIR,
 * mailbox, SFP module/MSF, external PHY), and reports critical
 * conditions (ECC error, over-temp, fan failure).
 */
static void
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg_eicr, mod_mask;

	++adapter->link_irq;

	/* First get the cause */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* Causes below only exist on MACs newer than 82598 */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
#ifdef IXGBE_FDIR
		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
                	/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		} else
#endif
		if (reg_eicr & IXGBE_EICR_ECC) {
                	device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (reg_eicr & IXGBE_EICR_TS) {
			device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
			    "PHY IS SHUT DOWN!!\n");
			device_printf(adapter->dev, "System shutdown required!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
		}
#ifdef PCI_IOV
		if (reg_eicr & IXGBE_EICR_MAILBOX)
			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
#endif
	}

	/* Pluggable optics-related interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
	else
		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

	if (ixgbe_is_sfp(hw)) {
		/* SDP1: multispeed fiber; mod_mask: module insertion */
		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		} else if (reg_eicr & mod_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
	}

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
	}

	/* Re-arm the "other" cause interrupt */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return;
}
1650
1651/*********************************************************************
1652 *
1653 *  Media Ioctl callback
1654 *
1655 *  This routine is called whenever the user queries the status of
1656 *  the interface using ifconfig.
1657 *
1658 **********************************************************************/
static void
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link state before reporting */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Link down: report valid-but-inactive and bail */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map (physical layer, negotiated speed) to an ifmedia
	 * subtype.  Each supported layer gets its own speed switch.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	** XXX: These need to use the proper media types once
	** they're added.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
#endif

	IXGBE_CORE_UNLOCK(adapter);

	return;
}
1784
1785/*********************************************************************
1786 *
1787 *  Media Ioctl callback
1788 *
1789 *  This routine is called when the user changes speed/duplex using
1790 *  media/mediopt option with ifconfig.
1791 *
1792 **********************************************************************/
static int
ixgbe_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media is fixed; user cannot change it */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	** We don't actually need to check against the supported
	** media types of the adapter; ifmedia will take care of
	** that for us.
	*/
	/*
	 * The case fallthroughs below are intentional: faster media
	 * types accumulate all of the slower advertised speeds.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
		case IFM_10G_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			/* FALLTHROUGH */
		case IFM_10G_LRM:
		case IFM_10G_SR: /* KR, too */
		case IFM_10G_LR:
		case IFM_10G_CX4: /* KX4 */
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			/* FALLTHROUGH */
		case IFM_10G_TWINAX:
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_1000_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			/* FALLTHROUGH */
		case IFM_1000_LX:
		case IFM_1000_SX:
		case IFM_1000_CX: /* KX */
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_100_TX:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			break;
		default:
			goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* Record advertised speeds as a packed bit field */
	adapter->advertise =
		((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
		((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
		((speed & IXGBE_LINK_SPEED_100_FULL) << 0);

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	return (EINVAL);
}
1853
/*
 * Program unicast (UPE) / multicast (MPE) promiscuous bits in
 * FCTRL to match the interface flags.  UPE is always cleared
 * first; MPE is cleared only when the multicast list fits in
 * the hardware filter.
 */
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	u_int32_t       reg_rctl;
	struct ifnet   *ifp = adapter->ifp;
	int		mcnt = 0;

	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	reg_rctl &= (~IXGBE_FCTRL_UPE);
	/*
	 * Count multicast addresses up to the filter limit;
	 * IFF_ALLMULTI forces the count to the cap so MPE stays set.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		struct	ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
		IF_ADDR_LOCK(ifp);
#else
		if_maddr_rlock(ifp);
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
#else
		if_maddr_runlock(ifp);
#endif
	}
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg_rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);

	/* Then re-assert UPE/MPE per the interface flags */
	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGBE_FCTRL_MPE;
		reg_rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
	}
	return;
}
1899
1900
1901/*********************************************************************
1902 *  Multicast Update
1903 *
1904 *  This routine is called whenever multicast address list is updated.
1905 *
1906 **********************************************************************/
1907#define IXGBE_RAR_ENTRIES 16
1908
1909static void
1910ixgbe_set_multi(struct adapter *adapter)
1911{
1912	u32			fctrl;
1913	u8			*update_ptr;
1914	struct ifmultiaddr	*ifma;
1915	struct ixgbe_mc_addr	*mta;
1916	int			mcnt = 0;
1917	struct ifnet		*ifp = adapter->ifp;
1918
1919	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1920
1921	mta = adapter->mta;
1922	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
1923
1924#if __FreeBSD_version < 800000
1925	IF_ADDR_LOCK(ifp);
1926#else
1927	if_maddr_rlock(ifp);
1928#endif
1929	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1930		if (ifma->ifma_addr->sa_family != AF_LINK)
1931			continue;
1932		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1933			break;
1934		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1935		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
1936		mta[mcnt].vmdq = adapter->pool;
1937		mcnt++;
1938	}
1939#if __FreeBSD_version < 800000
1940	IF_ADDR_UNLOCK(ifp);
1941#else
1942	if_maddr_runlock(ifp);
1943#endif
1944
1945	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1946	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1947	if (ifp->if_flags & IFF_PROMISC)
1948		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1949	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1950	    ifp->if_flags & IFF_ALLMULTI) {
1951		fctrl |= IXGBE_FCTRL_MPE;
1952		fctrl &= ~IXGBE_FCTRL_UPE;
1953	} else
1954		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1955
1956	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1957
1958	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1959		update_ptr = (u8 *)mta;
1960		ixgbe_update_mc_addr_list(&adapter->hw,
1961		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1962	}
1963
1964	return;
1965}
1966
1967/*
1968 * This is an iterator function now needed by the multicast
1969 * shared code. It simply feeds the shared code routine the
1970 * addresses in the array of ixgbe_set_multi() one by one.
1971 */
1972static u8 *
1973ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1974{
1975	struct ixgbe_mc_addr *mta;
1976
1977	mta = (struct ixgbe_mc_addr *)*update_ptr;
1978	*vmdq = mta->vmdq;
1979
1980	*update_ptr = (u8*)(mta + 1);;
1981	return (mta->addr);
1982}
1983
1984
1985/*********************************************************************
1986 *  Timer routine
1987 *
1988 *  This routine checks for link status,updates statistics,
1989 *  and runs the watchdog check.
1990 *
1991 **********************************************************************/
1992
1993static void
1994ixgbe_local_timer(void *arg)
1995{
1996	struct adapter	*adapter = arg;
1997	device_t	dev = adapter->dev;
1998	struct ix_queue *que = adapter->queues;
1999	u64		queues = 0;
2000	int		hung = 0;
2001
2002	mtx_assert(&adapter->core_mtx, MA_OWNED);
2003
2004	/* Check for pluggable optics */
2005	if (adapter->sfp_probe)
2006		if (!ixgbe_sfp_probe(adapter))
2007			goto out; /* Nothing to do */
2008
2009	ixgbe_update_link_status(adapter);
2010	ixgbe_update_stats_counters(adapter);
2011
2012	/*
2013	** Check the TX queues status
2014	**	- mark hung queues so we don't schedule on them
2015	**      - watchdog only if all queues show hung
2016	*/
2017	for (int i = 0; i < adapter->num_queues; i++, que++) {
2018		/* Keep track of queues with work for soft irq */
2019		if (que->txr->busy)
2020			queues |= ((u64)1 << que->me);
2021		/*
2022		** Each time txeof runs without cleaning, but there
2023		** are uncleaned descriptors it increments busy. If
2024		** we get to the MAX we declare it hung.
2025		*/
2026		if (que->busy == IXGBE_QUEUE_HUNG) {
2027			++hung;
2028			/* Mark the queue as inactive */
2029			adapter->active_queues &= ~((u64)1 << que->me);
2030			continue;
2031		} else {
2032			/* Check if we've come back from hung */
2033			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
2034                                adapter->active_queues |= ((u64)1 << que->me);
2035		}
2036		if (que->busy >= IXGBE_MAX_TX_BUSY) {
2037			device_printf(dev,"Warning queue %d "
2038			    "appears to be hung!\n", i);
2039			que->txr->busy = IXGBE_QUEUE_HUNG;
2040			++hung;
2041		}
2042
2043	}
2044
2045	/* Only truly watchdog if all queues show hung */
2046	if (hung == adapter->num_queues)
2047		goto watchdog;
2048	else if (queues != 0) { /* Force an IRQ on queues with work */
2049		ixgbe_rearm_queues(adapter, queues);
2050	}
2051
2052out:
2053	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
2054	return;
2055
2056watchdog:
2057	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2058	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2059	adapter->watchdog_events++;
2060	ixgbe_init_locked(adapter);
2061}
2062
2063
2064/*
2065** Note: this routine updates the OS on the link state
2066**	the real check of the hardware only happens with
2067**	a link interrupt.
2068*/
2069static void
2070ixgbe_update_link_status(struct adapter *adapter)
2071{
2072	struct ifnet	*ifp = adapter->ifp;
2073	device_t dev = adapter->dev;
2074
2075	if (adapter->link_up){
2076		if (adapter->link_active == FALSE) {
2077			if (bootverbose)
2078				device_printf(dev,"Link is up %d Gbps %s \n",
2079				    ((adapter->link_speed == 128)? 10:1),
2080				    "Full Duplex");
2081			adapter->link_active = TRUE;
2082			/* Update any Flow Control changes */
2083			ixgbe_fc_enable(&adapter->hw);
2084			/* Update DMA coalescing config */
2085			ixgbe_config_dmac(adapter);
2086			if_link_state_change(ifp, LINK_STATE_UP);
2087#ifdef PCI_IOV
2088			ixgbe_ping_all_vfs(adapter);
2089#endif
2090		}
2091	} else { /* Link down */
2092		if (adapter->link_active == TRUE) {
2093			if (bootverbose)
2094				device_printf(dev,"Link is Down\n");
2095			if_link_state_change(ifp, LINK_STATE_DOWN);
2096			adapter->link_active = FALSE;
2097#ifdef PCI_IOV
2098			ixgbe_ping_all_vfs(adapter);
2099#endif
2100		}
2101	}
2102
2103	return;
2104}
2105
2106
2107/*********************************************************************
2108 *
2109 *  This routine disables all traffic on the adapter by issuing a
2110 *  global reset on the MAC and deallocates TX/RX buffers.
2111 *
2112 **********************************************************************/
2113
2114static void
2115ixgbe_stop(void *arg)
2116{
2117	struct ifnet   *ifp;
2118	struct adapter *adapter = arg;
2119	struct ixgbe_hw *hw = &adapter->hw;
2120	ifp = adapter->ifp;
2121
2122	mtx_assert(&adapter->core_mtx, MA_OWNED);
2123
2124	INIT_DEBUGOUT("ixgbe_stop: begin\n");
2125	ixgbe_disable_intr(adapter);
2126	callout_stop(&adapter->timer);
2127
2128	/* Let the stack know...*/
2129	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2130
2131	ixgbe_reset_hw(hw);
2132	hw->adapter_stopped = FALSE;
2133	ixgbe_stop_adapter(hw);
2134	if (hw->mac.type == ixgbe_mac_82599EB)
2135		ixgbe_stop_mac_link_on_d3_82599(hw);
2136	/* Turn off the laser - noop with no optics */
2137	ixgbe_disable_tx_laser(hw);
2138
2139	/* Update the stack */
2140	adapter->link_up = FALSE;
2141       	ixgbe_update_link_status(adapter);
2142
2143	/* reprogram the RAR[0] in case user changed it. */
2144	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2145
2146	return;
2147}
2148
2149
2150/*********************************************************************
2151 *
2152 *  Determine hardware revision.
2153 *
2154 **********************************************************************/
2155static void
2156ixgbe_identify_hardware(struct adapter *adapter)
2157{
2158	device_t        dev = adapter->dev;
2159	struct ixgbe_hw *hw = &adapter->hw;
2160
2161	/* Save off the information about this board */
2162	hw->vendor_id = pci_get_vendor(dev);
2163	hw->device_id = pci_get_device(dev);
2164	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2165	hw->subsystem_vendor_id =
2166	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2167	hw->subsystem_device_id =
2168	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2169
2170	/*
2171	** Make sure BUSMASTER is set
2172	*/
2173	pci_enable_busmaster(dev);
2174
2175	/* We need this here to set the num_segs below */
2176	ixgbe_set_mac_type(hw);
2177
2178	/* Pick up the 82599 settings */
2179	if (hw->mac.type != ixgbe_mac_82598EB) {
2180		hw->phy.smart_speed = ixgbe_smart_speed;
2181		adapter->num_segs = IXGBE_82599_SCATTER;
2182	} else
2183		adapter->num_segs = IXGBE_82598_SCATTER;
2184
2185	return;
2186}
2187
2188/*********************************************************************
2189 *
2190 *  Determine optic type
2191 *
2192 **********************************************************************/
2193static void
2194ixgbe_setup_optics(struct adapter *adapter)
2195{
2196	struct ixgbe_hw *hw = &adapter->hw;
2197	int		layer;
2198
2199	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
2200
2201	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2202		adapter->optics = IFM_10G_T;
2203		return;
2204	}
2205
2206	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2207		adapter->optics = IFM_1000_T;
2208		return;
2209	}
2210
2211	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2212		adapter->optics = IFM_1000_SX;
2213		return;
2214	}
2215
2216	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2217	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2218		adapter->optics = IFM_10G_LR;
2219		return;
2220	}
2221
2222	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2223		adapter->optics = IFM_10G_SR;
2224		return;
2225	}
2226
2227	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2228		adapter->optics = IFM_10G_TWINAX;
2229		return;
2230	}
2231
2232	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2233	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2234		adapter->optics = IFM_10G_CX4;
2235		return;
2236	}
2237
2238	/* If we get here just set the default */
2239	adapter->optics = IFM_ETHER | IFM_AUTO;
2240	return;
2241}
2242
2243/*********************************************************************
2244 *
2245 *  Setup the Legacy or MSI Interrupt handler
2246 *
2247 **********************************************************************/
2248static int
2249ixgbe_allocate_legacy(struct adapter *adapter)
2250{
2251	device_t	dev = adapter->dev;
2252	struct		ix_queue *que = adapter->queues;
2253#ifndef IXGBE_LEGACY_TX
2254	struct tx_ring		*txr = adapter->tx_rings;
2255#endif
2256	int		error, rid = 0;
2257
2258	/* MSI RID at 1 */
2259	if (adapter->msix == 1)
2260		rid = 1;
2261
2262	/* We allocate a single interrupt resource */
2263	adapter->res = bus_alloc_resource_any(dev,
2264            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2265	if (adapter->res == NULL) {
2266		device_printf(dev, "Unable to allocate bus resource: "
2267		    "interrupt\n");
2268		return (ENXIO);
2269	}
2270
2271	/*
2272	 * Try allocating a fast interrupt and the associated deferred
2273	 * processing contexts.
2274	 */
2275#ifndef IXGBE_LEGACY_TX
2276	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2277#endif
2278	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2279	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2280            taskqueue_thread_enqueue, &que->tq);
2281	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2282            device_get_nameunit(adapter->dev));
2283
2284	/* Tasklets for Link, SFP and Multispeed Fiber */
2285	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2286	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2287	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2288	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2289#ifdef IXGBE_FDIR
2290	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2291#endif
2292	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2293	    taskqueue_thread_enqueue, &adapter->tq);
2294	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2295	    device_get_nameunit(adapter->dev));
2296
2297	if ((error = bus_setup_intr(dev, adapter->res,
2298            INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2299            que, &adapter->tag)) != 0) {
2300		device_printf(dev, "Failed to register fast interrupt "
2301		    "handler: %d\n", error);
2302		taskqueue_free(que->tq);
2303		taskqueue_free(adapter->tq);
2304		que->tq = NULL;
2305		adapter->tq = NULL;
2306		return (error);
2307	}
2308	/* For simplicity in the handlers */
2309	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2310
2311	return (0);
2312}
2313
2314
2315/*********************************************************************
2316 *
2317 *  Setup MSIX Interrupt resources and handlers
2318 *
2319 **********************************************************************/
2320static int
2321ixgbe_allocate_msix(struct adapter *adapter)
2322{
2323	device_t        dev = adapter->dev;
2324	struct 		ix_queue *que = adapter->queues;
2325	struct  	tx_ring *txr = adapter->tx_rings;
2326	int 		error, rid, vector = 0;
2327	int		cpu_id = 0;
2328#ifdef	RSS
2329	cpuset_t	cpu_mask;
2330#endif
2331
2332#ifdef	RSS
2333	/*
2334	 * If we're doing RSS, the number of queues needs to
2335	 * match the number of RSS buckets that are configured.
2336	 *
2337	 * + If there's more queues than RSS buckets, we'll end
2338	 *   up with queues that get no traffic.
2339	 *
2340	 * + If there's more RSS buckets than queues, we'll end
2341	 *   up having multiple RSS buckets map to the same queue,
2342	 *   so there'll be some contention.
2343	 */
2344	if (adapter->num_queues != rss_getnumbuckets()) {
2345		device_printf(dev,
2346		    "%s: number of queues (%d) != number of RSS buckets (%d)"
2347		    "; performance will be impacted.\n",
2348		    __func__,
2349		    adapter->num_queues,
2350		    rss_getnumbuckets());
2351	}
2352#endif
2353
2354	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2355		rid = vector + 1;
2356		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2357		    RF_SHAREABLE | RF_ACTIVE);
2358		if (que->res == NULL) {
2359			device_printf(dev,"Unable to allocate"
2360		    	    " bus resource: que interrupt [%d]\n", vector);
2361			return (ENXIO);
2362		}
2363		/* Set the handler function */
2364		error = bus_setup_intr(dev, que->res,
2365		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2366		    ixgbe_msix_que, que, &que->tag);
2367		if (error) {
2368			que->res = NULL;
2369			device_printf(dev, "Failed to register QUE handler");
2370			return (error);
2371		}
2372#if __FreeBSD_version >= 800504
2373		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2374#endif
2375		que->msix = vector;
2376		adapter->active_queues |= (u64)(1 << que->msix);
2377#ifdef	RSS
2378		/*
2379		 * The queue ID is used as the RSS layer bucket ID.
2380		 * We look up the queue ID -> RSS CPU ID and select
2381		 * that.
2382		 */
2383		cpu_id = rss_getcpu(i % rss_getnumbuckets());
2384#else
2385		/*
2386		 * Bind the msix vector, and thus the
2387		 * rings to the corresponding cpu.
2388		 *
2389		 * This just happens to match the default RSS round-robin
2390		 * bucket -> queue -> CPU allocation.
2391		 */
2392		if (adapter->num_queues > 1)
2393			cpu_id = i;
2394#endif
2395		if (adapter->num_queues > 1)
2396			bus_bind_intr(dev, que->res, cpu_id);
2397#ifdef IXGBE_DEBUG
2398#ifdef	RSS
2399		device_printf(dev,
2400		    "Bound RSS bucket %d to CPU %d\n",
2401		    i, cpu_id);
2402#else
2403		device_printf(dev,
2404		    "Bound queue %d to cpu %d\n",
2405		    i, cpu_id);
2406#endif
2407#endif /* IXGBE_DEBUG */
2408
2409
2410#ifndef IXGBE_LEGACY_TX
2411		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2412#endif
2413		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2414		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2415		    taskqueue_thread_enqueue, &que->tq);
2416#ifdef	RSS
2417		CPU_SETOF(cpu_id, &cpu_mask);
2418		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2419		    &cpu_mask,
2420		    "%s (bucket %d)",
2421		    device_get_nameunit(adapter->dev),
2422		    cpu_id);
2423#else
2424		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2425		    device_get_nameunit(adapter->dev));
2426#endif
2427	}
2428
2429	/* and Link */
2430	rid = vector + 1;
2431	adapter->res = bus_alloc_resource_any(dev,
2432    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2433	if (!adapter->res) {
2434		device_printf(dev,"Unable to allocate"
2435    	    " bus resource: Link interrupt [%d]\n", rid);
2436		return (ENXIO);
2437	}
2438	/* Set the link handler function */
2439	error = bus_setup_intr(dev, adapter->res,
2440	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2441	    ixgbe_msix_link, adapter, &adapter->tag);
2442	if (error) {
2443		adapter->res = NULL;
2444		device_printf(dev, "Failed to register LINK handler");
2445		return (error);
2446	}
2447#if __FreeBSD_version >= 800504
2448	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2449#endif
2450	adapter->vector = vector;
2451	/* Tasklets for Link, SFP and Multispeed Fiber */
2452	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2453	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2454	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2455#ifdef PCI_IOV
2456	TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
2457#endif
2458	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2459#ifdef IXGBE_FDIR
2460	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2461#endif
2462	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2463	    taskqueue_thread_enqueue, &adapter->tq);
2464	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2465	    device_get_nameunit(adapter->dev));
2466
2467	return (0);
2468}
2469
2470/*
2471 * Setup Either MSI/X or MSI
2472 */
2473static int
2474ixgbe_setup_msix(struct adapter *adapter)
2475{
2476	device_t dev = adapter->dev;
2477	int rid, want, queues, msgs;
2478
2479	/* Override by tuneable */
2480	if (ixgbe_enable_msix == 0)
2481		goto msi;
2482
2483	/* First try MSI/X */
2484	msgs = pci_msix_count(dev);
2485	if (msgs == 0)
2486		goto msi;
2487	rid = PCIR_BAR(MSIX_82598_BAR);
2488	adapter->msix_mem = bus_alloc_resource_any(dev,
2489	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2490       	if (adapter->msix_mem == NULL) {
2491		rid += 4;	/* 82599 maps in higher BAR */
2492		adapter->msix_mem = bus_alloc_resource_any(dev,
2493		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2494	}
2495       	if (adapter->msix_mem == NULL) {
2496		/* May not be enabled */
2497		device_printf(adapter->dev,
2498		    "Unable to map MSIX table \n");
2499		goto msi;
2500	}
2501
2502	/* Figure out a reasonable auto config value */
2503	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2504
2505#ifdef	RSS
2506	/* If we're doing RSS, clamp at the number of RSS buckets */
2507	if (queues > rss_getnumbuckets())
2508		queues = rss_getnumbuckets();
2509#endif
2510
2511	if (ixgbe_num_queues != 0)
2512		queues = ixgbe_num_queues;
2513
2514	/* reflect correct sysctl value */
2515	ixgbe_num_queues = queues;
2516
2517	/*
2518	** Want one vector (RX/TX pair) per queue
2519	** plus an additional for Link.
2520	*/
2521	want = queues + 1;
2522	if (msgs >= want)
2523		msgs = want;
2524	else {
2525               	device_printf(adapter->dev,
2526		    "MSIX Configuration Problem, "
2527		    "%d vectors but %d queues wanted!\n",
2528		    msgs, want);
2529		goto msi;
2530	}
2531	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2532               	device_printf(adapter->dev,
2533		    "Using MSIX interrupts with %d vectors\n", msgs);
2534		adapter->num_queues = queues;
2535		return (msgs);
2536	}
2537	/*
2538	** If MSIX alloc failed or provided us with
2539	** less than needed, free and fall through to MSI
2540	*/
2541	pci_release_msi(dev);
2542
2543msi:
2544       	if (adapter->msix_mem != NULL) {
2545		bus_release_resource(dev, SYS_RES_MEMORY,
2546		    rid, adapter->msix_mem);
2547		adapter->msix_mem = NULL;
2548	}
2549       	msgs = 1;
2550       	if (pci_alloc_msi(dev, &msgs) == 0) {
2551               	device_printf(adapter->dev,"Using an MSI interrupt\n");
2552		return (msgs);
2553	}
2554	device_printf(adapter->dev,"Using a Legacy interrupt\n");
2555	return (0);
2556}
2557
2558
2559static int
2560ixgbe_allocate_pci_resources(struct adapter *adapter)
2561{
2562	int             rid;
2563	device_t        dev = adapter->dev;
2564
2565	rid = PCIR_BAR(0);
2566	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2567	    &rid, RF_ACTIVE);
2568
2569	if (!(adapter->pci_mem)) {
2570		device_printf(dev,"Unable to allocate bus resource: memory\n");
2571		return (ENXIO);
2572	}
2573
2574	adapter->osdep.mem_bus_space_tag =
2575		rman_get_bustag(adapter->pci_mem);
2576	adapter->osdep.mem_bus_space_handle =
2577		rman_get_bushandle(adapter->pci_mem);
2578	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2579
2580	/* Legacy defaults */
2581	adapter->num_queues = 1;
2582	adapter->hw.back = &adapter->osdep;
2583
2584	/*
2585	** Now setup MSI or MSI/X, should
2586	** return us the number of supported
2587	** vectors. (Will be 1 for MSI)
2588	*/
2589	adapter->msix = ixgbe_setup_msix(adapter);
2590	return (0);
2591}
2592
/*
 * Release everything ixgbe_allocate_pci_resources() and the interrupt
 * setup paths acquired: per-queue IRQ handlers/resources, the
 * link/legacy interrupt, MSI vectors, the MSI-X table BAR, and BAR 0.
 * Safe to call from a partially failed attach.
 */
static void
ixgbe_free_pci_resources(struct adapter * adapter)
{
	struct 		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	/* MSI-X table BAR differs by MAC generation */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		memrid = PCIR_BAR(MSIX_82598_BAR);
	else
		memrid = PCIR_BAR(MSIX_82599_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	/* Release message vectors, then the memory BARs */
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
2657
2658/*********************************************************************
2659 *
2660 *  Setup networking device structure and register an interface.
2661 *
2662 **********************************************************************/
2663static int
2664ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2665{
2666	struct ifnet   *ifp;
2667
2668	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2669
2670	ifp = adapter->ifp = if_alloc(IFT_ETHER);
2671	if (ifp == NULL) {
2672		device_printf(dev, "can not allocate ifnet structure\n");
2673		return (-1);
2674	}
2675	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2676	ifp->if_baudrate = IF_Gbps(10);
2677	ifp->if_init = ixgbe_init;
2678	ifp->if_softc = adapter;
2679	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2680	ifp->if_ioctl = ixgbe_ioctl;
2681#if __FreeBSD_version >= 1100036
2682	if_setgetcounterfn(ifp, ixgbe_get_counter);
2683#endif
2684#if __FreeBSD_version >= 1100045
2685	/* TSO parameters */
2686	ifp->if_hw_tsomax = 65518;
2687	ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2688	ifp->if_hw_tsomaxsegsize = 2048;
2689#endif
2690#ifndef IXGBE_LEGACY_TX
2691	ifp->if_transmit = ixgbe_mq_start;
2692	ifp->if_qflush = ixgbe_qflush;
2693#else
2694	ifp->if_start = ixgbe_start;
2695	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2696	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2697	IFQ_SET_READY(&ifp->if_snd);
2698#endif
2699
2700	ether_ifattach(ifp, adapter->hw.mac.addr);
2701
2702	adapter->max_frame_size =
2703	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2704
2705	/*
2706	 * Tell the upper layer(s) we support long frames.
2707	 */
2708	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2709
2710	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2711	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2712	ifp->if_capabilities |= IFCAP_LRO;
2713	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2714			     |  IFCAP_VLAN_HWTSO
2715			     |  IFCAP_VLAN_MTU
2716			     |  IFCAP_HWSTATS;
2717	ifp->if_capenable = ifp->if_capabilities;
2718
2719	/*
2720	** Don't turn this on by default, if vlans are
2721	** created on another pseudo device (eg. lagg)
2722	** then vlan events are not passed thru, breaking
2723	** operation, but with HW FILTER off it works. If
2724	** using vlans directly on the ixgbe driver you can
2725	** enable this and get full hardware tag filtering.
2726	*/
2727	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2728
2729	/*
2730	 * Specify the media types supported by this adapter and register
2731	 * callbacks to update media and link information
2732	 */
2733	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2734		    ixgbe_media_status);
2735
2736	ixgbe_add_media_types(adapter);
2737
2738	/* Autoselect media by default */
2739	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2740
2741	return (0);
2742}
2743
/*
 * Register every ifmedia type this adapter's PHY layer supports.
 * Layers with no exact FreeBSD media define (KR/KX4/KX) are mapped to
 * the closest existing type and announced on the console.  The order
 * of ifmedia_add() calls determines the media word listing, so it is
 * preserved as-is.
 */
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	int layer;

	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	/* Passive and active direct-attach copper both map to TWINAX */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);

	/*
	** Other (no matching FreeBSD media type):
	** To workaround this, we'll assign these completely
	** inappropriate media types.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
		/* Someday, someone will care about you... */
		device_printf(dev, "Media supported: 1000baseBX\n");
	}

	/* 82598AT also advertises explicit 1000baseT entries */
	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(&adapter->media,
		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		ifmedia_add(&adapter->media,
		    IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
}
2808
2809static void
2810ixgbe_config_link(struct adapter *adapter)
2811{
2812	struct ixgbe_hw *hw = &adapter->hw;
2813	u32	autoneg, err = 0;
2814	bool	sfp, negotiate;
2815
2816	sfp = ixgbe_is_sfp(hw);
2817
2818	if (sfp) {
2819		if (hw->phy.multispeed_fiber) {
2820			hw->mac.ops.setup_sfp(hw);
2821			ixgbe_enable_tx_laser(hw);
2822			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2823		} else
2824			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2825	} else {
2826		if (hw->mac.ops.check_link)
2827			err = ixgbe_check_link(hw, &adapter->link_speed,
2828			    &adapter->link_up, FALSE);
2829		if (err)
2830			goto out;
2831		autoneg = hw->phy.autoneg_advertised;
2832		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2833                	err  = hw->mac.ops.get_link_capabilities(hw,
2834			    &autoneg, &negotiate);
2835		if (err)
2836			goto out;
2837		if (hw->mac.ops.setup_link)
2838                	err = hw->mac.ops.setup_link(hw,
2839			    autoneg, adapter->link_up);
2840	}
2841out:
2842	return;
2843}
2844
2845
2846/*********************************************************************
2847 *
2848 *  Enable transmit units.
2849 *
2850 **********************************************************************/
2851static void
2852ixgbe_initialize_transmit_units(struct adapter *adapter)
2853{
2854	struct tx_ring	*txr = adapter->tx_rings;
2855	struct ixgbe_hw	*hw = &adapter->hw;
2856
2857	/* Setup the Base and Length of the Tx Descriptor Ring */
2858
2859	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2860		u64	tdba = txr->txdma.dma_paddr;
2861		u32	txctrl = 0;
2862		int	j = txr->me;
2863
2864		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
2865		       (tdba & 0x00000000ffffffffULL));
2866		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
2867		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
2868		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2869
2870		/* Setup the HW Tx Head and Tail descriptor pointers */
2871		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
2872		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
2873
2874		/* Cache the tail address */
2875		txr->tail = IXGBE_TDT(j);
2876
2877		/* Set the processing limit */
2878		txr->process_limit = ixgbe_tx_process_limit;
2879
2880		/* Disable Head Writeback */
2881		switch (hw->mac.type) {
2882		case ixgbe_mac_82598EB:
2883			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
2884			break;
2885		case ixgbe_mac_82599EB:
2886		case ixgbe_mac_X540:
2887		default:
2888			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
2889			break;
2890                }
2891		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2892		switch (hw->mac.type) {
2893		case ixgbe_mac_82598EB:
2894			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
2895			break;
2896		case ixgbe_mac_82599EB:
2897		case ixgbe_mac_X540:
2898		default:
2899			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
2900			break;
2901		}
2902
2903	}
2904
2905	if (hw->mac.type != ixgbe_mac_82598EB) {
2906		u32 dmatxctl, rttdcs;
2907#ifdef PCI_IOV
2908		enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
2909#endif
2910		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2911		dmatxctl |= IXGBE_DMATXCTL_TE;
2912		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2913		/* Disable arbiter to set MTQC */
2914		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2915		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2916		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2917#ifdef PCI_IOV
2918		IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
2919#else
2920		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2921#endif
2922		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2923		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2924	}
2925
2926	return;
2927}
2928
/*
 * Program the RSS redirection table (RETA/ERETA), hash key (RSSRK)
 * and hash-field selection (MRQC).  Four 8-bit queue indices are
 * packed per 32-bit RETA register; entries are filled round-robin
 * over the active queues (or from the kernel RSS indirection table
 * when RSS is compiled in).
 */
static void
ixgbe_initialise_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id, table_size, index_mult;
#ifdef	RSS
	u32 rss_hash_config;
#endif
#ifdef PCI_IOV
	enum ixgbe_iov_mode mode;
#endif

#ifdef	RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_key);
#else
	/* set up random bits */
	arc4rand(&rss_key, sizeof(rss_key), 0);
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (int i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the active queues */
		if (j == adapter->num_queues) j = 0;
#ifdef	RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		queue_id = rss_get_indirection_to_bucket(i);
		queue_id = queue_id % adapter->num_queues;
#else
		queue_id = (j * index_mult);
#endif
		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | ( ((uint32_t) queue_id) << 24);
		/* Flush every 4th entry: one full 32-bit register */
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (int i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
#ifdef	RSS
	mrqc = IXGBE_MRQC_RSSEN;
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
		device_printf(adapter->dev,
		    "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
		    "but not supported\n", __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
#else
	/*
	 * Disable UDP - IP fragments aren't currently being handled
	 * and so we end up with a mix of 2-tuple and 4-tuple
	 * traffic.
	 */
	mrqc = IXGBE_MRQC_RSSEN
	     | IXGBE_MRQC_RSS_FIELD_IPV4
	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
	     | IXGBE_MRQC_RSS_FIELD_IPV6
	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
	;
#endif /* RSS */
#ifdef PCI_IOV
	mode = ixgbe_get_iov_mode(adapter);
	mrqc |= ixgbe_get_mrqc(mode);
#endif
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
3045
3046
3047/*********************************************************************
3048 *
3049 *  Setup receive registers and features.
3050 *
3051 **********************************************************************/
3052#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3053
3054#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
3055
static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet   *ifp = adapter->ifp;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;


	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 82598 also discards pause frames / passes MAC control */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
#ifdef DEV_NETMAP
	/* crcstrip is conditional in netmap (in RDRXCTL too ?) */
	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
#endif /* DEV_NETMAP */
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Buffer size in KB units, rounded up, for SRRCTL.BSIZEPKT */
	bufsz = (adapter->rx_mbuf_sz +
	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Per-queue descriptor ring setup */
	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		int j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
			       (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the processing limit */
		rxr->process_limit = ixgbe_rx_process_limit;

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Split-receive packet-type selection (non-82598 only) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			      IXGBE_PSRTYPE_UDPHDR |
			      IXGBE_PSRTYPE_IPV4HDR |
			      IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialise_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* If no PCSD, enable IP payload checksum in the Rx descriptor */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
3170
3171
3172/*
3173** This routine is run via an vlan config EVENT,
3174** it enables us to use the HW Filter table since
3175** we can get the vlan id. This just creates the
3176** entry in the soft version of the VFTA, init will
3177** repopulate the real table.
3178*/
3179static void
3180ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3181{
3182	struct adapter	*adapter = ifp->if_softc;
3183	u16		index, bit;
3184
3185	if (ifp->if_softc !=  arg)   /* Not our event */
3186		return;
3187
3188	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3189		return;
3190
3191	IXGBE_CORE_LOCK(adapter);
3192	index = (vtag >> 5) & 0x7F;
3193	bit = vtag & 0x1F;
3194	adapter->shadow_vfta[index] |= (1 << bit);
3195	++adapter->num_vlans;
3196	ixgbe_setup_vlan_hw_support(adapter);
3197	IXGBE_CORE_UNLOCK(adapter);
3198}
3199
3200/*
3201** This routine is run via an vlan
3202** unconfig EVENT, remove our entry
3203** in the soft vfta.
3204*/
3205static void
3206ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3207{
3208	struct adapter	*adapter = ifp->if_softc;
3209	u16		index, bit;
3210
3211	if (ifp->if_softc !=  arg)
3212		return;
3213
3214	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3215		return;
3216
3217	IXGBE_CORE_LOCK(adapter);
3218	index = (vtag >> 5) & 0x7F;
3219	bit = vtag & 0x1F;
3220	adapter->shadow_vfta[index] &= ~(1 << bit);
3221	--adapter->num_vlans;
3222	/* Re-init to load the changes */
3223	ixgbe_setup_vlan_hw_support(adapter);
3224	IXGBE_CORE_UNLOCK(adapter);
3225}
3226
3227static void
3228ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3229{
3230	struct ifnet 	*ifp = adapter->ifp;
3231	struct ixgbe_hw *hw = &adapter->hw;
3232	struct rx_ring	*rxr;
3233	u32		ctrl;
3234
3235
3236	/*
3237	** We get here thru init_locked, meaning
3238	** a soft reset, this has already cleared
3239	** the VFTA and other state, so if there
3240	** have been no vlan's registered do nothing.
3241	*/
3242	if (adapter->num_vlans == 0)
3243		return;
3244
3245	/* Setup the queues for vlans */
3246	for (int i = 0; i < adapter->num_queues; i++) {
3247		rxr = &adapter->rx_rings[i];
3248		/* On 82599 the VLAN enable is per/queue in RXDCTL */
3249		if (hw->mac.type != ixgbe_mac_82598EB) {
3250			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
3251			ctrl |= IXGBE_RXDCTL_VME;
3252			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
3253		}
3254		rxr->vtag_strip = TRUE;
3255	}
3256
3257	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3258		return;
3259	/*
3260	** A soft reset zero's out the VFTA, so
3261	** we need to repopulate it now.
3262	*/
3263	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3264		if (adapter->shadow_vfta[i] != 0)
3265			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3266			    adapter->shadow_vfta[i]);
3267
3268	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3269	/* Enable the Filter Table if enabled */
3270	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3271		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3272		ctrl |= IXGBE_VLNCTRL_VFE;
3273	}
3274	if (hw->mac.type == ixgbe_mac_82598EB)
3275		ctrl |= IXGBE_VLNCTRL_VME;
3276	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3277}
3278
/*
 * Build and program the interrupt enable mask (EIMS), set the MSI-X
 * auto-clear mask (EIAC), and enable every queue vector.
 */
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from everything except the per-queue RX/TX causes */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	/* Enable Fan Failure detection */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		    mask |= IXGBE_EIMS_GPI_SDP1;

	/* Add MAC-specific causes */
	switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			mask |= IXGBE_EIMS_ECC;
			/* Temperature sensor on some adapters */
			mask |= IXGBE_EIMS_GPI_SDP0;
			/* SFP+ (RX_LOS_N & MOD_ABS_N) */
			mask |= IXGBE_EIMS_GPI_SDP1;
			mask |= IXGBE_EIMS_GPI_SDP2;
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
#ifdef PCI_IOV
			mask |= IXGBE_EIMS_MAILBOX;
#endif
			break;
		case ixgbe_mac_X540:
			/* Detect if Thermal Sensor is enabled */
			fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
			if (fwsm & IXGBE_FWSM_TS_ENABLED)
				mask |= IXGBE_EIMS_TS;
			mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
			break;
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			/* MAC thermal sensor is automatically enabled */
			mask |= IXGBE_EIMS_TS;
			/* Some devices use SDP0 for important information */
			if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
			    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
				mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
			mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
#ifdef PCI_IOV
			mask |= IXGBE_EIMS_MAILBOX;
#endif
		/* falls through */
		default:
			break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
#ifdef PCI_IOV
		mask &= ~IXGBE_EIMS_MAILBOX;
#endif
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	** Now enable all queues, this is done separately to
	** allow for handling the extended (beyond 32) MSIX
	** vectors that can be used by 82599
	*/
        for (int i = 0; i < adapter->num_queues; i++, que++)
                ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
3362
3363static void
3364ixgbe_disable_intr(struct adapter *adapter)
3365{
3366	if (adapter->msix_mem)
3367		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3368	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3369		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3370	} else {
3371		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3372		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3373		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3374	}
3375	IXGBE_WRITE_FLUSH(&adapter->hw);
3376	return;
3377}
3378
3379/*
3380** Get the width and transaction speed of
3381** the slot this adapter is plugged into.
3382*/
static void
ixgbe_get_slot_info(struct ixgbe_hw *hw)
{
	device_t		dev = ((struct ixgbe_osdep *)hw->back)->dev;
	struct ixgbe_mac_info	*mac = &hw->mac;
	u16			link;
	u32			offset;

	/* For most devices simply call the shared code routine */
	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
		ixgbe_get_bus_info(hw);
		/* These devices don't use PCI-E */
		switch (hw->mac.type) {
		case ixgbe_mac_X550EM_x:
			return;
		default:
			goto display;
		}
	}

	/*
	** For the Quad port adapter we need to parse back
	** up the PCI tree to find the speed of the expansion
	** slot into which this adapter is plugged. A bit more work.
	*/
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n",
	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_get_parent(device_get_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n",
	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
#endif
	/* Now get the PCI Express Capabilities offset */
	/* NOTE(review): pci_find_cap() return value is not checked; if the
	 * capability is absent, `offset` is used uninitialized — confirm
	 * all devices reaching here are PCIe. */
	pci_find_cap(dev, PCIY_EXPRESS, &offset);
	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
	/* Decode negotiated link width */
	switch (link & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	/* Decode negotiated link speed */
	switch (link & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	case IXGBE_PCI_LINK_SPEED_8000:
		hw->bus.speed = ixgbe_bus_speed_8000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	mac->ops.set_lan_id(hw);

display:
	/* NOTE(review): an x2 width prints "Unknown" — no x2 case below */
	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));

	/* Warn when the slot cannot feed the adapter at full rate */
	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this card\n     is not sufficient for"
		    " optimal performance.\n");
		device_printf(dev, "For optimal performance a x8 "
		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
        }
	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this card\n     is not sufficient for"
		    " optimal performance.\n");
		device_printf(dev, "For optimal performance a x8 "
		    "PCIE Gen3 slot is required.\n");
        }

	return;
}
3488
3489
3490/*
3491** Setup the correct IVAR register for a particular MSIX interrupt
3492**   (yes this is all very magic and confusing :)
3493**  - entry is the register array entry
3494**  - vector is the MSIX vector for this queue
3495**  - type is RX/TX/MISC
3496*/
3497static void
3498ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3499{
3500	struct ixgbe_hw *hw = &adapter->hw;
3501	u32 ivar, index;
3502
3503	vector |= IXGBE_IVAR_ALLOC_VAL;
3504
3505	switch (hw->mac.type) {
3506
3507	case ixgbe_mac_82598EB:
3508		if (type == -1)
3509			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3510		else
3511			entry += (type * 64);
3512		index = (entry >> 2) & 0x1F;
3513		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3514		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3515		ivar |= (vector << (8 * (entry & 0x3)));
3516		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3517		break;
3518
3519	case ixgbe_mac_82599EB:
3520	case ixgbe_mac_X540:
3521	case ixgbe_mac_X550:
3522	case ixgbe_mac_X550EM_x:
3523		if (type == -1) { /* MISC IVAR */
3524			index = (entry & 1) * 8;
3525			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3526			ivar &= ~(0xFF << index);
3527			ivar |= (vector << index);
3528			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3529		} else {	/* RX/TX IVARS */
3530			index = (16 * (entry & 1)) + (8 * type);
3531			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3532			ivar &= ~(0xFF << index);
3533			ivar |= (vector << index);
3534			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3535		}
3536
3537	default:
3538		break;
3539	}
3540}
3541
3542static void
3543ixgbe_configure_ivars(struct adapter *adapter)
3544{
3545	struct  ix_queue	*que = adapter->queues;
3546	u32			newitr;
3547
3548	if (ixgbe_max_interrupt_rate > 0)
3549		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3550	else {
3551		/*
3552		** Disable DMA coalescing if interrupt moderation is
3553		** disabled.
3554		*/
3555		adapter->dmac = 0;
3556		newitr = 0;
3557	}
3558
3559        for (int i = 0; i < adapter->num_queues; i++, que++) {
3560		struct rx_ring *rxr = &adapter->rx_rings[i];
3561		struct tx_ring *txr = &adapter->tx_rings[i];
3562		/* First the RX queue entry */
3563                ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3564		/* ... and the TX */
3565		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3566		/* Set an Initial EITR value */
3567                IXGBE_WRITE_REG(&adapter->hw,
3568                    IXGBE_EITR(que->msix), newitr);
3569	}
3570
3571	/* For the Link interrupt */
3572        ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3573}
3574
3575/*
3576** ixgbe_sfp_probe - called in the local timer to
3577** determine if a port had optics inserted.
3578*/
3579static bool
3580ixgbe_sfp_probe(struct adapter *adapter)
3581{
3582	struct ixgbe_hw	*hw = &adapter->hw;
3583	device_t	dev = adapter->dev;
3584	bool		result = FALSE;
3585
3586	if ((hw->phy.type == ixgbe_phy_nl) &&
3587	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3588		s32 ret = hw->phy.ops.identify_sfp(hw);
3589		if (ret)
3590                        goto out;
3591		ret = hw->phy.ops.reset(hw);
3592		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3593			device_printf(dev,"Unsupported SFP+ module detected!");
3594			printf(" Reload driver with supported module.\n");
3595			adapter->sfp_probe = FALSE;
3596                        goto out;
3597		} else
3598			device_printf(dev,"SFP+ module detected!\n");
3599		/* We now have supported optics */
3600		adapter->sfp_probe = FALSE;
3601		/* Set the optics type so system reports correctly */
3602		ixgbe_setup_optics(adapter);
3603		result = TRUE;
3604	}
3605out:
3606	return (result);
3607}
3608
3609/*
3610** Tasklet handler for MSIX Link interrupts
3611**  - do outside interrupt since it might sleep
3612*/
3613static void
3614ixgbe_handle_link(void *context, int pending)
3615{
3616	struct adapter  *adapter = context;
3617
3618	ixgbe_check_link(&adapter->hw,
3619	    &adapter->link_speed, &adapter->link_up, 0);
3620	ixgbe_update_link_status(adapter);
3621}
3622
3623/*
3624** Tasklet for handling SFP module interrupts
3625*/
3626static void
3627ixgbe_handle_mod(void *context, int pending)
3628{
3629	struct adapter  *adapter = context;
3630	struct ixgbe_hw *hw = &adapter->hw;
3631	device_t	dev = adapter->dev;
3632	u32 err;
3633
3634	err = hw->phy.ops.identify_sfp(hw);
3635	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3636		device_printf(dev,
3637		    "Unsupported SFP+ module type was detected.\n");
3638		return;
3639	}
3640
3641	err = hw->mac.ops.setup_sfp(hw);
3642	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3643		device_printf(dev,
3644		    "Setup failure - unsupported SFP+ module type.\n");
3645		return;
3646	}
3647	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3648	return;
3649}
3650
3651
3652/*
3653** Tasklet for handling MSF (multispeed fiber) interrupts
3654*/
3655static void
3656ixgbe_handle_msf(void *context, int pending)
3657{
3658	struct adapter  *adapter = context;
3659	struct ixgbe_hw *hw = &adapter->hw;
3660	u32 autoneg;
3661	bool negotiate;
3662	int err;
3663
3664	err = hw->phy.ops.identify_sfp(hw);
3665	if (!err) {
3666		ixgbe_setup_optics(adapter);
3667		INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3668	}
3669
3670	autoneg = hw->phy.autoneg_advertised;
3671	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3672		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3673	if (hw->mac.ops.setup_link)
3674		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3675
3676	ifmedia_removeall(&adapter->media);
3677	ixgbe_add_media_types(adapter);
3678	return;
3679}
3680
3681/*
3682** Tasklet for handling interrupts from an external PHY
3683*/
3684static void
3685ixgbe_handle_phy(void *context, int pending)
3686{
3687	struct adapter  *adapter = context;
3688	struct ixgbe_hw *hw = &adapter->hw;
3689	int error;
3690
3691	error = hw->phy.ops.handle_lasi(hw);
3692	if (error == IXGBE_ERR_OVERTEMP)
3693		device_printf(adapter->dev,
3694		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3695		    " PHY will downshift to lower power state!\n");
3696	else if (error)
3697		device_printf(adapter->dev,
3698		    "Error handling LASI interrupt: %d\n",
3699		    error);
3700	return;
3701}
3702
3703#ifdef IXGBE_FDIR
3704/*
3705** Tasklet for reinitializing the Flow Director filter table
3706*/
3707static void
3708ixgbe_reinit_fdir(void *context, int pending)
3709{
3710	struct adapter  *adapter = context;
3711	struct ifnet   *ifp = adapter->ifp;
3712
3713	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3714		return;
3715	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3716	adapter->fdir_reinit = 0;
3717	/* re-enable flow director interrupts */
3718	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3719	/* Restart the interface */
3720	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3721	return;
3722}
3723#endif
3724
3725/*********************************************************************
3726 *
3727 *  Configure DMA Coalescing
3728 *
3729 **********************************************************************/
3730static void
3731ixgbe_config_dmac(struct adapter *adapter)
3732{
3733	struct ixgbe_hw *hw = &adapter->hw;
3734	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3735
3736	if (hw->mac.type < ixgbe_mac_X550 ||
3737	    !hw->mac.ops.dmac_config)
3738		return;
3739
3740	if (dcfg->watchdog_timer ^ adapter->dmac ||
3741	    dcfg->link_speed ^ adapter->link_speed) {
3742		dcfg->watchdog_timer = adapter->dmac;
3743		dcfg->fcoe_en = false;
3744		dcfg->link_speed = adapter->link_speed;
3745		dcfg->num_tcs = 1;
3746
3747		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3748		    dcfg->watchdog_timer, dcfg->link_speed);
3749
3750		hw->mac.ops.dmac_config(hw);
3751	}
3752}
3753
3754/*
3755 * Checks whether the adapter supports Energy Efficient Ethernet
3756 * or not, based on device ID.
3757 */
3758static void
3759ixgbe_check_eee_support(struct adapter *adapter)
3760{
3761	struct ixgbe_hw *hw = &adapter->hw;
3762
3763	adapter->eee_enabled = !!(hw->mac.ops.setup_eee);
3764}
3765
3766/*
3767 * Checks whether the adapter's ports are capable of
3768 * Wake On LAN by reading the adapter's NVM.
3769 *
3770 * Sets each port's hw->wol_enabled value depending
3771 * on the value read here.
3772 */
3773static void
3774ixgbe_check_wol_support(struct adapter *adapter)
3775{
3776	struct ixgbe_hw *hw = &adapter->hw;
3777	u16 dev_caps = 0;
3778
3779	/* Find out WoL support for port */
3780	adapter->wol_support = hw->wol_enabled = 0;
3781	ixgbe_get_device_caps(hw, &dev_caps);
3782	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3783	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3784	        hw->bus.func == 0))
3785	    adapter->wol_support = hw->wol_enabled = 1;
3786
3787	/* Save initial wake up filter configuration */
3788	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3789
3790	return;
3791}
3792
3793/*
3794 * Prepare the adapter/port for LPLU and/or WoL
3795 */
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	/* Caller must hold the core lock (ixgbe_stop() requires it) */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
	    && hw->phy.ops.enter_lplu) {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

		/* X550EM baseT adapters need a special LPLU flow */
		/* PHY reset is disabled around stop+LPLU so the PHY keeps
		 * its low-power state across the shutdown sequence */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	return error;
}
3843
3844/**********************************************************************
3845 *
3846 *  Update the board statistics counters.
3847 *
3848 **********************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 missed_rx = 0, bprc, lxon, lxoff, total;
	u64 total_missed_rx = 0;

	/* NOTE(review): missed_rx and total_missed_rx are never updated in
	 * this revision (no IXGBE_MPC reads visible), so the gprc missed-rx
	 * adjustment below and IQDROPS always use 0 — verify against the
	 * packet-buffer MPC loop present in other driver revisions. */

	adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

	/* Per-queue RX/TX packet and RX-drop counters */
	for (int i = 0; i < 16; i++) {
		adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	adapter->stats.pf.gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* 64-bit byte counters are split low/high registers */
		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.pf.bprc += bprc;
	adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		adapter->stats.pf.mprc -= bprc;

	/* RX size-bucket histogram counters */
	adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.pf.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.pf.lxofftxc += lxoff;
	total = lxon + lxoff;

	/* TX counters include pause frames; back those out */
	adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.pf.gptc -= total;
	adapter->stats.pf.mptc -= total;
	adapter->stats.pf.ptc64 -= total;
	adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;

	adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
	IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
	IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
	IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
	IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
	IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
	IXGBE_SET_COLLISIONS(adapter, 0);
	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
	IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
	    + adapter->stats.pf.rlec);
}
3962
#if __FreeBSD_version >= 1100036
/*
 * if_get_counter method: report the interface statistics maintained
 * by ixgbe_update_stats_counters(), plus per-ring TX buf-ring drops
 * for OQDROPS; everything else falls back to the stack default.
 */
static uint64_t
ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct adapter *adapter = if_getsoftc(ifp);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	case IFCOUNTER_OMCASTS:
		return (adapter->omcasts);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (adapter->iqdrops);
	case IFCOUNTER_OQDROPS: {
		/* Sum software drops across every TX ring's buf_ring */
		struct tx_ring *txr = adapter->tx_rings;
		uint64_t drops = 0;

		for (int i = 0; i < adapter->num_queues; i++, txr++)
			drops += txr->br->br_drops;
		return (drops);
	}
	case IFCOUNTER_IERRORS:
		return (adapter->ierrors);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#endif
4003
4004/** ixgbe_sysctl_tdh_handler - Handler function
4005 *  Retrieves the TDH value from the hardware
4006 */
4007static int
4008ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
4009{
4010	int error;
4011
4012	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4013	if (!txr) return 0;
4014
4015	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
4016	error = sysctl_handle_int(oidp, &val, 0, req);
4017	if (error || !req->newptr)
4018		return error;
4019	return 0;
4020}
4021
4022/** ixgbe_sysctl_tdt_handler - Handler function
4023 *  Retrieves the TDT value from the hardware
4024 */
4025static int
4026ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
4027{
4028	int error;
4029
4030	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
4031	if (!txr) return 0;
4032
4033	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
4034	error = sysctl_handle_int(oidp, &val, 0, req);
4035	if (error || !req->newptr)
4036		return error;
4037	return 0;
4038}
4039
4040/** ixgbe_sysctl_rdh_handler - Handler function
4041 *  Retrieves the RDH value from the hardware
4042 */
4043static int
4044ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
4045{
4046	int error;
4047
4048	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4049	if (!rxr) return 0;
4050
4051	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
4052	error = sysctl_handle_int(oidp, &val, 0, req);
4053	if (error || !req->newptr)
4054		return error;
4055	return 0;
4056}
4057
4058/** ixgbe_sysctl_rdt_handler - Handler function
4059 *  Retrieves the RDT value from the hardware
4060 */
4061static int
4062ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
4063{
4064	int error;
4065
4066	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
4067	if (!rxr) return 0;
4068
4069	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
4070	error = sysctl_handle_int(oidp, &val, 0, req);
4071	if (error || !req->newptr)
4072		return error;
4073	return 0;
4074}
4075
/*
 * Sysctl handler for a queue's interrupt rate (interrupts/sec).
 * Reading converts the EITR interval field of the queue's MSIX
 * vector to a rate; writing a new rate reprograms EITR and records
 * the value in the ixgbe_max_interrupt_rate tunable (0 = no limit).
 */
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
	unsigned int reg, usec, rate;

	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
	/* Interval lives in bits 3..11 of EITR; 500000/interval gives
	 * the rate (the write path below uses 4000000/rate unshifted,
	 * i.e. 8 * 500000/rate, so the two directions are consistent). */
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	error = sysctl_handle_int(oidp, &rate, 0, req);
	if (error || !req->newptr)
		return error;
	reg &= ~0xfff; /* default, no limitation */
	ixgbe_max_interrupt_rate = 0;
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;	/* clamp to the minimum accepted rate */
		ixgbe_max_interrupt_rate = rate;
		/* Store the new interval, already aligned to bits 3..11. */
		reg |= ((4000000/rate) & 0xff8 );
	}
	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
	return 0;
}
4103
/*
 * Register the device-level sysctl knobs: the always-present ones
 * (flow control, interrupt moderation, advertised speed, thermal
 * test), then — conditionally on MAC type / device ID — the DMA
 * coalescing, EEE, WoL and external-PHY nodes.
 */
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
			ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);

        SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
			CTLFLAG_RW,
			&ixgbe_enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
			ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
			ixgbe_sysctl_thermal_test, "I", "Thermal Test");

	/* for X550 devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_dmac, "I", "DMA Coalesce");

	/* for X550T and X550EM backplane devices */
	/* presence of the setup_eee hook marks EEE-capable hardware */
	if (hw->mac.ops.setup_eee) {
		struct sysctl_oid *eee_node;
		struct sysctl_oid_list *eee_list;

		eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
					   CTLFLAG_RD, NULL,
					   "Energy Efficient Ethernet sysctls");
		eee_list = SYSCTL_CHILDREN(eee_node);

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_eee_enable, "I",
				"Enable or Disable EEE");

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_eee_negotiated, "I",
				"EEE negotiated on link");

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_eee_tx_lpi_status, "I",
				"Whether or not TX link is in LPI state");

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_eee_rx_lpi_status, "I",
				"Whether or not RX link is in LPI state");
	}

	/* for certain 10GBaseT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550T ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_wol_enable, "I",
				"Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_wufc, "I",
				"Enable/Disable Wake Up Filters");
	}

	/* for X550EM 10GBaseT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
					   CTLFLAG_RD, NULL,
					   "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_phy_temp, "I",
				"Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_phy_overtemp_occurred, "I",
				"External PHY High Temperature Event Occurred");
	}
}
4204
4205/*
4206 * Add sysctl variables, one per statistic, to the system.
4207 */
4208static void
4209ixgbe_add_hw_stats(struct adapter *adapter)
4210{
4211	device_t dev = adapter->dev;
4212
4213	struct tx_ring *txr = adapter->tx_rings;
4214	struct rx_ring *rxr = adapter->rx_rings;
4215
4216	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
4217	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
4218	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
4219	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
4220
4221	struct sysctl_oid *stat_node, *queue_node;
4222	struct sysctl_oid_list *stat_list, *queue_list;
4223
4224#define QUEUE_NAME_LEN 32
4225	char namebuf[QUEUE_NAME_LEN];
4226
4227	/* Driver Statistics */
4228	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
4229			CTLFLAG_RD, &adapter->dropped_pkts,
4230			"Driver dropped packets");
4231	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
4232			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
4233			"m_defrag() failed");
4234	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
4235			CTLFLAG_RD, &adapter->watchdog_events,
4236			"Watchdog timeouts");
4237	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4238			CTLFLAG_RD, &adapter->link_irq,
4239			"Link MSIX IRQ Handled");
4240
4241	for (int i = 0; i < adapter->num_queues; i++, txr++) {
4242		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4243		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4244					    CTLFLAG_RD, NULL, "Queue Name");
4245		queue_list = SYSCTL_CHILDREN(queue_node);
4246
4247		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4248				CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4249				sizeof(&adapter->queues[i]),
4250				ixgbe_sysctl_interrupt_rate_handler, "IU",
4251				"Interrupt Rate");
4252		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4253				CTLFLAG_RD, &(adapter->queues[i].irqs),
4254				"irqs on this queue");
4255		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4256				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4257				ixgbe_sysctl_tdh_handler, "IU",
4258				"Transmit Descriptor Head");
4259		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4260				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4261				ixgbe_sysctl_tdt_handler, "IU",
4262				"Transmit Descriptor Tail");
4263		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4264				CTLFLAG_RD, &txr->tso_tx,
4265				"TSO");
4266		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4267				CTLFLAG_RD, &txr->no_tx_dma_setup,
4268				"Driver tx dma failure in xmit");
4269		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4270				CTLFLAG_RD, &txr->no_desc_avail,
4271				"Queue No Descriptor Available");
4272		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4273				CTLFLAG_RD, &txr->total_packets,
4274				"Queue Packets Transmitted");
4275		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4276				CTLFLAG_RD, &txr->br->br_drops,
4277				"Packets dropped in buf_ring");
4278	}
4279
4280	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4281		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4282		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4283					    CTLFLAG_RD, NULL, "Queue Name");
4284		queue_list = SYSCTL_CHILDREN(queue_node);
4285
4286		struct lro_ctrl *lro = &rxr->lro;
4287
4288		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4289		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4290					    CTLFLAG_RD, NULL, "Queue Name");
4291		queue_list = SYSCTL_CHILDREN(queue_node);
4292
4293		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4294				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4295				ixgbe_sysctl_rdh_handler, "IU",
4296				"Receive Descriptor Head");
4297		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4298				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4299				ixgbe_sysctl_rdt_handler, "IU",
4300				"Receive Descriptor Tail");
4301		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4302				CTLFLAG_RD, &rxr->rx_packets,
4303				"Queue Packets Received");
4304		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4305				CTLFLAG_RD, &rxr->rx_bytes,
4306				"Queue Bytes Received");
4307		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4308				CTLFLAG_RD, &rxr->rx_copies,
4309				"Copied RX Frames");
4310		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4311				CTLFLAG_RD, &lro->lro_queued, 0,
4312				"LRO Queued");
4313		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4314				CTLFLAG_RD, &lro->lro_flushed, 0,
4315				"LRO Flushed");
4316	}
4317
4318	/* MAC stats get the own sub node */
4319
4320	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4321				    CTLFLAG_RD, NULL, "MAC Statistics");
4322	stat_list = SYSCTL_CHILDREN(stat_node);
4323
4324	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4325			CTLFLAG_RD, &stats->crcerrs,
4326			"CRC Errors");
4327	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4328			CTLFLAG_RD, &stats->illerrc,
4329			"Illegal Byte Errors");
4330	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4331			CTLFLAG_RD, &stats->errbc,
4332			"Byte Errors");
4333	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4334			CTLFLAG_RD, &stats->mspdc,
4335			"MAC Short Packets Discarded");
4336	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4337			CTLFLAG_RD, &stats->mlfc,
4338			"MAC Local Faults");
4339	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4340			CTLFLAG_RD, &stats->mrfc,
4341			"MAC Remote Faults");
4342	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4343			CTLFLAG_RD, &stats->rlec,
4344			"Receive Length Errors");
4345
4346	/* Flow Control stats */
4347	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4348			CTLFLAG_RD, &stats->lxontxc,
4349			"Link XON Transmitted");
4350	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4351			CTLFLAG_RD, &stats->lxonrxc,
4352			"Link XON Received");
4353	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4354			CTLFLAG_RD, &stats->lxofftxc,
4355			"Link XOFF Transmitted");
4356	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4357			CTLFLAG_RD, &stats->lxoffrxc,
4358			"Link XOFF Received");
4359
4360	/* Packet Reception Stats */
4361	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4362			CTLFLAG_RD, &stats->tor,
4363			"Total Octets Received");
4364	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4365			CTLFLAG_RD, &stats->gorc,
4366			"Good Octets Received");
4367	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4368			CTLFLAG_RD, &stats->tpr,
4369			"Total Packets Received");
4370	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4371			CTLFLAG_RD, &stats->gprc,
4372			"Good Packets Received");
4373	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4374			CTLFLAG_RD, &stats->mprc,
4375			"Multicast Packets Received");
4376	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4377			CTLFLAG_RD, &stats->bprc,
4378			"Broadcast Packets Received");
4379	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4380			CTLFLAG_RD, &stats->prc64,
4381			"64 byte frames received ");
4382	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4383			CTLFLAG_RD, &stats->prc127,
4384			"65-127 byte frames received");
4385	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4386			CTLFLAG_RD, &stats->prc255,
4387			"128-255 byte frames received");
4388	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4389			CTLFLAG_RD, &stats->prc511,
4390			"256-511 byte frames received");
4391	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4392			CTLFLAG_RD, &stats->prc1023,
4393			"512-1023 byte frames received");
4394	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4395			CTLFLAG_RD, &stats->prc1522,
4396			"1023-1522 byte frames received");
4397	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4398			CTLFLAG_RD, &stats->ruc,
4399			"Receive Undersized");
4400	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4401			CTLFLAG_RD, &stats->rfc,
4402			"Fragmented Packets Received ");
4403	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4404			CTLFLAG_RD, &stats->roc,
4405			"Oversized Packets Received");
4406	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4407			CTLFLAG_RD, &stats->rjc,
4408			"Received Jabber");
4409	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4410			CTLFLAG_RD, &stats->mngprc,
4411			"Management Packets Received");
4412	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4413			CTLFLAG_RD, &stats->mngptc,
4414			"Management Packets Dropped");
4415	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4416			CTLFLAG_RD, &stats->xec,
4417			"Checksum Errors");
4418
4419	/* Packet Transmission Stats */
4420	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4421			CTLFLAG_RD, &stats->gotc,
4422			"Good Octets Transmitted");
4423	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4424			CTLFLAG_RD, &stats->tpt,
4425			"Total Packets Transmitted");
4426	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4427			CTLFLAG_RD, &stats->gptc,
4428			"Good Packets Transmitted");
4429	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4430			CTLFLAG_RD, &stats->bptc,
4431			"Broadcast Packets Transmitted");
4432	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4433			CTLFLAG_RD, &stats->mptc,
4434			"Multicast Packets Transmitted");
4435	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4436			CTLFLAG_RD, &stats->mngptc,
4437			"Management Packets Transmitted");
4438	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4439			CTLFLAG_RD, &stats->ptc64,
4440			"64 byte frames transmitted ");
4441	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4442			CTLFLAG_RD, &stats->ptc127,
4443			"65-127 byte frames transmitted");
4444	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4445			CTLFLAG_RD, &stats->ptc255,
4446			"128-255 byte frames transmitted");
4447	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4448			CTLFLAG_RD, &stats->ptc511,
4449			"256-511 byte frames transmitted");
4450	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4451			CTLFLAG_RD, &stats->ptc1023,
4452			"512-1023 byte frames transmitted");
4453	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4454			CTLFLAG_RD, &stats->ptc1522,
4455			"1024-1522 byte frames transmitted");
4456}
4457
4458/*
4459** Set flow control using sysctl:
4460** Flow control values:
4461** 	0 - off
4462**	1 - rx pause
4463**	2 - tx pause
4464**	3 - full
4465*/
4466static int
4467ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4468{
4469	int error, last;
4470	struct adapter *adapter = (struct adapter *) arg1;
4471
4472	last = adapter->fc;
4473	error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4474	if ((error) || (req->newptr == NULL))
4475		return (error);
4476
4477	/* Don't bother if it's not changed */
4478	if (adapter->fc == last)
4479		return (0);
4480
4481	switch (adapter->fc) {
4482		case ixgbe_fc_rx_pause:
4483		case ixgbe_fc_tx_pause:
4484		case ixgbe_fc_full:
4485			adapter->hw.fc.requested_mode = adapter->fc;
4486			if (adapter->num_queues > 1)
4487				ixgbe_disable_rx_drop(adapter);
4488			break;
4489		case ixgbe_fc_none:
4490			adapter->hw.fc.requested_mode = ixgbe_fc_none;
4491			if (adapter->num_queues > 1)
4492				ixgbe_enable_rx_drop(adapter);
4493			break;
4494		default:
4495			adapter->fc = last;
4496			return (EINVAL);
4497	}
4498	/* Don't autoneg if forcing a value */
4499	adapter->hw.fc.disable_fc_autoneg = TRUE;
4500	ixgbe_fc_enable(&adapter->hw);
4501	return error;
4502}
4503
4504/*
4505** Control advertised link speed:
4506**	Flags:
4507**	0x1 - advertise 100 Mb
4508**	0x2 - advertise 1G
4509**	0x4 - advertise 10G
4510*/
4511static int
4512ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4513{
4514	int			error = 0, requested;
4515	struct adapter		*adapter;
4516	device_t		dev;
4517	struct ixgbe_hw		*hw;
4518	ixgbe_link_speed	speed = 0;
4519
4520	adapter = (struct adapter *) arg1;
4521	dev = adapter->dev;
4522	hw = &adapter->hw;
4523
4524	requested = adapter->advertise;
4525	error = sysctl_handle_int(oidp, &requested, 0, req);
4526	if ((error) || (req->newptr == NULL))
4527		return (error);
4528
4529	/* Checks to validate new value */
4530	if (adapter->advertise == requested) /* no change */
4531		return (0);
4532
4533	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4534	    (hw->phy.multispeed_fiber))) {
4535		device_printf(dev,
4536		    "Advertised speed can only be set on copper or "
4537		    "multispeed fiber media types.\n");
4538		return (EINVAL);
4539	}
4540
4541	if (requested < 0x1 || requested > 0x7) {
4542		device_printf(dev,
4543		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4544		return (EINVAL);
4545	}
4546
4547	if ((requested & 0x1)
4548	    && (hw->mac.type != ixgbe_mac_X540)
4549	    && (hw->mac.type != ixgbe_mac_X550)) {
4550		device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4551		return (EINVAL);
4552	}
4553
4554	/* Set new value and report new advertised mode */
4555	if (requested & 0x1)
4556		speed |= IXGBE_LINK_SPEED_100_FULL;
4557	if (requested & 0x2)
4558		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4559	if (requested & 0x4)
4560		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4561
4562	hw->mac.autotry_restart = TRUE;
4563	hw->mac.ops.setup_link(hw, speed, TRUE);
4564	adapter->advertise = requested;
4565
4566	return (error);
4567}
4568
4569/*
4570 * The following two sysctls are for X550 BaseT devices;
4571 * they deal with the external PHY used in them.
4572 */
4573static int
4574ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4575{
4576	struct adapter	*adapter = (struct adapter *) arg1;
4577	struct ixgbe_hw *hw = &adapter->hw;
4578	u16 reg;
4579
4580	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4581		device_printf(adapter->dev,
4582		    "Device has no supported external thermal sensor.\n");
4583		return (ENODEV);
4584	}
4585
4586	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4587				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4588				      &reg)) {
4589		device_printf(adapter->dev,
4590		    "Error reading from PHY's current temperature register\n");
4591		return (EAGAIN);
4592	}
4593
4594	/* Shift temp for output */
4595	reg = reg >> 8;
4596
4597	return (sysctl_handle_int(oidp, NULL, reg, req));
4598}
4599
4600/*
4601 * Reports whether the current PHY temperature is over
4602 * the overtemp threshold.
4603 *  - This is reported directly from the PHY
4604 */
4605static int
4606ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4607{
4608	struct adapter	*adapter = (struct adapter *) arg1;
4609	struct ixgbe_hw *hw = &adapter->hw;
4610	u16 reg;
4611
4612	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4613		device_printf(adapter->dev,
4614		    "Device has no supported external thermal sensor.\n");
4615		return (ENODEV);
4616	}
4617
4618	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4619				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4620				      &reg)) {
4621		device_printf(adapter->dev,
4622		    "Error reading from PHY's temperature status register\n");
4623		return (EAGAIN);
4624	}
4625
4626	/* Get occurrence bit */
4627	reg = !!(reg & 0x4000);
4628	return (sysctl_handle_int(oidp, 0, reg, req));
4629}
4630
4631/*
4632** Thermal Shutdown Trigger (internal MAC)
4633**   - Set this to 1 to cause an overtemp event to occur
4634*/
4635static int
4636ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4637{
4638	struct adapter	*adapter = (struct adapter *) arg1;
4639	struct ixgbe_hw *hw = &adapter->hw;
4640	int error, fire = 0;
4641
4642	error = sysctl_handle_int(oidp, &fire, 0, req);
4643	if ((error) || (req->newptr == NULL))
4644		return (error);
4645
4646	if (fire) {
4647		u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4648		reg |= IXGBE_EICR_TS;
4649		IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4650	}
4651
4652	return (0);
4653}
4654
4655/*
4656** Manage DMA Coalescing.
4657** Control values:
4658** 	0/1 - off / on (use default value of 1000)
4659**
4660**	Legal timer values are:
4661**	50,100,250,500,1000,2000,5000,10000
4662**
4663**	Turning off interrupt moderation will also turn this off.
4664*/
4665static int
4666ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4667{
4668	struct adapter *adapter = (struct adapter *) arg1;
4669	struct ixgbe_hw *hw = &adapter->hw;
4670	struct ifnet *ifp = adapter->ifp;
4671	int		error;
4672	u16		oldval;
4673
4674	oldval = adapter->dmac;
4675	error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4676	if ((error) || (req->newptr == NULL))
4677		return (error);
4678
4679	switch (hw->mac.type) {
4680	case ixgbe_mac_X550:
4681	case ixgbe_mac_X550EM_x:
4682		break;
4683	default:
4684		device_printf(adapter->dev,
4685		    "DMA Coalescing is only supported on X550 devices\n");
4686		return (ENODEV);
4687	}
4688
4689	switch (adapter->dmac) {
4690	case 0:
4691		/* Disabled */
4692		break;
4693	case 1: /* Enable and use default */
4694		adapter->dmac = 1000;
4695		break;
4696	case 50:
4697	case 100:
4698	case 250:
4699	case 500:
4700	case 1000:
4701	case 2000:
4702	case 5000:
4703	case 10000:
4704		/* Legal values - allow */
4705		break;
4706	default:
4707		/* Do nothing, illegal value */
4708		adapter->dmac = oldval;
4709		return (EINVAL);
4710	}
4711
4712	/* Re-initialize hardware if it's already running */
4713	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4714		ixgbe_init(adapter);
4715
4716	return (0);
4717}
4718
4719/*
4720 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4721 * Values:
4722 *	0 - disabled
4723 *	1 - enabled
4724 */
4725static int
4726ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4727{
4728	struct adapter *adapter = (struct adapter *) arg1;
4729	struct ixgbe_hw *hw = &adapter->hw;
4730	int new_wol_enabled;
4731	int error = 0;
4732
4733	new_wol_enabled = hw->wol_enabled;
4734	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4735	if ((error) || (req->newptr == NULL))
4736		return (error);
4737	if (new_wol_enabled == hw->wol_enabled)
4738		return (0);
4739
4740	if (new_wol_enabled > 0 && !adapter->wol_support)
4741		return (ENODEV);
4742	else
4743		hw->wol_enabled = !!(new_wol_enabled);
4744
4745	return (0);
4746}
4747
4748/*
4749 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4750 * if supported by the adapter.
4751 * Values:
4752 *	0 - disabled
4753 *	1 - enabled
4754 */
4755static int
4756ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4757{
4758	struct adapter *adapter = (struct adapter *) arg1;
4759	struct ixgbe_hw *hw = &adapter->hw;
4760	struct ifnet *ifp = adapter->ifp;
4761	int new_eee_enabled, error = 0;
4762
4763	new_eee_enabled = adapter->eee_enabled;
4764	error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4765	if ((error) || (req->newptr == NULL))
4766		return (error);
4767	if (new_eee_enabled == adapter->eee_enabled)
4768		return (0);
4769
4770	if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
4771		return (ENODEV);
4772	else
4773		adapter->eee_enabled = !!(new_eee_enabled);
4774
4775	/* Re-initialize hardware if it's already running */
4776	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4777		ixgbe_init(adapter);
4778
4779	return (0);
4780}
4781
4782/*
4783 * Read-only sysctl indicating whether EEE support was negotiated
4784 * on the link.
4785 */
4786static int
4787ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4788{
4789	struct adapter *adapter = (struct adapter *) arg1;
4790	struct ixgbe_hw *hw = &adapter->hw;
4791	bool status;
4792
4793	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4794
4795	return (sysctl_handle_int(oidp, 0, status, req));
4796}
4797
4798/*
4799 * Read-only sysctl indicating whether RX Link is in LPI state.
4800 */
4801static int
4802ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4803{
4804	struct adapter *adapter = (struct adapter *) arg1;
4805	struct ixgbe_hw *hw = &adapter->hw;
4806	bool status;
4807
4808	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4809	    IXGBE_EEE_RX_LPI_STATUS);
4810
4811	return (sysctl_handle_int(oidp, 0, status, req));
4812}
4813
4814/*
4815 * Read-only sysctl indicating whether TX Link is in LPI state.
4816 */
4817static int
4818ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4819{
4820	struct adapter *adapter = (struct adapter *) arg1;
4821	struct ixgbe_hw *hw = &adapter->hw;
4822	bool status;
4823
4824	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4825	    IXGBE_EEE_TX_LPI_STATUS);
4826
4827	return (sysctl_handle_int(oidp, 0, status, req));
4828}
4829
4830/*
4831 * Sysctl to enable/disable the types of packets that the
4832 * adapter will wake up on upon receipt.
4833 * WUFC - Wake Up Filter Control
4834 * Flags:
4835 *	0x1  - Link Status Change
4836 *	0x2  - Magic Packet
4837 *	0x4  - Direct Exact
4838 *	0x8  - Directed Multicast
4839 *	0x10 - Broadcast
4840 *	0x20 - ARP/IPv4 Request Packet
4841 *	0x40 - Direct IPv4 Packet
4842 *	0x80 - Direct IPv6 Packet
4843 *
4844 * Setting another flag will cause the sysctl to return an
4845 * error.
4846 */
4847static int
4848ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4849{
4850	struct adapter *adapter = (struct adapter *) arg1;
4851	int error = 0;
4852	u32 new_wufc;
4853
4854	new_wufc = adapter->wufc;
4855
4856	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4857	if ((error) || (req->newptr == NULL))
4858		return (error);
4859	if (new_wufc == adapter->wufc)
4860		return (0);
4861
4862	if (new_wufc & 0xffffff00)
4863		return (EINVAL);
4864	else {
4865		new_wufc &= 0xff;
4866		new_wufc |= (0xffffff & adapter->wufc);
4867		adapter->wufc = new_wufc;
4868	}
4869
4870	return (0);
4871}
4872
4873/*
4874** Enable the hardware to drop packets when the buffer is
4875** full. This is useful when multiqueue,so that no single
4876** queue being full stalls the entire RX engine. We only
4877** enable this when Multiqueue AND when Flow Control is
4878** disabled.
4879*/
4880static void
4881ixgbe_enable_rx_drop(struct adapter *adapter)
4882{
4883        struct ixgbe_hw *hw = &adapter->hw;
4884
4885	for (int i = 0; i < adapter->num_queues; i++) {
4886		struct rx_ring *rxr = &adapter->rx_rings[i];
4887        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4888        	srrctl |= IXGBE_SRRCTL_DROP_EN;
4889        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4890	}
4891#ifdef PCI_IOV
4892	/* enable drop for each vf */
4893	for (int i = 0; i < adapter->num_vfs; i++) {
4894		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4895		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4896		    IXGBE_QDE_ENABLE));
4897	}
4898#endif
4899}
4900
4901static void
4902ixgbe_disable_rx_drop(struct adapter *adapter)
4903{
4904        struct ixgbe_hw *hw = &adapter->hw;
4905
4906	for (int i = 0; i < adapter->num_queues; i++) {
4907		struct rx_ring *rxr = &adapter->rx_rings[i];
4908        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4909        	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4910        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4911	}
4912#ifdef PCI_IOV
4913	/* disable drop for each vf */
4914	for (int i = 0; i < adapter->num_vfs; i++) {
4915		IXGBE_WRITE_REG(hw, IXGBE_QDE,
4916		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4917	}
4918#endif
4919}
4920
4921static void
4922ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4923{
4924	u32 mask;
4925
4926	switch (adapter->hw.mac.type) {
4927	case ixgbe_mac_82598EB:
4928		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4929		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4930		break;
4931	case ixgbe_mac_82599EB:
4932	case ixgbe_mac_X540:
4933	case ixgbe_mac_X550:
4934	case ixgbe_mac_X550EM_x:
4935		mask = (queues & 0xFFFFFFFF);
4936		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4937		mask = (queues >> 32);
4938		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4939		break;
4940	default:
4941		break;
4942	}
4943}
4944
4945#ifdef PCI_IOV
4946
4947/*
4948** Support functions for SRIOV/VF management
4949*/
4950
4951static void
4952ixgbe_ping_all_vfs(struct adapter *adapter)
4953{
4954	struct ixgbe_vf *vf;
4955
4956	for (int i = 0; i < adapter->num_vfs; i++) {
4957		vf = &adapter->vfs[i];
4958		if (vf->flags & IXGBE_VF_ACTIVE)
4959			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
4960	}
4961}
4962
4963
/*
** Program the default VLAN for a VF pool.  Updates the pool's receive
** filtering policy (VMOLR) and its default VLAN insertion register
** (VMVIR): tag == 0 lets the VF do its own tagging, while a non-zero
** tag is inserted into all of the VF's transmitted traffic.
*/
static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	/* Cache the tag; ixgbe_init_vf() reapplies it after reinit. */
	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promicuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		//vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
}
5002
5003
5004static boolean_t
5005ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
5006{
5007
5008	/*
5009	 * Frame size compatibility between PF and VF is only a problem on
5010	 * 82599-based cards.  X540 and later support any combination of jumbo
5011	 * frames on PFs and VFs.
5012	 */
5013	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
5014		return (TRUE);
5015
5016	switch (vf->api_ver) {
5017	case IXGBE_API_VER_1_0:
5018	case IXGBE_API_VER_UNKNOWN:
5019		/*
5020		 * On legacy (1.0 and older) VF versions, we don't support jumbo
5021		 * frames on either the PF or the VF.
5022		 */
5023		if (adapter->max_frame_size > ETHER_MAX_LEN ||
5024		    vf->max_frame_size > ETHER_MAX_LEN)
5025		    return (FALSE);
5026
5027		return (TRUE);
5028
5029		break;
5030	case IXGBE_API_VER_1_1:
5031	default:
5032		/*
5033		 * 1.1 or later VF versions always work if they aren't using
5034		 * jumbo frames.
5035		 */
5036		if (vf->max_frame_size <= ETHER_MAX_LEN)
5037			return (TRUE);
5038
5039		/*
5040		 * Jumbo frames only work with VFs if the PF is also using jumbo
5041		 * frames.
5042		 */
5043		if (adapter->max_frame_size <= ETHER_MAX_LEN)
5044			return (TRUE);
5045
5046		return (FALSE);
5047
5048	}
5049}
5050
5051
/*
** Reset a VF's PF-side state: restore its default VLAN, drop its
** receive-address (RAR) filter, and force a fresh mailbox API
** negotiation on the VF's next contact.
*/
static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	/* Remove the VF's MAC filter; re-added once a valid MAC is known. */
	ixgbe_clear_rar(&adapter->hw, vf->rar_index);

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
}
5063
5064
5065static void
5066ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
5067{
5068	struct ixgbe_hw *hw;
5069	uint32_t vf_index, vfte;
5070
5071	hw = &adapter->hw;
5072
5073	vf_index = IXGBE_VF_INDEX(vf->pool);
5074	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
5075	vfte |= IXGBE_VF_BIT(vf->pool);
5076	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
5077}
5078
5079
5080static void
5081ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
5082{
5083	struct ixgbe_hw *hw;
5084	uint32_t vf_index, vfre;
5085
5086	hw = &adapter->hw;
5087
5088	vf_index = IXGBE_VF_INDEX(vf->pool);
5089	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
5090	if (ixgbe_vf_frame_size_compatible(adapter, vf))
5091		vfre |= IXGBE_VF_BIT(vf->pool);
5092	else
5093		vfre &= ~IXGBE_VF_BIT(vf->pool);
5094	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
5095}
5096
5097
/*
** Handle a VF_RESET mailbox message: reset the VF's PF-side state,
** reinstall its MAC filter if a valid address is known, re-enable
** its queues, and reply with the permanent MAC address and multicast
** filter type (ACK when a MAC was programmed, NACK otherwise).
*/
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	/* Reinstall the MAC filter only if the stored address is valid. */
	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	/* The VF has completed the reset handshake: allow further messages. */
	vf->flags |= IXGBE_VF_CTS;

	/* Reply layout: message word, MAC address, multicast filter type. */
	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
}
5126
5127
5128static void
5129ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5130{
5131	uint8_t *mac;
5132
5133	mac = (uint8_t*)&msg[1];
5134
5135	/* Check that the VF has permission to change the MAC address. */
5136	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
5137		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5138		return;
5139	}
5140
5141	if (ixgbe_validate_mac_addr(mac) != 0) {
5142		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5143		return;
5144	}
5145
5146	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5147
5148	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
5149	    vf->pool, TRUE);
5150
5151	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5152}
5153
5154
5155/*
5156** VF multicast addresses are set by using the appropriate bit in
5157** 1 of 128 32 bit addresses (4096 possible).
5158*/
5159static void
5160ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
5161{
5162	u16	*list = (u16*)&msg[1];
5163	int	entries;
5164	u32	vmolr, vec_bit, vec_reg, mta_reg;
5165
5166	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
5167	entries = min(entries, IXGBE_MAX_VF_MC);
5168
5169	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
5170
5171	vf->num_mc_hashes = entries;
5172
5173	/* Set the appropriate MTA bit */
5174	for (int i = 0; i < entries; i++) {
5175		vf->mc_hash[i] = list[i];
5176		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
5177                vec_bit = vf->mc_hash[i] & 0x1F;
5178                mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
5179                mta_reg |= (1 << vec_bit);
5180                IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
5181        }
5182
5183	vmolr |= IXGBE_VMOLR_ROMPE;
5184	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
5185	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5186	return;
5187}
5188
5189
5190static void
5191ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
5192{
5193	struct ixgbe_hw *hw;
5194	int enable;
5195	uint16_t tag;
5196
5197	hw = &adapter->hw;
5198	enable = IXGBE_VT_MSGINFO(msg[0]);
5199	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
5200
5201	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
5202		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5203		return;
5204	}
5205
5206	/* It is illegal to enable vlan tag 0. */
5207	if (tag == 0 && enable != 0){
5208		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5209		return;
5210	}
5211
5212	ixgbe_set_vfta(hw, tag, vf->pool, enable);
5213	ixgbe_send_vf_ack(adapter, vf, msg[0]);
5214}
5215
5216
/*
** Handle a VF SET_LPE (large packet enable) request: record the VF's
** requested maximum frame size and grow the device-wide maximum frame
** size (MHADD.MFS) if it is now too small.  Out-of-range requests are
** intentionally ACKed without effect.
*/
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	/* msg[1] is the requested frame size, CRC included. */
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	/* Read-modify-write MHADD only when the MFS field must grow. */
	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
}
5260
5261
/*
** Handler for the VF SET_MACVLAN mailbox request.  Extra unicast
** filters for a VF are not supported yet, so the request is always
** NACKed.
*/
static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
}
5269
5270
5271static void
5272ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
5273    uint32_t *msg)
5274{
5275
5276	switch (msg[1]) {
5277	case IXGBE_API_VER_1_0:
5278	case IXGBE_API_VER_1_1:
5279		vf->api_ver = msg[1];
5280		ixgbe_send_vf_ack(adapter, vf, msg[0]);
5281		break;
5282	default:
5283		vf->api_ver = IXGBE_API_VER_UNKNOWN;
5284		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5285		break;
5286	}
5287}
5288
5289
5290static void
5291ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
5292    uint32_t *msg)
5293{
5294	struct ixgbe_hw *hw;
5295	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
5296	int num_queues;
5297
5298	hw = &adapter->hw;
5299
5300	/* GET_QUEUES is not supported on pre-1.1 APIs. */
5301	switch (msg[0]) {
5302	case IXGBE_API_VER_1_0:
5303	case IXGBE_API_VER_UNKNOWN:
5304		ixgbe_send_vf_nack(adapter, vf, msg[0]);
5305		return;
5306	}
5307
5308	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
5309	    IXGBE_VT_MSGTYPE_CTS;
5310
5311	num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
5312	resp[IXGBE_VF_TX_QUEUES] = num_queues;
5313	resp[IXGBE_VF_RX_QUEUES] = num_queues;
5314	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
5315	resp[IXGBE_VF_DEF_QUEUE] = 0;
5316
5317	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
5318}
5319
5320
/*
** Read and dispatch one mailbox message from a VF.  A RESET request
** is honored unconditionally; every other request is NACKed until
** the VF has completed the reset handshake (IXGBE_VF_CTS set).
*/
static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	/*
	 * NOTE(review): KTR_MALLOC looks like an odd trace class for
	 * mailbox events -- confirm a dedicated class isn't intended.
	 */
	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
	    adapter->ifp->if_xname, msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	/* Refuse everything else until the VF has been reset. */
	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* Dispatch on the command field of the message word. */
	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		/* Unknown command: tell the VF it was rejected. */
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
}
5373
5374
5375/*
5376 * Tasklet for handling VF -> PF mailbox messages.
5377 */
5378static void
5379ixgbe_handle_mbx(void *context, int pending)
5380{
5381	struct adapter *adapter;
5382	struct ixgbe_hw *hw;
5383	struct ixgbe_vf *vf;
5384	int i;
5385
5386	adapter = context;
5387	hw = &adapter->hw;
5388
5389	IXGBE_CORE_LOCK(adapter);
5390	for (i = 0; i < adapter->num_vfs; i++) {
5391		vf = &adapter->vfs[i];
5392
5393		if (vf->flags & IXGBE_VF_ACTIVE) {
5394			if (ixgbe_check_for_rst(hw, vf->pool) == 0)
5395				ixgbe_process_vf_reset(adapter, vf);
5396
5397			if (ixgbe_check_for_msg(hw, vf->pool) == 0)
5398				ixgbe_process_vf_msg(adapter, vf);
5399
5400			if (ixgbe_check_for_ack(hw, vf->pool) == 0)
5401				ixgbe_process_vf_ack(adapter, vf);
5402		}
5403	}
5404	IXGBE_CORE_UNLOCK(adapter);
5405}
5406
5407
5408static int
5409ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
5410{
5411	struct adapter *adapter;
5412	enum ixgbe_iov_mode mode;
5413
5414	adapter = device_get_softc(dev);
5415	adapter->num_vfs = num_vfs;
5416	mode = ixgbe_get_iov_mode(adapter);
5417
5418	if (num_vfs > ixgbe_max_vfs(mode)) {
5419		adapter->num_vfs = 0;
5420		return (ENOSPC);
5421	}
5422
5423	IXGBE_CORE_LOCK(adapter);
5424
5425	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
5426	    M_NOWAIT | M_ZERO);
5427
5428	if (adapter->vfs == NULL) {
5429		adapter->num_vfs = 0;
5430		IXGBE_CORE_UNLOCK(adapter);
5431		return (ENOMEM);
5432	}
5433
5434	ixgbe_init_locked(adapter);
5435
5436	IXGBE_CORE_UNLOCK(adapter);
5437
5438	return (0);
5439}
5440
5441
5442static void
5443ixgbe_uninit_iov(device_t dev)
5444{
5445	struct ixgbe_hw *hw;
5446	struct adapter *adapter;
5447	uint32_t pf_reg, vf_reg;
5448
5449	adapter = device_get_softc(dev);
5450	hw = &adapter->hw;
5451
5452	IXGBE_CORE_LOCK(adapter);
5453
5454	/* Enable rx/tx for the PF and disable it for all VFs. */
5455	pf_reg = IXGBE_VF_INDEX(adapter->pool);
5456	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
5457	    IXGBE_VF_BIT(adapter->pool));
5458	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
5459	    IXGBE_VF_BIT(adapter->pool));
5460
5461	if (pf_reg == 0)
5462		vf_reg = 1;
5463	else
5464		vf_reg = 0;
5465	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
5466	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
5467
5468	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
5469
5470	free(adapter->vfs, M_IXGBE);
5471	adapter->vfs = NULL;
5472	adapter->num_vfs = 0;
5473
5474	IXGBE_CORE_UNLOCK(adapter);
5475}
5476
5477
5478static void
5479ixgbe_initialize_iov(struct adapter *adapter)
5480{
5481	struct ixgbe_hw *hw = &adapter->hw;
5482	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
5483	enum ixgbe_iov_mode mode;
5484	int i;
5485
5486	mode = ixgbe_get_iov_mode(adapter);
5487	if (mode == IXGBE_NO_VM)
5488		return;
5489
5490	IXGBE_CORE_LOCK_ASSERT(adapter);
5491
5492	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
5493	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
5494
5495	switch (mode) {
5496	case IXGBE_64_VM:
5497		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
5498		break;
5499	case IXGBE_32_VM:
5500		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
5501		break;
5502	default:
5503		panic("Unexpected SR-IOV mode %d", mode);
5504	}
5505	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
5506
5507	mtqc = IXGBE_MTQC_VT_ENA;
5508	switch (mode) {
5509	case IXGBE_64_VM:
5510		mtqc |= IXGBE_MTQC_64VF;
5511		break;
5512	case IXGBE_32_VM:
5513		mtqc |= IXGBE_MTQC_32VF;
5514		break;
5515	default:
5516		panic("Unexpected SR-IOV mode %d", mode);
5517	}
5518	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
5519
5520
5521	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
5522	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
5523	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
5524	switch (mode) {
5525	case IXGBE_64_VM:
5526		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
5527		break;
5528	case IXGBE_32_VM:
5529		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
5530		break;
5531	default:
5532		panic("Unexpected SR-IOV mode %d", mode);
5533	}
5534	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
5535
5536
5537	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5538	gcr_ext &= ~IXGBE_GPIE_VTMODE_MASK;
5539	switch (mode) {
5540	case IXGBE_64_VM:
5541		gpie |= IXGBE_GPIE_VTMODE_64;
5542		break;
5543	case IXGBE_32_VM:
5544		gpie |= IXGBE_GPIE_VTMODE_32;
5545		break;
5546	default:
5547		panic("Unexpected SR-IOV mode %d", mode);
5548	}
5549	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5550
5551	/* Enable rx/tx for the PF. */
5552	vf_reg = IXGBE_VF_INDEX(adapter->pool);
5553	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg),
5554	    IXGBE_VF_BIT(adapter->pool));
5555	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg),
5556	    IXGBE_VF_BIT(adapter->pool));
5557
5558	/* Allow VM-to-VM communication. */
5559	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
5560
5561	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
5562	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
5563	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
5564
5565	for (i = 0; i < adapter->num_vfs; i++)
5566		ixgbe_init_vf(adapter, &adapter->vfs[i]);
5567}
5568
5569
5570/*
5571** Check the max frame setting of all active VF's
5572*/
5573static void
5574ixgbe_recalculate_max_frame(struct adapter *adapter)
5575{
5576	struct ixgbe_vf *vf;
5577
5578	IXGBE_CORE_LOCK_ASSERT(adapter);
5579
5580	for (int i = 0; i < adapter->num_vfs; i++) {
5581		vf = &adapter->vfs[i];
5582		if (vf->flags & IXGBE_VF_ACTIVE)
5583			ixgbe_update_max_frame(adapter, vf->max_frame_size);
5584	}
5585}
5586
5587
/*
** Bring up PF-side state for a single VF: unmask its mailbox
** interrupt, restore its default VLAN and MAC filter, enable its
** tx/rx queues, and notify the VF with a PF control message.
** No-op for inactive VFs.  Caller must hold the core lock.
*/
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	/* Unmask this pool's bit in the PF mailbox interrupt mask. */
	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	/* Reinstall the VF's MAC filter if a valid address is known. */
	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
}
5620
5621static int
5622ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
5623{
5624	struct adapter *adapter;
5625	struct ixgbe_vf *vf;
5626	const void *mac;
5627
5628	adapter = device_get_softc(dev);
5629
5630	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
5631	    vfnum, adapter->num_vfs));
5632
5633	IXGBE_CORE_LOCK(adapter);
5634	vf = &adapter->vfs[vfnum];
5635	vf->pool= vfnum;
5636
5637	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
5638	vf->rar_index = vfnum + 1;
5639	vf->default_vlan = 0;
5640	vf->max_frame_size = ETHER_MAX_LEN;
5641	ixgbe_update_max_frame(adapter, vf->max_frame_size);
5642
5643	if (nvlist_exists_binary(config, "mac-addr")) {
5644		mac = nvlist_get_binary(config, "mac-addr", NULL);
5645		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
5646		if (nvlist_get_bool(config, "allow-set-mac"))
5647			vf->flags |= IXGBE_VF_CAP_MAC;
5648	} else
5649		/*
5650		 * If the administrator has not specified a MAC address then
5651		 * we must allow the VF to choose one.
5652		 */
5653		vf->flags |= IXGBE_VF_CAP_MAC;
5654
5655	vf->flags = IXGBE_VF_ACTIVE;
5656
5657	ixgbe_init_vf(adapter, vf);
5658	IXGBE_CORE_UNLOCK(adapter);
5659
5660	return (0);
5661}
5662#endif /* PCI_IOV */
5663
5664