if_ix.c revision 283668
1/******************************************************************************
2
3  Copyright (c) 2001-2015, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: stable/10/sys/dev/ixgbe/if_ix.c 283668 2015-05-28 20:56:11Z erj $*/
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#endif
40
41#include "ixgbe.h"
42
43/*********************************************************************
44 *  Set this to one to display debug statistics
45 *********************************************************************/
46int             ixgbe_display_debug_stats = 0;
47
48/*********************************************************************
49 *  Driver version
50 *********************************************************************/
51char ixgbe_driver_version[] = "2.8.3";
52
53/*********************************************************************
54 *  PCI Device ID Table
55 *
56 *  Used by probe to select devices to load on
57 *  Last field stores an index into ixgbe_strings
58 *  Last entry must be all 0s
59 *
60 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 *********************************************************************/
62
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	/*
	 * A subvendor/subdevice of 0 acts as a wildcard: ixgbe_probe()
	 * accepts any subsystem ID for such entries.  The final field is
	 * an index into ixgbe_strings[].
	 */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
98
99/*********************************************************************
100 *  Table of branding strings
101 *********************************************************************/
102
static char    *ixgbe_strings[] = {
	/* Index 0: referenced by the string-index field of the PCI ID table */
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
106
107/*********************************************************************
108 *  Function prototypes
109 *********************************************************************/
110static int      ixgbe_probe(device_t);
111static int      ixgbe_attach(device_t);
112static int      ixgbe_detach(device_t);
113static int      ixgbe_shutdown(device_t);
114static int	ixgbe_suspend(device_t);
115static int	ixgbe_resume(device_t);
116static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
117static void	ixgbe_init(void *);
118static void	ixgbe_init_locked(struct adapter *);
119static void     ixgbe_stop(void *);
120#if __FreeBSD_version >= 1100036
121static uint64_t	ixgbe_get_counter(struct ifnet *, ift_counter);
122#endif
123static void	ixgbe_add_media_types(struct adapter *);
124static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
125static int      ixgbe_media_change(struct ifnet *);
126static void     ixgbe_identify_hardware(struct adapter *);
127static int      ixgbe_allocate_pci_resources(struct adapter *);
128static void	ixgbe_get_slot_info(struct ixgbe_hw *);
129static int      ixgbe_allocate_msix(struct adapter *);
130static int      ixgbe_allocate_legacy(struct adapter *);
131static int	ixgbe_setup_msix(struct adapter *);
132static void	ixgbe_free_pci_resources(struct adapter *);
133static void	ixgbe_local_timer(void *);
134static int	ixgbe_setup_interface(device_t, struct adapter *);
135static void	ixgbe_config_dmac(struct adapter *);
136static void	ixgbe_config_delay_values(struct adapter *);
137static void	ixgbe_config_link(struct adapter *);
138static void	ixgbe_check_eee_support(struct adapter *);
139static void	ixgbe_check_wol_support(struct adapter *);
140static int	ixgbe_setup_low_power_mode(struct adapter *);
141static void	ixgbe_rearm_queues(struct adapter *, u64);
142
143static void     ixgbe_initialize_transmit_units(struct adapter *);
144static void     ixgbe_initialize_receive_units(struct adapter *);
145static void	ixgbe_enable_rx_drop(struct adapter *);
146static void	ixgbe_disable_rx_drop(struct adapter *);
147
148static void     ixgbe_enable_intr(struct adapter *);
149static void     ixgbe_disable_intr(struct adapter *);
150static void     ixgbe_update_stats_counters(struct adapter *);
151static void     ixgbe_set_promisc(struct adapter *);
152static void     ixgbe_set_multi(struct adapter *);
153static void     ixgbe_update_link_status(struct adapter *);
154static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
155static void	ixgbe_configure_ivars(struct adapter *);
156static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
157
158static void	ixgbe_setup_vlan_hw_support(struct adapter *);
159static void	ixgbe_register_vlan(void *, struct ifnet *, u16);
160static void	ixgbe_unregister_vlan(void *, struct ifnet *, u16);
161
162static void	ixgbe_add_device_sysctls(struct adapter *);
163static void     ixgbe_add_hw_stats(struct adapter *);
164
165/* Sysctl handlers */
166static int	ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
167static int	ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
168static int	ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
169static int	ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
170static int	ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
171static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
172static int	ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
173static int	ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
174static int	ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
175static int	ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
176static int	ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
177static int	ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
178
179/* Support for pluggable optic modules */
180static bool	ixgbe_sfp_probe(struct adapter *);
181static void	ixgbe_setup_optics(struct adapter *);
182
183/* Legacy (single vector interrupt handler */
184static void	ixgbe_legacy_irq(void *);
185
186/* The MSI/X Interrupt handlers */
187static void	ixgbe_msix_que(void *);
188static void	ixgbe_msix_link(void *);
189
190/* Deferred interrupt tasklets */
191static void	ixgbe_handle_que(void *, int);
192static void	ixgbe_handle_link(void *, int);
193static void	ixgbe_handle_msf(void *, int);
194static void	ixgbe_handle_mod(void *, int);
195static void	ixgbe_handle_phy(void *, int);
196
197#ifdef IXGBE_FDIR
198static void	ixgbe_reinit_fdir(void *, int);
199#endif
200
201/*********************************************************************
202 *  FreeBSD Device Interface Entry Points
203 *********************************************************************/
204
/* Newbus glue: map the generic device interface onto this driver. */
static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	DEVMETHOD(device_suspend, ixgbe_suspend),
	DEVMETHOD(device_resume, ixgbe_resume),
	DEVMETHOD_END
};

/* Softc size tells newbus how much per-device state to allocate. */
static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

/* The module needs the PCI bus and Ethernet layer present. */
MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
225
226/*
227** TUNEABLE PARAMETERS:
228*/
229
230static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
231		   "IXGBE driver parameters");
232
233/*
234** AIM: Adaptive Interrupt Moderation
235** which means that the interrupt rate
236** is varied over time based on the
237** traffic for that interrupt vector
238*/
239static int ixgbe_enable_aim = TRUE;
240SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
241    "Enable adaptive interrupt moderation");
242
243static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
244SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
245    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
246
247/* How many packets rxeof tries to clean at a time */
248static int ixgbe_rx_process_limit = 256;
249TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
250SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
251    &ixgbe_rx_process_limit, 0,
252    "Maximum number of received packets to process at a time,"
253    "-1 means unlimited");
254
255/* How many packets txeof tries to clean at a time */
256static int ixgbe_tx_process_limit = 256;
257TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
258SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
259    &ixgbe_tx_process_limit, 0,
260    "Maximum number of sent packets to process at a time,"
261    "-1 means unlimited");
262
263/*
264** Smart speed setting, default to on
265** this only works as a compile option
266** right now as its during attach, set
267** this to 'ixgbe_smart_speed_off' to
268** disable.
269*/
270static int ixgbe_smart_speed = ixgbe_smart_speed_on;
271
272/*
273 * MSIX should be the default for best performance,
274 * but this allows it to be forced off for testing.
275 */
276static int ixgbe_enable_msix = 1;
277SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
278    "Enable MSI-X interrupts");
279
280/*
281 * Number of Queues, can be set to 0,
282 * it then autoconfigures based on the
283 * number of cpus with a max of 8. This
284 * can be overriden manually here.
285 */
286static int ixgbe_num_queues = 0;
287SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
288    "Number of queues to configure, 0 indicates autoconfigure");
289
290/*
291** Number of TX descriptors per ring,
292** setting higher than RX as this seems
293** the better performing choice.
294*/
295static int ixgbe_txd = PERFORM_TXD;
296SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
297    "Number of transmit descriptors per queue");
298
299/* Number of RX descriptors per ring */
300static int ixgbe_rxd = PERFORM_RXD;
301SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
302    "Number of receive descriptors per queue");
303
304/*
305** Defining this on will allow the use
306** of unsupported SFP+ modules, note that
307** doing so you are on your own :)
308*/
309static int allow_unsupported_sfp = FALSE;
310TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
311
312/* Keep running tab on them for sanity check */
313static int ixgbe_total_ports;
314
315#ifdef IXGBE_FDIR
316/*
317** Flow Director actually 'steals'
318** part of the packet buffer as its
319** filter pool, this variable controls
320** how much it uses:
321**  0 = 64K, 1 = 128K, 2 = 256K
322*/
323static int fdir_pballoc = 1;
324#endif
325
326#ifdef DEV_NETMAP
327/*
328 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
329 * be a reference on how to implement netmap support in a driver.
330 * Additional comments are in ixgbe_netmap.h .
331 *
332 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
333 * that extend the standard driver.
334 */
335#include <dev/netmap/ixgbe_netmap.h>
336#endif /* DEV_NETMAP */
337
338/*********************************************************************
339 *  Device identification routine
340 *
341 *  ixgbe_probe determines if the driver should be loaded on
342 *  adapter based on PCI vendor/device id of the adapter.
343 *
344 *  return BUS_PROBE_DEFAULT on success, positive on failure
345 *********************************************************************/
346
347static int
348ixgbe_probe(device_t dev)
349{
350	ixgbe_vendor_info_t *ent;
351
352	u16	pci_vendor_id = 0;
353	u16	pci_device_id = 0;
354	u16	pci_subvendor_id = 0;
355	u16	pci_subdevice_id = 0;
356	char	adapter_name[256];
357
358	INIT_DEBUGOUT("ixgbe_probe: begin");
359
360	pci_vendor_id = pci_get_vendor(dev);
361	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
362		return (ENXIO);
363
364	pci_device_id = pci_get_device(dev);
365	pci_subvendor_id = pci_get_subvendor(dev);
366	pci_subdevice_id = pci_get_subdevice(dev);
367
368	ent = ixgbe_vendor_info_array;
369	while (ent->vendor_id != 0) {
370		if ((pci_vendor_id == ent->vendor_id) &&
371		    (pci_device_id == ent->device_id) &&
372
373		    ((pci_subvendor_id == ent->subvendor_id) ||
374		     (ent->subvendor_id == 0)) &&
375
376		    ((pci_subdevice_id == ent->subdevice_id) ||
377		     (ent->subdevice_id == 0))) {
378			sprintf(adapter_name, "%s, Version - %s",
379				ixgbe_strings[ent->index],
380				ixgbe_driver_version);
381			device_set_desc_copy(dev, adapter_name);
382			++ixgbe_total_ports;
383			return (BUS_PROBE_DEFAULT);
384		}
385		ent++;
386	}
387	return (ENXIO);
388}
389
390/*********************************************************************
391 *  Device initialization routine
392 *
393 *  The attach entry point is called when the driver is being loaded.
394 *  This routine identifies the type of hardware, allocates all resources
395 *  and initializes the hardware.
396 *
397 *  return 0 on success, positive on failure
398 *********************************************************************/
399
static int
ixgbe_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u16		csum;
	u32		ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout (runs with core_mtx held) */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/*
	 * Do descriptor calc and sanity checks: ring byte size must be
	 * DBA_ALIGN-aligned and the count within [MIN_TXD, MAX_TXD],
	 * otherwise fall back to the default.
	 */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;
		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	/* Same alignment/range sanity check for the RX ring size */
	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	hw->allow_unsupported_sfp = allow_unsupported_sfp;
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,"Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		/* Warn but continue: pre-production hardware is usable */
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM.  Please be aware there may be issues associated "
		    "with your hardware.\n If you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev,"Unsupported SFP+ Module\n");
		error = EIO;
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev,"No SFP+ Module found\n");
		/* falls thru */
	default:
		break;
	}

	/* Detect and set physical type */
	ixgbe_setup_optics(adapter);

	/* Prefer MSI-X when the hardware granted more than one vector */
	if ((adapter->msix > 1) && (ixgbe_enable_msix))
		error = ixgbe_allocate_msix(adapter);
	else
		error = ixgbe_allocate_legacy(adapter);
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

        /* Check PCIE slot type/speed/width */
	ixgbe_get_slot_info(hw);


	/* Set an initial default flow control value */
	adapter->fc = ixgbe_fc_full;

	/* Check for certain supported features */
	ixgbe_check_wol_support(adapter);
	ixgbe_check_eee_support(adapter);

	/* Add sysctls */
	ixgbe_add_device_sysctls(adapter);
	ixgbe_add_hw_stats(adapter);

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");
	return (0);

	/* Error unwind: err_late also tears down the TX/RX queues */
err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	free(adapter->mta, M_DEVBUF);
	return (error);
}
584
585/*********************************************************************
586 *  Device removal routine
587 *
588 *  The detach entry point is called when the driver is being removed.
589 *  This routine stops the adapter and deallocates all the resources
590 *  that were allocated for driver operation.
591 *
592 *  return 0 on success, positive on failure
593 *********************************************************************/
594
static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Stop the adapter */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free the per-queue taskqueues */
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if (que->tq) {
#ifndef IXGBE_LEGACY_TX
			taskqueue_drain(que->tq, &txr->txq_task);
#endif
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
		taskqueue_drain(adapter->tq, &adapter->phy_task);
#ifdef IXGBE_FDIR
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the network stack, then release resources */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}
665
666/*********************************************************************
667 *
668 *  Shutdown entry point
669 *
670 **********************************************************************/
671
672static int
673ixgbe_shutdown(device_t dev)
674{
675	struct adapter *adapter = device_get_softc(dev);
676	int error = 0;
677
678	INIT_DEBUGOUT("ixgbe_shutdown: begin");
679
680	IXGBE_CORE_LOCK(adapter);
681	error = ixgbe_setup_low_power_mode(adapter);
682	IXGBE_CORE_UNLOCK(adapter);
683
684	return (error);
685}
686
687/**
688 * Methods for going from:
689 * D0 -> D3: ixgbe_suspend
690 * D3 -> D0: ixgbe_resume
691 */
692static int
693ixgbe_suspend(device_t dev)
694{
695	struct adapter *adapter = device_get_softc(dev);
696	int error = 0;
697
698	INIT_DEBUGOUT("ixgbe_suspend: begin");
699
700	IXGBE_CORE_LOCK(adapter);
701
702	error = ixgbe_setup_low_power_mode(adapter);
703
704	/* Save state and power down */
705	pci_save_state(dev);
706	pci_set_powerstate(dev, PCI_POWERSTATE_D3);
707
708	IXGBE_CORE_UNLOCK(adapter);
709
710	return (error);
711}
712
713static int
714ixgbe_resume(device_t dev)
715{
716	struct adapter *adapter = device_get_softc(dev);
717	struct ifnet *ifp = adapter->ifp;
718	struct ixgbe_hw *hw = &adapter->hw;
719	u32 wus;
720
721	INIT_DEBUGOUT("ixgbe_resume: begin");
722
723	IXGBE_CORE_LOCK(adapter);
724
725	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
726	pci_restore_state(dev);
727
728	/* Read & clear WUS register */
729	wus = IXGBE_READ_REG(hw, IXGBE_WUS);
730	if (wus)
731		device_printf(dev, "Woken up by (WUS): %#010x\n",
732		    IXGBE_READ_REG(hw, IXGBE_WUS));
733	IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
734	/* And clear WUFC until next low-power transition */
735	IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
736
737	/*
738	 * Required after D3->D0 transition;
739	 * will re-advertise all previous advertised speeds
740	 */
741	if (ifp->if_flags & IFF_UP)
742		ixgbe_init_locked(adapter);
743
744	IXGBE_CORE_UNLOCK(adapter);
745
746	INIT_DEBUGOUT("ixgbe_resume: end");
747	return (0);
748}
749
750
751/*********************************************************************
752 *  Ioctl entry point
753 *
754 *  ixgbe_ioctl is called when the user wants to configure the
755 *  interface.
756 *
757 *  return 0 on success, positive on failure
758 **********************************************************************/
759
static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

        case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixgbe_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
#endif
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
			error = EINVAL;
		} else {
			/* An MTU change requires a full reinit */
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
				ifp->if_mtu + IXGBE_MTU_HDR;
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			/* Only reprogram promisc/allmulti if those bits changed */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
                                }
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixgbe_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* Reload the multicast filter with interrupts masked */
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		/* mask holds only the capability bits being toggled */
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		/* Reinit so the hardware picks up the new offload set */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
#if __FreeBSD_version >= 1100036
	case SIOCGI2C:
	{
		struct ixgbe_hw *hw = &adapter->hw;
		struct ifi2creq i2c;
		int i;
		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (error != 0)
			break;
		/* Only the two standard SFP EEPROM addresses are allowed */
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			error = EINVAL;
			break;
		}
		/* Bound the request to the size of the reply buffer */
		if (i2c.len > sizeof(i2c.data)) {
			error = EINVAL;
			break;
		}

		for (i = 0; i < i2c.len; i++)
			hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
			    i2c.dev_addr, &i2c.data[i]);
		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}
#endif
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
903
904/*********************************************************************
905 *  Init entry point
906 *
907 *  This routine is used in two ways. It is used by the stack as
908 *  init entry point in network interface structure. It is also used
909 *  by the driver as a hw/sw initialization routine to get to a
910 *  consistent state.
911 *
912 *  return 0 on success, positive on failure
913 **********************************************************************/
914#define IXGBE_MHADD_MFS_SHIFT 16
915
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		k, txdctl, mhadd, gpie;
	u32		rxdctl, rxctrl;

	/* The core lock must be held across the entire (re)initialization */
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixgbe_init_locked: begin");
	/* Clear the stopped flag first so ixgbe_stop_adapter() runs fully */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
        callout_stop(&adapter->timer);

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	      IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		/* SCTP checksum offload is not supported on 82598 */
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);
	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo frames
	*/
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else if (adapter->max_frame_size <= 9216)
		adapter->rx_mbuf_sz = MJUM9BYTES;
	else
		adapter->rx_mbuf_sz = MJUM16BYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);

	/* Enable Fan Failure Interrupt */
	gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);

	/* Add for Module detection */
	if (hw->mac.type == ixgbe_mac_82599EB)
		gpie |= IXGBE_SDP2_GPIEN;

	/*
	 * Thermal Failure Detection (X540)
	 * Link Detection (X552)
	 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
	}

	for (int i = 0; i < adapter->num_queues; i++) {
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		/* Poll (up to 10ms) for the enable bit to latch */
		for (k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
	}

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	/* Restart the per-second local timer */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix)  {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
                ixgbe_set_ivar(adapter, 0, 0, 0);
                ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

#ifdef IXGBE_FDIR
	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}
#endif

	/*
	** Check on any SFP devices that
	** need to be kick-started
	*/
	if (hw->phy.type == ixgbe_phy_none) {
		int err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                	device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			/* NOTE(review): returns with IFF_DRV_RUNNING unset */
			return;
        	}
	}

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);

	/* Configure Energy Efficient Ethernet for supported devices */
	if (adapter->eee_support)
		ixgbe_setup_eee(hw, adapter->eee_enabled);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return;
}
1167
/*
 * Stack-facing init entry point: takes the core lock and
 * delegates to ixgbe_init_locked().
 */
static void
ixgbe_init(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixgbe_init_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
1178
1179static void
1180ixgbe_config_delay_values(struct adapter *adapter)
1181{
1182	struct ixgbe_hw *hw = &adapter->hw;
1183	u32 rxpb, frame, size, tmp;
1184
1185	frame = adapter->max_frame_size;
1186
1187	/* Calculate High Water */
1188	switch (hw->mac.type) {
1189	case ixgbe_mac_X540:
1190	case ixgbe_mac_X550:
1191	case ixgbe_mac_X550EM_x:
1192		tmp = IXGBE_DV_X540(frame, frame);
1193		break;
1194	default:
1195		tmp = IXGBE_DV(frame, frame);
1196		break;
1197	}
1198	size = IXGBE_BT2KB(tmp);
1199	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1200	hw->fc.high_water[0] = rxpb - size;
1201
1202	/* Now calculate Low Water */
1203	switch (hw->mac.type) {
1204	case ixgbe_mac_X540:
1205	case ixgbe_mac_X550:
1206	case ixgbe_mac_X550EM_x:
1207		tmp = IXGBE_LOW_DV_X540(frame);
1208		break;
1209	default:
1210		tmp = IXGBE_LOW_DV(frame);
1211		break;
1212	}
1213	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1214
1215	hw->fc.requested_mode = adapter->fc;
1216	hw->fc.pause_time = IXGBE_FC_PAUSE;
1217	hw->fc.send_xon = TRUE;
1218}
1219
1220/*
1221**
1222** MSIX Interrupt Handlers and Tasklets
1223**
1224*/
1225
1226static inline void
1227ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1228{
1229	struct ixgbe_hw *hw = &adapter->hw;
1230	u64	queue = (u64)(1 << vector);
1231	u32	mask;
1232
1233	if (hw->mac.type == ixgbe_mac_82598EB) {
1234                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1235                IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1236	} else {
1237                mask = (queue & 0xFFFFFFFF);
1238                if (mask)
1239                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1240                mask = (queue >> 32);
1241                if (mask)
1242                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1243	}
1244}
1245
1246static inline void
1247ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1248{
1249	struct ixgbe_hw *hw = &adapter->hw;
1250	u64	queue = (u64)(1 << vector);
1251	u32	mask;
1252
1253	if (hw->mac.type == ixgbe_mac_82598EB) {
1254                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1255                IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1256	} else {
1257                mask = (queue & 0xFFFFFFFF);
1258                if (mask)
1259                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1260                mask = (queue >> 32);
1261                if (mask)
1262                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1263	}
1264}
1265
/*
 * Deferred (taskqueue) per-queue handler: clean RX and TX, restart
 * transmission if packets are pending, then re-enable the interrupt.
 */
static void
ixgbe_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring  *txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * NOTE(review): the 'more' result of ixgbe_rxeof() is
		 * ignored here; the interrupt is re-enabled regardless.
		 */
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifndef IXGBE_LEGACY_TX
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Reenable this interrupt */
	if (que->res != NULL)
		ixgbe_enable_queue(adapter, que->msix);
	else
		ixgbe_enable_intr(adapter);
	return;
}
1296
1297
1298/*********************************************************************
1299 *
1300 *  Legacy Interrupt Service routine
1301 *
1302 **********************************************************************/
1303
static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet    *ifp = adapter->ifp;
	struct 		tx_ring *txr = adapter->tx_rings;
	bool		more;
	u32       	reg_eicr;


	/* Reading EICR clears the pending cause bits */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (reg_eicr == 0) {
		/* Shared/spurious interrupt: nothing for us, just re-arm */
		ixgbe_enable_intr(adapter);
		return;
	}

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);

	/* More RX work pending: defer to the queue task, else re-arm */
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		ixgbe_enable_intr(adapter);
	return;
}
1360
1361
1362/*********************************************************************
1363 *
1364 *  MSIX Queue Interrupt Service routine
1365 *
1366 **********************************************************************/
1367void
1368ixgbe_msix_que(void *arg)
1369{
1370	struct ix_queue	*que = arg;
1371	struct adapter  *adapter = que->adapter;
1372	struct ifnet    *ifp = adapter->ifp;
1373	struct tx_ring	*txr = que->txr;
1374	struct rx_ring	*rxr = que->rxr;
1375	bool		more;
1376	u32		newitr = 0;
1377
1378	/* Protect against spurious interrupts */
1379	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1380		return;
1381
1382	ixgbe_disable_queue(adapter, que->msix);
1383	++que->irqs;
1384
1385	more = ixgbe_rxeof(que);
1386
1387	IXGBE_TX_LOCK(txr);
1388	ixgbe_txeof(txr);
1389#ifdef IXGBE_LEGACY_TX
1390	if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
1391		ixgbe_start_locked(txr, ifp);
1392#else
1393	if (!drbr_empty(ifp, txr->br))
1394		ixgbe_mq_start_locked(ifp, txr);
1395#endif
1396	IXGBE_TX_UNLOCK(txr);
1397
1398	/* Do AIM now? */
1399
1400	if (ixgbe_enable_aim == FALSE)
1401		goto no_calc;
1402	/*
1403	** Do Adaptive Interrupt Moderation:
1404        **  - Write out last calculated setting
1405	**  - Calculate based on average size over
1406	**    the last interval.
1407	*/
1408        if (que->eitr_setting)
1409                IXGBE_WRITE_REG(&adapter->hw,
1410                    IXGBE_EITR(que->msix), que->eitr_setting);
1411
1412        que->eitr_setting = 0;
1413
1414        /* Idle, do nothing */
1415        if ((txr->bytes == 0) && (rxr->bytes == 0))
1416                goto no_calc;
1417
1418	if ((txr->bytes) && (txr->packets))
1419               	newitr = txr->bytes/txr->packets;
1420	if ((rxr->bytes) && (rxr->packets))
1421		newitr = max(newitr,
1422		    (rxr->bytes / rxr->packets));
1423	newitr += 24; /* account for hardware frame, crc */
1424
1425	/* set an upper boundary */
1426	newitr = min(newitr, 3000);
1427
1428	/* Be nice to the mid range */
1429	if ((newitr > 300) && (newitr < 1200))
1430		newitr = (newitr / 3);
1431	else
1432		newitr = (newitr / 2);
1433
1434        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1435                newitr |= newitr << 16;
1436        else
1437                newitr |= IXGBE_EITR_CNT_WDIS;
1438
1439        /* save for next interrupt */
1440        que->eitr_setting = newitr;
1441
1442        /* Reset state */
1443        txr->bytes = 0;
1444        txr->packets = 0;
1445        rxr->bytes = 0;
1446        rxr->packets = 0;
1447
1448no_calc:
1449	if (more)
1450		taskqueue_enqueue(que->tq, &que->que_task);
1451	else
1452		ixgbe_enable_queue(adapter, que->msix);
1453	return;
1454}
1455
1456
/*
 * MSI-X "other"/link interrupt handler: decodes the non-queue causes
 * (link change, ECC, over-temp, flow director, SFP module/MSF events,
 * fan failure, external PHY) and dispatches the matching tasks.
 */
static void
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg_eicr, mod_mask;

	++adapter->link_irq;

	/* First get the cause */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
#ifdef IXGBE_FDIR
		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
                	/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		} else
#endif
		if (reg_eicr & IXGBE_EICR_ECC) {
                	device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (reg_eicr & IXGBE_EICR_TS) {
			device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
			    "PHY IS SHUT DOWN!!\n");
			device_printf(adapter->dev, "System shutdown required!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
		}
	}

	/* Pluggable optics-related interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
		mod_mask = IXGBE_EICR_GPI_SDP0_X540;
	else
		mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

	if (ixgbe_is_sfp(hw)) {
		/* SDP1 = multispeed fiber (MSF); mod_mask = module insertion */
		if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		} else if (reg_eicr & mod_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
	}

	/* External PHY interrupt */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		taskqueue_enqueue(adapter->tq, &adapter->phy_task);
	}

	/* Re-enable the "other" cause interrupt */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return;
}
1537
1538/*********************************************************************
1539 *
1540 *  Media Ioctl callback
1541 *
1542 *  This routine is called whenever the user queries the status of
1543 *  the interface using ifconfig.
1544 *
1545 **********************************************************************/
static void
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report only AVALID/ETHER and bail */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = ixgbe_get_supported_physical_layer(hw);

	/* Map (physical layer, link speed) to an ifmedia subtype */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	** XXX: These need to use the proper media types once
	** they're added.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
	    || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;

#if __FreeBSD_version >= 900025
	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
#endif

	IXGBE_CORE_UNLOCK(adapter);

	return;
}
1671
1672/*********************************************************************
1673 *
1674 *  Media Ioctl callback
1675 *
1676 *  This routine is called when the user changes speed/duplex using
1677 *  media/mediopt option with ifconfig.
1678 *
1679 **********************************************************************/
static int
ixgbe_media_change(struct ifnet * ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot change speed from software */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	/*
	** We don't actually need to check against the supported
	** media types of the adapter; ifmedia will take care of
	** that for us.
	*/
	/*
	** The cases below deliberately fall through so that each
	** media type accumulates every link speed at or below its
	** own (e.g. 10G_T advertises 100M, 1G and 10G).
	*/
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
		case IFM_10G_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			/* FALLTHROUGH */
		case IFM_10G_LRM:
		case IFM_10G_SR: /* KR, too */
		case IFM_10G_LR:
		case IFM_10G_CX4: /* KX4 */
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			/* FALLTHROUGH */
		case IFM_10G_TWINAX:
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
			break;
		case IFM_1000_T:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			/* FALLTHROUGH */
		case IFM_1000_LX:
		case IFM_1000_SX:
		case IFM_1000_CX: /* KX */
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
			break;
		case IFM_100_TX:
			speed |= IXGBE_LINK_SPEED_100_FULL;
			break;
		default:
			goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/* Record the advertised speeds as a 3-bit mask (10G|1G|100M) */
	adapter->advertise =
		((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
		((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
		((speed & IXGBE_LINK_SPEED_100_FULL) << 0);

	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	return (EINVAL);
}
1740
/*
 * Program the FCTRL unicast/multicast promiscuous bits (UPE/MPE)
 * to match the interface's IFF_PROMISC / IFF_ALLMULTI flags and
 * the current multicast address count.
 */
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	u_int32_t       reg_rctl;
	struct ifnet   *ifp = adapter->ifp;
	int		mcnt = 0;

	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	reg_rctl &= (~IXGBE_FCTRL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		/* Count the multicast addresses to see if we overflow */
		struct	ifmultiaddr *ifma;
#if __FreeBSD_version < 800000
		IF_ADDR_LOCK(ifp);
#else
		if_maddr_rlock(ifp);
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
#else
		if_maddr_runlock(ifp);
#endif
	}
	/* The filter table can hold all addresses: no multicast promisc */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg_rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGBE_FCTRL_MPE;
		reg_rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
	}
	return;
}
1786
1787
1788/*********************************************************************
1789 *  Multicast Update
1790 *
1791 *  This routine is called whenever multicast address list is updated.
1792 *
1793 **********************************************************************/
1794#define IXGBE_RAR_ENTRIES 16
1795
1796static void
1797ixgbe_set_multi(struct adapter *adapter)
1798{
1799	u32	fctrl;
1800	u8	*mta;
1801	u8	*update_ptr;
1802	struct	ifmultiaddr *ifma;
1803	int	mcnt = 0;
1804	struct ifnet   *ifp = adapter->ifp;
1805
1806	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1807
1808	mta = adapter->mta;
1809	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1810	    MAX_NUM_MULTICAST_ADDRESSES);
1811
1812#if __FreeBSD_version < 800000
1813	IF_ADDR_LOCK(ifp);
1814#else
1815	if_maddr_rlock(ifp);
1816#endif
1817	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1818		if (ifma->ifma_addr->sa_family != AF_LINK)
1819			continue;
1820		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1821			break;
1822		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1823		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1824		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1825		mcnt++;
1826	}
1827#if __FreeBSD_version < 800000
1828	IF_ADDR_UNLOCK(ifp);
1829#else
1830	if_maddr_runlock(ifp);
1831#endif
1832
1833	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1834	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1835	if (ifp->if_flags & IFF_PROMISC)
1836		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1837	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1838	    ifp->if_flags & IFF_ALLMULTI) {
1839		fctrl |= IXGBE_FCTRL_MPE;
1840		fctrl &= ~IXGBE_FCTRL_UPE;
1841	} else
1842		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1843
1844	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1845
1846	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1847		update_ptr = mta;
1848		ixgbe_update_mc_addr_list(&adapter->hw,
1849		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1850	}
1851
1852	return;
1853}
1854
1855/*
1856 * This is an iterator function now needed by the multicast
1857 * shared code. It simply feeds the shared code routine the
1858 * addresses in the array of ixgbe_set_multi() one by one.
1859 */
1860static u8 *
1861ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1862{
1863	u8 *addr = *update_ptr;
1864	u8 *newptr;
1865	*vmdq = 0;
1866
1867	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1868	*update_ptr = newptr;
1869	return addr;
1870}
1871
1872
1873/*********************************************************************
1874 *  Timer routine
1875 *
1876 *  This routine checks for link status,updates statistics,
1877 *  and runs the watchdog check.
1878 *
1879 **********************************************************************/
1880
static void
ixgbe_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64		queues = 0;
	int		hung = 0;

	/* Callout is initialized with the core mutex, so it is held here */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	** Check the TX queues status
	**	- mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                                adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixgbe_rearm_queues(adapter, queues);
	}

out:
	/* Re-arm for the next second */
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: reset the interface */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixgbe_init_locked(adapter);
}
1950
1951/*
1952** Note: this routine updates the OS on the link state
1953**	the real check of the hardware only happens with
1954**	a link interrupt.
1955*/
1956static void
1957ixgbe_update_link_status(struct adapter *adapter)
1958{
1959	struct ifnet	*ifp = adapter->ifp;
1960	device_t dev = adapter->dev;
1961
1962	if (adapter->link_up){
1963		if (adapter->link_active == FALSE) {
1964			if (bootverbose)
1965				device_printf(dev,"Link is up %d Gbps %s \n",
1966				    ((adapter->link_speed == 128)? 10:1),
1967				    "Full Duplex");
1968			adapter->link_active = TRUE;
1969			/* Update any Flow Control changes */
1970			ixgbe_fc_enable(&adapter->hw);
1971			/* Update DMA coalescing config */
1972			ixgbe_config_dmac(adapter);
1973			if_link_state_change(ifp, LINK_STATE_UP);
1974		}
1975	} else { /* Link down */
1976		if (adapter->link_active == TRUE) {
1977			if (bootverbose)
1978				device_printf(dev,"Link is Down\n");
1979			if_link_state_change(ifp, LINK_STATE_DOWN);
1980			adapter->link_active = FALSE;
1981		}
1982	}
1983
1984	return;
1985}
1986
1987
1988/*********************************************************************
1989 *
1990 *  This routine disables all traffic on the adapter by issuing a
1991 *  global reset on the MAC and deallocates TX/RX buffers.
1992 *
1993 **********************************************************************/
1994
static void
ixgbe_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	/* Caller must hold the core lock */
	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	/* Mask interrupts and cancel the local timer first */
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	ixgbe_reset_hw(hw);
	/*
	 * Clear adapter_stopped before calling ixgbe_stop_adapter()
	 * so its stop sequence actually runs (the reset above may
	 * have flagged the adapter as already stopped) -- NOTE(review):
	 * confirm against shared-code semantics.
	 */
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	/* 82599-specific: keep MAC link down while in D3 */
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
       	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
}
2029
2030
2031/*********************************************************************
2032 *
2033 *  Determine hardware revision.
2034 *
2035 **********************************************************************/
2036static void
2037ixgbe_identify_hardware(struct adapter *adapter)
2038{
2039	device_t        dev = adapter->dev;
2040	struct ixgbe_hw *hw = &adapter->hw;
2041
2042	/* Save off the information about this board */
2043	hw->vendor_id = pci_get_vendor(dev);
2044	hw->device_id = pci_get_device(dev);
2045	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2046	hw->subsystem_vendor_id =
2047	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2048	hw->subsystem_device_id =
2049	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2050
2051	/*
2052	** Make sure BUSMASTER is set
2053	*/
2054	pci_enable_busmaster(dev);
2055
2056	/* We need this here to set the num_segs below */
2057	ixgbe_set_mac_type(hw);
2058
2059	/* Pick up the 82599 settings */
2060	if (hw->mac.type != ixgbe_mac_82598EB) {
2061		hw->phy.smart_speed = ixgbe_smart_speed;
2062		adapter->num_segs = IXGBE_82599_SCATTER;
2063	} else
2064		adapter->num_segs = IXGBE_82598_SCATTER;
2065
2066	return;
2067}
2068
2069/*********************************************************************
2070 *
2071 *  Determine optic type
2072 *
2073 **********************************************************************/
2074static void
2075ixgbe_setup_optics(struct adapter *adapter)
2076{
2077	struct ixgbe_hw *hw = &adapter->hw;
2078	int		layer;
2079
2080	layer = ixgbe_get_supported_physical_layer(hw);
2081
2082	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2083		adapter->optics = IFM_10G_T;
2084		return;
2085	}
2086
2087	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2088		adapter->optics = IFM_1000_T;
2089		return;
2090	}
2091
2092	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2093		adapter->optics = IFM_1000_SX;
2094		return;
2095	}
2096
2097	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2098	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2099		adapter->optics = IFM_10G_LR;
2100		return;
2101	}
2102
2103	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2104		adapter->optics = IFM_10G_SR;
2105		return;
2106	}
2107
2108	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2109		adapter->optics = IFM_10G_TWINAX;
2110		return;
2111	}
2112
2113	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2114	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2115		adapter->optics = IFM_10G_CX4;
2116		return;
2117	}
2118
2119	/* If we get here just set the default */
2120	adapter->optics = IFM_ETHER | IFM_AUTO;
2121	return;
2122}
2123
2124/*********************************************************************
2125 *
2126 *  Setup the Legacy or MSI Interrupt handler
2127 *
2128 **********************************************************************/
static int
ixgbe_allocate_legacy(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct		ix_queue *que = adapter->queues;
#ifndef IXGBE_LEGACY_TX
	struct tx_ring		*txr = adapter->tx_rings;
#endif
	int		error, rid = 0;

	/* MSI RID at 1 */
	if (adapter->msix == 1)
		rid = 1;

	/* We allocate a single interrupt resource */
	adapter->res = bus_alloc_resource_any(dev,
            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
#ifndef IXGBE_LEGACY_TX
	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
#endif
	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
	/* Per-queue taskqueue for deferred RX/TX work */
	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
            taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
            device_get_nameunit(adapter->dev));

	/* Tasklets for Link, SFP and Multispeed Fiber */
	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
#ifdef IXGBE_FDIR
	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
#endif
	/* Admin taskqueue servicing the tasklets above */
	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
	    device_get_nameunit(adapter->dev));

	/* Hook the actual interrupt handler */
	if ((error = bus_setup_intr(dev, adapter->res,
            INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
            que, &adapter->tag)) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
		    "handler: %d\n", error);
		/* Undo the taskqueue setup on failure */
		taskqueue_free(que->tq);
		taskqueue_free(adapter->tq);
		que->tq = NULL;
		adapter->tq = NULL;
		return (error);
	}
	/* For simplicity in the handlers */
	adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;

	return (0);
}
2194
2195
2196/*********************************************************************
2197 *
2198 *  Setup MSIX Interrupt resources and handlers
2199 *
2200 **********************************************************************/
2201static int
2202ixgbe_allocate_msix(struct adapter *adapter)
2203{
2204	device_t        dev = adapter->dev;
2205	struct 		ix_queue *que = adapter->queues;
2206	struct  	tx_ring *txr = adapter->tx_rings;
2207	int 		error, rid, vector = 0;
2208	int		cpu_id = 0;
2209
2210	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2211		rid = vector + 1;
2212		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2213		    RF_SHAREABLE | RF_ACTIVE);
2214		if (que->res == NULL) {
2215			device_printf(dev,"Unable to allocate"
2216		    	    " bus resource: que interrupt [%d]\n", vector);
2217			return (ENXIO);
2218		}
2219		/* Set the handler function */
2220		error = bus_setup_intr(dev, que->res,
2221		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2222		    ixgbe_msix_que, que, &que->tag);
2223		if (error) {
2224			que->res = NULL;
2225			device_printf(dev, "Failed to register QUE handler");
2226			return (error);
2227		}
2228#if __FreeBSD_version >= 800504
2229		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2230#endif
2231		que->msix = vector;
2232		adapter->active_queues |= (u64)(1 << que->msix);
2233		/*
2234		 * Bind the msix vector, and thus the
2235		 * rings to the corresponding cpu.
2236		 *
2237		 * This just happens to match the default RSS round-robin
2238		 * bucket -> queue -> CPU allocation.
2239		 */
2240		if (adapter->num_queues > 1)
2241			cpu_id = i;
2242
2243		if (adapter->num_queues > 1)
2244			bus_bind_intr(dev, que->res, cpu_id);
2245
2246#ifndef IXGBE_LEGACY_TX
2247		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2248#endif
2249		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2250		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2251		    taskqueue_thread_enqueue, &que->tq);
2252		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2253		    device_get_nameunit(adapter->dev));
2254	}
2255
2256	/* and Link */
2257	rid = vector + 1;
2258	adapter->res = bus_alloc_resource_any(dev,
2259    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2260	if (!adapter->res) {
2261		device_printf(dev,"Unable to allocate"
2262    	    " bus resource: Link interrupt [%d]\n", rid);
2263		return (ENXIO);
2264	}
2265	/* Set the link handler function */
2266	error = bus_setup_intr(dev, adapter->res,
2267	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
2268	    ixgbe_msix_link, adapter, &adapter->tag);
2269	if (error) {
2270		adapter->res = NULL;
2271		device_printf(dev, "Failed to register LINK handler");
2272		return (error);
2273	}
2274#if __FreeBSD_version >= 800504
2275	bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2276#endif
2277	adapter->vector = vector;
2278	/* Tasklets for Link, SFP and Multispeed Fiber */
2279	TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2280	TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2281	TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2282	TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2283#ifdef IXGBE_FDIR
2284	TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2285#endif
2286	adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2287	    taskqueue_thread_enqueue, &adapter->tq);
2288	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2289	    device_get_nameunit(adapter->dev));
2290
2291	return (0);
2292}
2293
2294/*
2295 * Setup Either MSI/X or MSI
2296 */
static int
ixgbe_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want, queues, msgs;

	/* Override by tuneable */
	if (ixgbe_enable_msix == 0)
		goto msi;

	/* First try MSI/X */
	msgs = pci_msix_count(dev);
	if (msgs == 0)
		goto msi;
	/* The MSIX table BAR differs between MAC generations */
	rid = PCIR_BAR(MSIX_82598_BAR);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
       	if (adapter->msix_mem == NULL) {
		rid += 4;	/* 82599 maps in higher BAR */
		adapter->msix_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	}
       	if (adapter->msix_mem == NULL) {
		/* May not be enabled */
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;

	/* An explicit tuneable overrides the auto value */
	if (ixgbe_num_queues != 0)
		queues = ixgbe_num_queues;

	/* reflect correct sysctl value */
	ixgbe_num_queues = queues;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for Link.
	*/
	want = queues + 1;
	if (msgs >= want)
		msgs = want;
	else {
               	device_printf(adapter->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors but %d queues wanted!\n",
		    msgs, want);
		goto msi;
	}
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
               	device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", msgs);
		adapter->num_queues = queues;
		return (msgs);
	}
	/*
	** If MSIX alloc failed or provided us with
	** less than needed, free and fall through to MSI
	*/
	pci_release_msi(dev);

msi:
	/* Release the MSIX table mapping if one was grabbed above */
       	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
       	msgs = 1;
       	if (pci_alloc_msi(dev, &msgs) == 0) {
               	device_printf(adapter->dev,"Using an MSI interrupt\n");
		return (msgs);
	}
	/* Last resort: legacy INTx */
	device_printf(adapter->dev,"Using a Legacy interrupt\n");
	return (0);
}
2375
2376
2377static int
2378ixgbe_allocate_pci_resources(struct adapter *adapter)
2379{
2380	int             rid;
2381	device_t        dev = adapter->dev;
2382
2383	rid = PCIR_BAR(0);
2384	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2385	    &rid, RF_ACTIVE);
2386
2387	if (!(adapter->pci_mem)) {
2388		device_printf(dev,"Unable to allocate bus resource: memory\n");
2389		return (ENXIO);
2390	}
2391
2392	adapter->osdep.mem_bus_space_tag =
2393		rman_get_bustag(adapter->pci_mem);
2394	adapter->osdep.mem_bus_space_handle =
2395		rman_get_bushandle(adapter->pci_mem);
2396	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2397
2398	/* Legacy defaults */
2399	adapter->num_queues = 1;
2400	adapter->hw.back = &adapter->osdep;
2401
2402	/*
2403	** Now setup MSI or MSI/X, should
2404	** return us the number of supported
2405	** vectors. (Will be 1 for MSI)
2406	*/
2407	adapter->msix = ixgbe_setup_msix(adapter);
2408	return (0);
2409}
2410
/*
 * Tear down interrupt handlers and release all PCI resources
 * acquired during attach; tolerates partially-completed attach.
 */
static void
ixgbe_free_pci_resources(struct adapter * adapter)
{
	struct 		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	/* MSIX table BAR location depends on the MAC generation */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		memrid = PCIR_BAR(MSIX_82598_BAR);
	else
		memrid = PCIR_BAR(MSIX_82599_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	/* Release MSI/MSIX vectors, the table mapping, then BAR(0) */
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
2475
2476/*********************************************************************
2477 *
2478 *  Setup networking device structure and register an interface.
2479 *
2480 **********************************************************************/
static int
ixgbe_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixgbe_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		return (-1);
	}
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_initbaudrate(ifp, IF_Gbps(10));
	ifp->if_init = ixgbe_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgbe_ioctl;
#ifndef IXGBE_LEGACY_TX
	/* Multiqueue transmit path */
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	/* Legacy single-queue if_start path */
	ifp->if_start = ixgbe_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
	IFQ_SET_READY(&ifp->if_snd);
#endif

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Hardware offload capabilities, all enabled by default */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_HWSTATS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixgbe driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
		    ixgbe_media_status);

	ixgbe_add_media_types(adapter);

	/* Autoselect media by default */
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return (0);
}
2552
/*
 * Register every media type the PHY layer reports as supported
 * with the ifmedia subsystem.
 */
static void
ixgbe_add_media_types(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	int layer;

	layer = ixgbe_get_supported_physical_layer(hw);

	/* Media types with matching FreeBSD media defines */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);

	/*
	** Other (no matching FreeBSD media type):
	** To workaround this, we'll assign these completely
	** inappropriate media types.
	*/
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
		device_printf(dev, "Media supported: 10GbaseKR\n");
		device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
		device_printf(dev, "Media supported: 10GbaseKX4\n");
		device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
		device_printf(dev, "Media supported: 1000baseKX\n");
		device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
	}
	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
		/* Someday, someone will care about you... */
		device_printf(dev, "Media supported: 1000baseBX\n");
	}

	/* 82598AT also advertises 1G over its copper interface */
	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
		ifmedia_add(&adapter->media,
		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		ifmedia_add(&adapter->media,
		    IFM_ETHER | IFM_1000_T, 0, NULL);
	}

	/* Autoselect is always available */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
}
2617
2618static void
2619ixgbe_config_link(struct adapter *adapter)
2620{
2621	struct ixgbe_hw *hw = &adapter->hw;
2622	u32	autoneg, err = 0;
2623	bool	sfp, negotiate;
2624
2625	sfp = ixgbe_is_sfp(hw);
2626
2627	if (sfp) {
2628		if (hw->phy.multispeed_fiber) {
2629			hw->mac.ops.setup_sfp(hw);
2630			ixgbe_enable_tx_laser(hw);
2631			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2632		} else
2633			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2634	} else {
2635		if (hw->mac.ops.check_link)
2636			err = ixgbe_check_link(hw, &adapter->link_speed,
2637			    &adapter->link_up, FALSE);
2638		if (err)
2639			goto out;
2640		autoneg = hw->phy.autoneg_advertised;
2641		if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2642                	err  = hw->mac.ops.get_link_capabilities(hw,
2643			    &autoneg, &negotiate);
2644		if (err)
2645			goto out;
2646		if (hw->mac.ops.setup_link)
2647                	err = hw->mac.ops.setup_link(hw,
2648			    autoneg, adapter->link_up);
2649	}
2650out:
2651	return;
2652}
2653
2654
2655/*********************************************************************
2656 *
2657 *  Enable transmit units.
2658 *
2659 **********************************************************************/
static void
ixgbe_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring */

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl = 0;

		/* Program the ring's DMA base (low/high) and length */
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
		       (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

		/* Setup the HW Tx Head and Tail descriptor pointers */
		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);

		/* Cache the tail address */
		txr->tail = IXGBE_TDT(txr->me);

		/* Set the processing limit */
		txr->process_limit = ixgbe_tx_process_limit;

		/* Disable Head Writeback */
		/* The TXCTRL register location differs by MAC type */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
			break;
                }
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
			break;
		}

	}

	/* Non-82598: enable TX DMA and program MTQC */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 dmatxctl, rttdcs;
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
		/* Disable arbiter to set MTQC */
		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		rttdcs |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
		/* Re-enable the arbiter */
		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
	}

	return;
}
2729
2730static void
2731ixgbe_initialise_rss_mapping(struct adapter *adapter)
2732{
2733	struct ixgbe_hw	*hw = &adapter->hw;
2734	uint32_t reta;
2735	int i, j, queue_id, table_size;
2736	int index_mult;
2737	uint32_t rss_key[10];
2738	uint32_t mrqc;
2739
2740	/* Setup RSS */
2741	reta = 0;
2742
2743	/* set up random bits */
2744	arc4rand(&rss_key, sizeof(rss_key), 0);
2745
2746	/* Set multiplier for RETA setup and table size based on MAC */
2747	index_mult = 0x1;
2748	table_size = 128;
2749	switch (adapter->hw.mac.type) {
2750	case ixgbe_mac_82598EB:
2751		index_mult = 0x11;
2752		break;
2753	case ixgbe_mac_X550:
2754	case ixgbe_mac_X550EM_x:
2755		table_size = 512;
2756		break;
2757	default:
2758		break;
2759	}
2760
2761	/* Set up the redirection table */
2762	for (i = 0, j = 0; i < table_size; i++, j++) {
2763		if (j == adapter->num_queues) j = 0;
2764		queue_id = (j * index_mult);
2765		/*
2766		 * The low 8 bits are for hash value (n+0);
2767		 * The next 8 bits are for hash value (n+1), etc.
2768		 */
2769		reta = reta >> 8;
2770		reta = reta | ( ((uint32_t) queue_id) << 24);
2771		if ((i & 3) == 3) {
2772			if (i < 128)
2773				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2774			else
2775				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2776			reta = 0;
2777		}
2778	}
2779
2780	/* Now fill our hash function seeds */
2781	for (int i = 0; i < 10; i++)
2782		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2783
2784	/* Perform hash on these packet types */
2785	/*
2786	 * Disable UDP - IP fragments aren't currently being handled
2787	 * and so we end up with a mix of 2-tuple and 4-tuple
2788	 * traffic.
2789	 */
2790	mrqc = IXGBE_MRQC_RSSEN
2791	     | IXGBE_MRQC_RSS_FIELD_IPV4
2792	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2793#if 0
2794	     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2795#endif
2796	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2797	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2798	     | IXGBE_MRQC_RSS_FIELD_IPV6
2799	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2800#if 0
2801	     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2802	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2803#endif
2804	;
2805	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2806}
2807
2808
2809/*********************************************************************
2810 *
2811 *  Setup receive registers and features.
2812 *
2813 **********************************************************************/
2814#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2815
2816#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
2817
static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet   *ifp = adapter->ifp;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;


	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	/* 82598-only filter control bits */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
#ifdef DEV_NETMAP
	/* crcstrip is conditional in netmap (in RDRXCTL too ?) */
	if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;
#endif /* DEV_NETMAP */
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Receive buffer size, in SRRCTL units (rounded up) */
	bufsz = (adapter->rx_mbuf_sz +
	    BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
			       (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);

		/* Set the processing limit */
		rxr->process_limit = ixgbe_rx_process_limit;

		/* Set the driver rx tail address */
		rxr->tail =  IXGBE_RDT(rxr->me);
	}

	/* Non-82598: set packet split receive type */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			      IXGBE_PSRTYPE_UDPHDR |
			      IXGBE_PSRTYPE_IPV4HDR |
			      IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	/* Program the RETA/RSSRK/MRQC registers */
	ixgbe_initialise_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
2931
2932
2933/*
2934** This routine is run via an vlan config EVENT,
2935** it enables us to use the HW Filter table since
2936** we can get the vlan id. This just creates the
2937** entry in the soft version of the VFTA, init will
2938** repopulate the real table.
2939*/
2940static void
2941ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2942{
2943	struct adapter	*adapter = ifp->if_softc;
2944	u16		index, bit;
2945
2946	if (ifp->if_softc !=  arg)   /* Not our event */
2947		return;
2948
2949	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
2950		return;
2951
2952	IXGBE_CORE_LOCK(adapter);
2953	index = (vtag >> 5) & 0x7F;
2954	bit = vtag & 0x1F;
2955	adapter->shadow_vfta[index] |= (1 << bit);
2956	++adapter->num_vlans;
2957	ixgbe_setup_vlan_hw_support(adapter);
2958	IXGBE_CORE_UNLOCK(adapter);
2959}
2960
2961/*
2962** This routine is run via an vlan
2963** unconfig EVENT, remove our entry
2964** in the soft vfta.
2965*/
2966static void
2967ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2968{
2969	struct adapter	*adapter = ifp->if_softc;
2970	u16		index, bit;
2971
2972	if (ifp->if_softc !=  arg)
2973		return;
2974
2975	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
2976		return;
2977
2978	IXGBE_CORE_LOCK(adapter);
2979	index = (vtag >> 5) & 0x7F;
2980	bit = vtag & 0x1F;
2981	adapter->shadow_vfta[index] &= ~(1 << bit);
2982	--adapter->num_vlans;
2983	/* Re-init to load the changes */
2984	ixgbe_setup_vlan_hw_support(adapter);
2985	IXGBE_CORE_UNLOCK(adapter);
2986}
2987
/*
 * Program hardware VLAN support (per-queue tag stripping plus the
 * VFTA filter table) from the soft state kept in the adapter.
 */
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ifnet 	*ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		ctrl;


	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Setup the queues for vlans */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* On 82599 the VLAN enable is per/queue in RXDCTL */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
		}
		rxr->vtag_strip = TRUE;
	}

	/* Without HW filtering, stripping above is all we do */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	/* On 82598 VLAN stripping is a global VLNCTRL bit */
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
}
3039
/*
** Enable interrupts: build the misc-cause mask appropriate
** for this MAC type, write it to EIMS, configure MSI-X
** auto-clear, then enable each queue's vector.
*/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask, fwsm;

	/* Start from all causes except the per-queue RX/TX bits */
	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	/* Enable Fan Failure detection */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		    mask |= IXGBE_EIMS_GPI_SDP1;

	switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			mask |= IXGBE_EIMS_ECC;
			/* Temperature sensor on some adapters */
			mask |= IXGBE_EIMS_GPI_SDP0;
			/* SFP+ (RX_LOS_N & MOD_ABS_N) */
			mask |= IXGBE_EIMS_GPI_SDP1;
			mask |= IXGBE_EIMS_GPI_SDP2;
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
			break;
		case ixgbe_mac_X540:
			/* Detect if Thermal Sensor is enabled */
			fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
			if (fwsm & IXGBE_FWSM_TS_ENABLED)
				mask |= IXGBE_EIMS_TS;
			mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
			break;
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			/* MAC thermal sensor is automatically enabled */
			mask |= IXGBE_EIMS_TS;
			/* Some devices use SDP0 for important information */
			if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
			    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
				mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
			mask |= IXGBE_EIMS_ECC;
#ifdef IXGBE_FDIR
			mask |= IXGBE_EIMS_FLOW_DIR;
#endif
		/* falls through */
		default:
			break;
	}

	/* Enable the misc causes */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	** Now enable all queues, this is done separately to
	** allow for handling the extended (beyond 32) MSIX
	** vectors that can be used by 82599
	*/
        for (int i = 0; i < adapter->num_queues; i++, que++)
                ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
3114
3115static void
3116ixgbe_disable_intr(struct adapter *adapter)
3117{
3118	if (adapter->msix_mem)
3119		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3120	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3121		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3122	} else {
3123		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3124		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3125		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3126	}
3127	IXGBE_WRITE_FLUSH(&adapter->hw);
3128	return;
3129}
3130
3131/*
3132** Get the width and transaction speed of
3133** the slot this adapter is plugged into.
3134*/
3135static void
3136ixgbe_get_slot_info(struct ixgbe_hw *hw)
3137{
3138	device_t		dev = ((struct ixgbe_osdep *)hw->back)->dev;
3139	struct ixgbe_mac_info	*mac = &hw->mac;
3140	u16			link;
3141	u32			offset;
3142
3143	/* For most devices simply call the shared code routine */
3144	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3145		ixgbe_get_bus_info(hw);
3146		/* These devices don't use PCI-E */
3147		switch (hw->mac.type) {
3148		case ixgbe_mac_X550EM_x:
3149			return;
3150		default:
3151			goto display;
3152		}
3153	}
3154
3155	/*
3156	** For the Quad port adapter we need to parse back
3157	** up the PCI tree to find the speed of the expansion
3158	** slot into which this adapter is plugged. A bit more work.
3159	*/
3160	dev = device_get_parent(device_get_parent(dev));
3161#ifdef IXGBE_DEBUG
3162	device_printf(dev, "parent pcib = %x,%x,%x\n",
3163	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3164#endif
3165	dev = device_get_parent(device_get_parent(dev));
3166#ifdef IXGBE_DEBUG
3167	device_printf(dev, "slot pcib = %x,%x,%x\n",
3168	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3169#endif
3170	/* Now get the PCI Express Capabilities offset */
3171	pci_find_cap(dev, PCIY_EXPRESS, &offset);
3172	/* ...and read the Link Status Register */
3173	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3174	switch (link & IXGBE_PCI_LINK_WIDTH) {
3175	case IXGBE_PCI_LINK_WIDTH_1:
3176		hw->bus.width = ixgbe_bus_width_pcie_x1;
3177		break;
3178	case IXGBE_PCI_LINK_WIDTH_2:
3179		hw->bus.width = ixgbe_bus_width_pcie_x2;
3180		break;
3181	case IXGBE_PCI_LINK_WIDTH_4:
3182		hw->bus.width = ixgbe_bus_width_pcie_x4;
3183		break;
3184	case IXGBE_PCI_LINK_WIDTH_8:
3185		hw->bus.width = ixgbe_bus_width_pcie_x8;
3186		break;
3187	default:
3188		hw->bus.width = ixgbe_bus_width_unknown;
3189		break;
3190	}
3191
3192	switch (link & IXGBE_PCI_LINK_SPEED) {
3193	case IXGBE_PCI_LINK_SPEED_2500:
3194		hw->bus.speed = ixgbe_bus_speed_2500;
3195		break;
3196	case IXGBE_PCI_LINK_SPEED_5000:
3197		hw->bus.speed = ixgbe_bus_speed_5000;
3198		break;
3199	case IXGBE_PCI_LINK_SPEED_8000:
3200		hw->bus.speed = ixgbe_bus_speed_8000;
3201		break;
3202	default:
3203		hw->bus.speed = ixgbe_bus_speed_unknown;
3204		break;
3205	}
3206
3207	mac->ops.set_lan_id(hw);
3208
3209display:
3210	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3211	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3212	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3213	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3214	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3215	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3216	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3217	    ("Unknown"));
3218
3219	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3220	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3221	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
3222		device_printf(dev, "PCI-Express bandwidth available"
3223		    " for this card\n     is not sufficient for"
3224		    " optimal performance.\n");
3225		device_printf(dev, "For optimal performance a x8 "
3226		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
3227        }
3228	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3229	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3230	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
3231		device_printf(dev, "PCI-Express bandwidth available"
3232		    " for this card\n     is not sufficient for"
3233		    " optimal performance.\n");
3234		device_printf(dev, "For optimal performance a x8 "
3235		    "PCIE Gen3 slot is required.\n");
3236        }
3237
3238	return;
3239}
3240
3241
3242/*
3243** Setup the correct IVAR register for a particular MSIX interrupt
3244**   (yes this is all very magic and confusing :)
3245**  - entry is the register array entry
3246**  - vector is the MSIX vector for this queue
3247**  - type is RX/TX/MISC
3248*/
3249static void
3250ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3251{
3252	struct ixgbe_hw *hw = &adapter->hw;
3253	u32 ivar, index;
3254
3255	vector |= IXGBE_IVAR_ALLOC_VAL;
3256
3257	switch (hw->mac.type) {
3258
3259	case ixgbe_mac_82598EB:
3260		if (type == -1)
3261			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3262		else
3263			entry += (type * 64);
3264		index = (entry >> 2) & 0x1F;
3265		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3266		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3267		ivar |= (vector << (8 * (entry & 0x3)));
3268		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3269		break;
3270
3271	case ixgbe_mac_82599EB:
3272	case ixgbe_mac_X540:
3273	case ixgbe_mac_X550:
3274	case ixgbe_mac_X550EM_x:
3275		if (type == -1) { /* MISC IVAR */
3276			index = (entry & 1) * 8;
3277			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3278			ivar &= ~(0xFF << index);
3279			ivar |= (vector << index);
3280			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3281		} else {	/* RX/TX IVARS */
3282			index = (16 * (entry & 1)) + (8 * type);
3283			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3284			ivar &= ~(0xFF << index);
3285			ivar |= (vector << index);
3286			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3287		}
3288
3289	default:
3290		break;
3291	}
3292}
3293
3294static void
3295ixgbe_configure_ivars(struct adapter *adapter)
3296{
3297	struct  ix_queue *que = adapter->queues;
3298	u32 newitr;
3299
3300	if (ixgbe_max_interrupt_rate > 0)
3301		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3302	else {
3303		/*
3304		** Disable DMA coalescing if interrupt moderation is
3305		** disabled.
3306		*/
3307		adapter->dmac = 0;
3308		newitr = 0;
3309	}
3310
3311        for (int i = 0; i < adapter->num_queues; i++, que++) {
3312		/* First the RX queue entry */
3313                ixgbe_set_ivar(adapter, i, que->msix, 0);
3314		/* ... and the TX */
3315		ixgbe_set_ivar(adapter, i, que->msix, 1);
3316		/* Set an Initial EITR value */
3317                IXGBE_WRITE_REG(&adapter->hw,
3318                    IXGBE_EITR(que->msix), newitr);
3319	}
3320
3321	/* For the Link interrupt */
3322        ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3323}
3324
3325/*
3326** ixgbe_sfp_probe - called in the local timer to
3327** determine if a port had optics inserted.
3328*/
3329static bool ixgbe_sfp_probe(struct adapter *adapter)
3330{
3331	struct ixgbe_hw	*hw = &adapter->hw;
3332	device_t	dev = adapter->dev;
3333	bool		result = FALSE;
3334
3335	if ((hw->phy.type == ixgbe_phy_nl) &&
3336	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3337		s32 ret = hw->phy.ops.identify_sfp(hw);
3338		if (ret)
3339                        goto out;
3340		ret = hw->phy.ops.reset(hw);
3341		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3342			device_printf(dev,"Unsupported SFP+ module detected!");
3343			printf(" Reload driver with supported module.\n");
3344			adapter->sfp_probe = FALSE;
3345                        goto out;
3346		} else
3347			device_printf(dev,"SFP+ module detected!\n");
3348		/* We now have supported optics */
3349		adapter->sfp_probe = FALSE;
3350		/* Set the optics type so system reports correctly */
3351		ixgbe_setup_optics(adapter);
3352		result = TRUE;
3353	}
3354out:
3355	return (result);
3356}
3357
3358/*
3359** Tasklet handler for MSIX Link interrupts
3360**  - do outside interrupt since it might sleep
3361*/
3362static void
3363ixgbe_handle_link(void *context, int pending)
3364{
3365	struct adapter  *adapter = context;
3366
3367	ixgbe_check_link(&adapter->hw,
3368	    &adapter->link_speed, &adapter->link_up, 0);
3369	ixgbe_update_link_status(adapter);
3370}
3371
3372/*
3373** Tasklet for handling SFP module interrupts
3374*/
3375static void
3376ixgbe_handle_mod(void *context, int pending)
3377{
3378	struct adapter  *adapter = context;
3379	struct ixgbe_hw *hw = &adapter->hw;
3380	device_t	dev = adapter->dev;
3381	u32 err;
3382
3383	err = hw->phy.ops.identify_sfp(hw);
3384	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3385		device_printf(dev,
3386		    "Unsupported SFP+ module type was detected.\n");
3387		return;
3388	}
3389	err = hw->mac.ops.setup_sfp(hw);
3390	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3391		device_printf(dev,
3392		    "Setup failure - unsupported SFP+ module type.\n");
3393		return;
3394	}
3395	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3396	return;
3397}
3398
3399
3400/*
3401** Tasklet for handling MSF (multispeed fiber) interrupts
3402*/
3403static void
3404ixgbe_handle_msf(void *context, int pending)
3405{
3406	struct adapter  *adapter = context;
3407	struct ixgbe_hw *hw = &adapter->hw;
3408	u32 autoneg;
3409	bool negotiate;
3410	int err;
3411
3412	err = hw->phy.ops.identify_sfp(hw);
3413	if (!err) {
3414		ixgbe_setup_optics(adapter);
3415		INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3416	}
3417
3418	autoneg = hw->phy.autoneg_advertised;
3419	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3420		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3421	if (hw->mac.ops.setup_link)
3422		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3423
3424	ifmedia_removeall(&adapter->media);
3425	ixgbe_add_media_types(adapter);
3426	return;
3427}
3428
3429/*
3430** Tasklet for handling interrupts from an external PHY
3431*/
3432static void
3433ixgbe_handle_phy(void *context, int pending)
3434{
3435	struct adapter  *adapter = context;
3436	struct ixgbe_hw *hw = &adapter->hw;
3437	int error;
3438
3439	error = hw->phy.ops.handle_lasi(hw);
3440	if (error == IXGBE_ERR_OVERTEMP)
3441		device_printf(adapter->dev,
3442		    "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3443		    " PHY will downshift to lower power state!\n");
3444	else if (error)
3445		device_printf(adapter->dev,
3446		    "Error handling LASI interrupt: %d\n",
3447		    error);
3448	return;
3449}
3450
3451#ifdef IXGBE_FDIR
3452/*
3453** Tasklet for reinitializing the Flow Director filter table
3454*/
3455static void
3456ixgbe_reinit_fdir(void *context, int pending)
3457{
3458	struct adapter  *adapter = context;
3459	struct ifnet   *ifp = adapter->ifp;
3460
3461	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3462		return;
3463	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3464	adapter->fdir_reinit = 0;
3465	/* re-enable flow director interrupts */
3466	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3467	/* Restart the interface */
3468	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3469	return;
3470}
3471#endif
3472
3473/*********************************************************************
3474 *
3475 *  Configure DMA Coalescing
3476 *
3477 **********************************************************************/
3478static void
3479ixgbe_config_dmac(struct adapter *adapter)
3480{
3481	struct ixgbe_hw *hw = &adapter->hw;
3482	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3483
3484	if (hw->mac.type < ixgbe_mac_X550 ||
3485	    !hw->mac.ops.dmac_config)
3486		return;
3487
3488	if (dcfg->watchdog_timer ^ adapter->dmac ||
3489	    dcfg->link_speed ^ adapter->link_speed) {
3490		dcfg->watchdog_timer = adapter->dmac;
3491		dcfg->fcoe_en = false;
3492		dcfg->link_speed = adapter->link_speed;
3493		dcfg->num_tcs = 1;
3494
3495		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3496		    dcfg->watchdog_timer, dcfg->link_speed);
3497
3498		hw->mac.ops.dmac_config(hw);
3499	}
3500}
3501
3502/*
3503 * Checks whether the adapter supports Energy Efficient Ethernet
3504 * or not, based on device ID.
3505 */
3506static void
3507ixgbe_check_eee_support(struct adapter *adapter)
3508{
3509	struct ixgbe_hw *hw = &adapter->hw;
3510
3511	adapter->eee_support = adapter->eee_enabled =
3512	    (hw->device_id == IXGBE_DEV_ID_X550T ||
3513	        hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3514}
3515
3516/*
3517 * Checks whether the adapter's ports are capable of
3518 * Wake On LAN by reading the adapter's NVM.
3519 *
3520 * Sets each port's hw->wol_enabled value depending
3521 * on the value read here.
3522 */
3523static void
3524ixgbe_check_wol_support(struct adapter *adapter)
3525{
3526	struct ixgbe_hw *hw = &adapter->hw;
3527	u16 dev_caps = 0;
3528
3529	/* Find out WoL support for port */
3530	adapter->wol_support = hw->wol_enabled = 0;
3531	ixgbe_get_device_caps(hw, &dev_caps);
3532	if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3533	    ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3534	        hw->bus.func == 0))
3535	    adapter->wol_support = hw->wol_enabled = 1;
3536
3537	/* Save initial wake up filter configuration */
3538	adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3539
3540	return;
3541}
3542
3543/*
3544 * Prepare the adapter/port for LPLU and/or WoL
3545 */
3546static int
3547ixgbe_setup_low_power_mode(struct adapter *adapter)
3548{
3549	struct ixgbe_hw *hw = &adapter->hw;
3550	device_t dev = adapter->dev;
3551	s32 error = 0;
3552
3553	mtx_assert(&adapter->core_mtx, MA_OWNED);
3554
3555	/* Limit power management flow to X550EM baseT */
3556	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3557	    && hw->phy.ops.enter_lplu) {
3558		/* Turn off support for APM wakeup. (Using ACPI instead) */
3559		IXGBE_WRITE_REG(hw, IXGBE_GRC,
3560		    IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3561
3562		/*
3563		 * Clear Wake Up Status register to prevent any previous wakeup
3564		 * events from waking us up immediately after we suspend.
3565		 */
3566		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3567
3568		/*
3569		 * Program the Wakeup Filter Control register with user filter
3570		 * settings
3571		 */
3572		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3573
3574		/* Enable wakeups and power management in Wakeup Control */
3575		IXGBE_WRITE_REG(hw, IXGBE_WUC,
3576		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3577
3578		/* X550EM baseT adapters need a special LPLU flow */
3579		hw->phy.reset_disable = true;
3580		ixgbe_stop(adapter);
3581		error = hw->phy.ops.enter_lplu(hw);
3582		if (error)
3583			device_printf(dev,
3584			    "Error entering LPLU: %d\n", error);
3585		hw->phy.reset_disable = false;
3586	} else {
3587		/* Just stop for other adapters */
3588		ixgbe_stop(adapter);
3589	}
3590
3591	return error;
3592}
3593
3594/**********************************************************************
3595 *
3596 *  Update the board statistics counters.
3597 *
3598 **********************************************************************/
3599static void
3600ixgbe_update_stats_counters(struct adapter *adapter)
3601{
3602	struct ixgbe_hw *hw = &adapter->hw;
3603	u32 missed_rx = 0, bprc, lxon, lxoff, total;
3604	u64 total_missed_rx = 0;
3605
3606	adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3607	adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3608	adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3609	adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3610
3611	for (int i = 0; i < 16; i++) {
3612		adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3613		adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3614		adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3615	}
3616	adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3617	adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3618	adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3619
3620	/* Hardware workaround, gprc counts missed packets */
3621	adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3622	adapter->stats.pf.gprc -= missed_rx;
3623
3624	if (hw->mac.type != ixgbe_mac_82598EB) {
3625		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3626		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3627		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3628		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3629		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3630		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3631		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3632		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3633	} else {
3634		adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3635		adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3636		/* 82598 only has a counter in the high register */
3637		adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3638		adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3639		adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3640	}
3641
3642	/*
3643	 * Workaround: mprc hardware is incorrectly counting
3644	 * broadcasts, so for now we subtract those.
3645	 */
3646	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3647	adapter->stats.pf.bprc += bprc;
3648	adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3649	if (hw->mac.type == ixgbe_mac_82598EB)
3650		adapter->stats.pf.mprc -= bprc;
3651
3652	adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3653	adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3654	adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3655	adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3656	adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3657	adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3658
3659	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3660	adapter->stats.pf.lxontxc += lxon;
3661	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3662	adapter->stats.pf.lxofftxc += lxoff;
3663	total = lxon + lxoff;
3664
3665	adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3666	adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3667	adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3668	adapter->stats.pf.gptc -= total;
3669	adapter->stats.pf.mptc -= total;
3670	adapter->stats.pf.ptc64 -= total;
3671	adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3672
3673	adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3674	adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3675	adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3676	adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3677	adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3678	adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3679	adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3680	adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3681	adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3682	adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3683	adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3684	adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3685	adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3686	adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3687	adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3688	adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3689	adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3690	adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3691	/* Only read FCOE on 82599 */
3692	if (hw->mac.type != ixgbe_mac_82598EB) {
3693		adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3694		adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3695		adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3696		adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3697		adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3698	}
3699
3700	/* Fill out the OS statistics structure */
3701	IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3702	IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3703	IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3704	IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3705	IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3706	IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3707	IXGBE_SET_COLLISIONS(adapter, 0);
3708	IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3709	IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3710	    + adapter->stats.pf.rlec);
3711}
3712
3713#if __FreeBSD_version >= 1100036
3714static uint64_t
3715ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3716{
3717	struct adapter *adapter;
3718	struct tx_ring *txr;
3719	uint64_t rv;
3720
3721	adapter = if_getsoftc(ifp);
3722
3723	switch (cnt) {
3724	case IFCOUNTER_IPACKETS:
3725		return (adapter->ipackets);
3726	case IFCOUNTER_OPACKETS:
3727		return (adapter->opackets);
3728	case IFCOUNTER_IBYTES:
3729		return (adapter->ibytes);
3730	case IFCOUNTER_OBYTES:
3731		return (adapter->obytes);
3732	case IFCOUNTER_IMCASTS:
3733		return (adapter->imcasts);
3734	case IFCOUNTER_OMCASTS:
3735		return (adapter->omcasts);
3736	case IFCOUNTER_COLLISIONS:
3737		return (0);
3738	case IFCOUNTER_IQDROPS:
3739		return (adapter->iqdrops);
3740	case IFCOUNTER_OQDROPS:
3741		rv = 0;
3742		txr = adapter->tx_rings;
3743		for (int i = 0; i < adapter->num_queues; i++, txr++)
3744			rv += txr->br->br_drops;
3745		return (rv);
3746	case IFCOUNTER_IERRORS:
3747		return (adapter->ierrors);
3748	default:
3749		return (if_get_counter_default(ifp, cnt));
3750	}
3751}
3752#endif
3753
3754/** ixgbe_sysctl_tdh_handler - Handler function
3755 *  Retrieves the TDH value from the hardware
3756 */
3757static int
3758ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3759{
3760	int error;
3761
3762	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3763	if (!txr) return 0;
3764
3765	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3766	error = sysctl_handle_int(oidp, &val, 0, req);
3767	if (error || !req->newptr)
3768		return error;
3769	return 0;
3770}
3771
3772/** ixgbe_sysctl_tdt_handler - Handler function
3773 *  Retrieves the TDT value from the hardware
3774 */
3775static int
3776ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3777{
3778	int error;
3779
3780	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3781	if (!txr) return 0;
3782
3783	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3784	error = sysctl_handle_int(oidp, &val, 0, req);
3785	if (error || !req->newptr)
3786		return error;
3787	return 0;
3788}
3789
3790/** ixgbe_sysctl_rdh_handler - Handler function
3791 *  Retrieves the RDH value from the hardware
3792 */
3793static int
3794ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3795{
3796	int error;
3797
3798	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3799	if (!rxr) return 0;
3800
3801	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3802	error = sysctl_handle_int(oidp, &val, 0, req);
3803	if (error || !req->newptr)
3804		return error;
3805	return 0;
3806}
3807
3808/** ixgbe_sysctl_rdt_handler - Handler function
3809 *  Retrieves the RDT value from the hardware
3810 */
3811static int
3812ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3813{
3814	int error;
3815
3816	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3817	if (!rxr) return 0;
3818
3819	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3820	error = sysctl_handle_int(oidp, &val, 0, req);
3821	if (error || !req->newptr)
3822		return error;
3823	return 0;
3824}
3825
3826static int
3827ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3828{
3829	int error;
3830	struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3831	unsigned int reg, usec, rate;
3832
3833	reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3834	usec = ((reg & 0x0FF8) >> 3);
3835	if (usec > 0)
3836		rate = 500000 / usec;
3837	else
3838		rate = 0;
3839	error = sysctl_handle_int(oidp, &rate, 0, req);
3840	if (error || !req->newptr)
3841		return error;
3842	reg &= ~0xfff; /* default, no limitation */
3843	ixgbe_max_interrupt_rate = 0;
3844	if (rate > 0 && rate < 500000) {
3845		if (rate < 1000)
3846			rate = 1000;
3847		ixgbe_max_interrupt_rate = rate;
3848		reg |= ((4000000/rate) & 0xff8 );
3849	}
3850	IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
3851	return 0;
3852}
3853
/*
** Register the device-level sysctl nodes: flow control,
** interrupt moderation, advertised speed and thermal test
** for all parts, plus DMAC/EEE/WoL/PHY nodes gated on the
** specific device id or MAC type that supports them.
*/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Sysctls for all devices */
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
			ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);

        SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
			CTLFLAG_RW,
			&ixgbe_enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
			ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
			CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
			ixgbe_sysctl_thermal_test, "I", "Thermal Test");

	/* for X550 devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_dmac, "I", "DMA Coalesce");

	/* for X550T and X550EM backplane devices */
	if (hw->device_id == IXGBE_DEV_ID_X550T ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
		struct sysctl_oid *eee_node;
		struct sysctl_oid_list *eee_list;

		eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
					   CTLFLAG_RD, NULL,
					   "Energy Efficient Ethernet sysctls");
		eee_list = SYSCTL_CHILDREN(eee_node);

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_eee_enable, "I",
				"Enable or Disable EEE");

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_eee_negotiated, "I",
				"EEE negotiated on link");

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_eee_tx_lpi_status, "I",
				"Whether or not TX link is in LPI state");

		SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_eee_rx_lpi_status, "I",
				"Whether or not RX link is in LPI state");
	}

	/* for certain 10GBaseT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550T ||
	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_wol_enable, "I",
				"Enable/Disable Wake on LAN");

		SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
				CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
				ixgbe_sysctl_wufc, "I",
				"Enable/Disable Wake Up Filters");
	}

	/* for X550EM 10GBaseT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		struct sysctl_oid *phy_node;
		struct sysctl_oid_list *phy_list;

		phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
					   CTLFLAG_RD, NULL,
					   "External PHY sysctls");
		phy_list = SYSCTL_CHILDREN(phy_node);

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_phy_temp, "I",
				"Current External PHY Temperature (Celsius)");

		SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
				CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
				ixgbe_sysctl_phy_overtemp_occurred, "I",
				"External PHY High Temperature Event Occurred");
	}
}
3955
3956/*
3957 * Add sysctl variables, one per statistic, to the system.
3958 */
3959static void
3960ixgbe_add_hw_stats(struct adapter *adapter)
3961{
3962	device_t dev = adapter->dev;
3963
3964	struct tx_ring *txr = adapter->tx_rings;
3965	struct rx_ring *rxr = adapter->rx_rings;
3966
3967	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3968	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3969	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3970	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3971
3972	struct sysctl_oid *stat_node, *queue_node;
3973	struct sysctl_oid_list *stat_list, *queue_list;
3974
3975#define QUEUE_NAME_LEN 32
3976	char namebuf[QUEUE_NAME_LEN];
3977
3978	/* Driver Statistics */
3979	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3980			CTLFLAG_RD, &adapter->dropped_pkts,
3981			"Driver dropped packets");
3982	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3983			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3984			"m_defrag() failed");
3985	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3986			CTLFLAG_RD, &adapter->watchdog_events,
3987			"Watchdog timeouts");
3988	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
3989			CTLFLAG_RD, &adapter->link_irq,
3990			"Link MSIX IRQ Handled");
3991
3992	for (int i = 0; i < adapter->num_queues; i++, txr++) {
3993		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3994		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3995					    CTLFLAG_RD, NULL, "Queue Name");
3996		queue_list = SYSCTL_CHILDREN(queue_node);
3997
3998		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
3999				CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4000				sizeof(&adapter->queues[i]),
4001				ixgbe_sysctl_interrupt_rate_handler, "IU",
4002				"Interrupt Rate");
4003		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4004				CTLFLAG_RD, &(adapter->queues[i].irqs),
4005				"irqs on this queue");
4006		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
4007				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4008				ixgbe_sysctl_tdh_handler, "IU",
4009				"Transmit Descriptor Head");
4010		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
4011				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4012				ixgbe_sysctl_tdt_handler, "IU",
4013				"Transmit Descriptor Tail");
4014		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4015				CTLFLAG_RD, &txr->tso_tx,
4016				"TSO");
4017		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4018				CTLFLAG_RD, &txr->no_tx_dma_setup,
4019				"Driver tx dma failure in xmit");
4020		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4021				CTLFLAG_RD, &txr->no_desc_avail,
4022				"Queue No Descriptor Available");
4023		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4024				CTLFLAG_RD, &txr->total_packets,
4025				"Queue Packets Transmitted");
4026		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4027				CTLFLAG_RD, &txr->br->br_drops,
4028				"Packets dropped in buf_ring");
4029	}
4030
4031	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4032		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4033		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4034					    CTLFLAG_RD, NULL, "Queue Name");
4035		queue_list = SYSCTL_CHILDREN(queue_node);
4036
4037		struct lro_ctrl *lro = &rxr->lro;
4038
4039		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4040		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4041					    CTLFLAG_RD, NULL, "Queue Name");
4042		queue_list = SYSCTL_CHILDREN(queue_node);
4043
4044		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
4045				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4046				ixgbe_sysctl_rdh_handler, "IU",
4047				"Receive Descriptor Head");
4048		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
4049				CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4050				ixgbe_sysctl_rdt_handler, "IU",
4051				"Receive Descriptor Tail");
4052		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4053				CTLFLAG_RD, &rxr->rx_packets,
4054				"Queue Packets Received");
4055		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4056				CTLFLAG_RD, &rxr->rx_bytes,
4057				"Queue Bytes Received");
4058		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4059				CTLFLAG_RD, &rxr->rx_copies,
4060				"Copied RX Frames");
4061		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4062				CTLFLAG_RD, &lro->lro_queued, 0,
4063				"LRO Queued");
4064		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4065				CTLFLAG_RD, &lro->lro_flushed, 0,
4066				"LRO Flushed");
4067	}
4068
4069	/* MAC stats get the own sub node */
4070
4071	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
4072				    CTLFLAG_RD, NULL, "MAC Statistics");
4073	stat_list = SYSCTL_CHILDREN(stat_node);
4074
4075	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4076			CTLFLAG_RD, &stats->crcerrs,
4077			"CRC Errors");
4078	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4079			CTLFLAG_RD, &stats->illerrc,
4080			"Illegal Byte Errors");
4081	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4082			CTLFLAG_RD, &stats->errbc,
4083			"Byte Errors");
4084	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4085			CTLFLAG_RD, &stats->mspdc,
4086			"MAC Short Packets Discarded");
4087	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4088			CTLFLAG_RD, &stats->mlfc,
4089			"MAC Local Faults");
4090	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4091			CTLFLAG_RD, &stats->mrfc,
4092			"MAC Remote Faults");
4093	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4094			CTLFLAG_RD, &stats->rlec,
4095			"Receive Length Errors");
4096
4097	/* Flow Control stats */
4098	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4099			CTLFLAG_RD, &stats->lxontxc,
4100			"Link XON Transmitted");
4101	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4102			CTLFLAG_RD, &stats->lxonrxc,
4103			"Link XON Received");
4104	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4105			CTLFLAG_RD, &stats->lxofftxc,
4106			"Link XOFF Transmitted");
4107	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4108			CTLFLAG_RD, &stats->lxoffrxc,
4109			"Link XOFF Received");
4110
4111	/* Packet Reception Stats */
4112	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4113			CTLFLAG_RD, &stats->tor,
4114			"Total Octets Received");
4115	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4116			CTLFLAG_RD, &stats->gorc,
4117			"Good Octets Received");
4118	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4119			CTLFLAG_RD, &stats->tpr,
4120			"Total Packets Received");
4121	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4122			CTLFLAG_RD, &stats->gprc,
4123			"Good Packets Received");
4124	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4125			CTLFLAG_RD, &stats->mprc,
4126			"Multicast Packets Received");
4127	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4128			CTLFLAG_RD, &stats->bprc,
4129			"Broadcast Packets Received");
4130	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4131			CTLFLAG_RD, &stats->prc64,
4132			"64 byte frames received ");
4133	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4134			CTLFLAG_RD, &stats->prc127,
4135			"65-127 byte frames received");
4136	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4137			CTLFLAG_RD, &stats->prc255,
4138			"128-255 byte frames received");
4139	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4140			CTLFLAG_RD, &stats->prc511,
4141			"256-511 byte frames received");
4142	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4143			CTLFLAG_RD, &stats->prc1023,
4144			"512-1023 byte frames received");
4145	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4146			CTLFLAG_RD, &stats->prc1522,
4147			"1023-1522 byte frames received");
4148	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4149			CTLFLAG_RD, &stats->ruc,
4150			"Receive Undersized");
4151	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4152			CTLFLAG_RD, &stats->rfc,
4153			"Fragmented Packets Received ");
4154	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4155			CTLFLAG_RD, &stats->roc,
4156			"Oversized Packets Received");
4157	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4158			CTLFLAG_RD, &stats->rjc,
4159			"Received Jabber");
4160	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4161			CTLFLAG_RD, &stats->mngprc,
4162			"Management Packets Received");
4163	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4164			CTLFLAG_RD, &stats->mngptc,
4165			"Management Packets Dropped");
4166	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4167			CTLFLAG_RD, &stats->xec,
4168			"Checksum Errors");
4169
4170	/* Packet Transmission Stats */
4171	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4172			CTLFLAG_RD, &stats->gotc,
4173			"Good Octets Transmitted");
4174	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4175			CTLFLAG_RD, &stats->tpt,
4176			"Total Packets Transmitted");
4177	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4178			CTLFLAG_RD, &stats->gptc,
4179			"Good Packets Transmitted");
4180	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4181			CTLFLAG_RD, &stats->bptc,
4182			"Broadcast Packets Transmitted");
4183	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4184			CTLFLAG_RD, &stats->mptc,
4185			"Multicast Packets Transmitted");
4186	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4187			CTLFLAG_RD, &stats->mngptc,
4188			"Management Packets Transmitted");
4189	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4190			CTLFLAG_RD, &stats->ptc64,
4191			"64 byte frames transmitted ");
4192	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4193			CTLFLAG_RD, &stats->ptc127,
4194			"65-127 byte frames transmitted");
4195	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4196			CTLFLAG_RD, &stats->ptc255,
4197			"128-255 byte frames transmitted");
4198	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4199			CTLFLAG_RD, &stats->ptc511,
4200			"256-511 byte frames transmitted");
4201	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4202			CTLFLAG_RD, &stats->ptc1023,
4203			"512-1023 byte frames transmitted");
4204	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4205			CTLFLAG_RD, &stats->ptc1522,
4206			"1024-1522 byte frames transmitted");
4207}
4208
4209/*
4210** Set flow control using sysctl:
4211** Flow control values:
4212** 	0 - off
4213**	1 - rx pause
4214**	2 - tx pause
4215**	3 - full
4216*/
4217static int
4218ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4219{
4220	int error, last;
4221	struct adapter *adapter = (struct adapter *) arg1;
4222
4223	last = adapter->fc;
4224	error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4225	if ((error) || (req->newptr == NULL))
4226		return (error);
4227
4228	/* Don't bother if it's not changed */
4229	if (adapter->fc == last)
4230		return (0);
4231
4232	switch (adapter->fc) {
4233		case ixgbe_fc_rx_pause:
4234		case ixgbe_fc_tx_pause:
4235		case ixgbe_fc_full:
4236			adapter->hw.fc.requested_mode = adapter->fc;
4237			if (adapter->num_queues > 1)
4238				ixgbe_disable_rx_drop(adapter);
4239			break;
4240		case ixgbe_fc_none:
4241			adapter->hw.fc.requested_mode = ixgbe_fc_none;
4242			if (adapter->num_queues > 1)
4243				ixgbe_enable_rx_drop(adapter);
4244			break;
4245		default:
4246			adapter->fc = last;
4247			return (EINVAL);
4248	}
4249	/* Don't autoneg if forcing a value */
4250	adapter->hw.fc.disable_fc_autoneg = TRUE;
4251	ixgbe_fc_enable(&adapter->hw);
4252	return error;
4253}
4254
4255/*
4256** Control advertised link speed:
4257**	Flags:
4258**	0x1 - advertise 100 Mb
4259**	0x2 - advertise 1G
4260**	0x4 - advertise 10G
4261*/
4262static int
4263ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4264{
4265	int			error = 0, requested;
4266	struct adapter		*adapter;
4267	device_t		dev;
4268	struct ixgbe_hw		*hw;
4269	ixgbe_link_speed	speed = 0;
4270
4271	adapter = (struct adapter *) arg1;
4272	dev = adapter->dev;
4273	hw = &adapter->hw;
4274
4275	requested = adapter->advertise;
4276	error = sysctl_handle_int(oidp, &requested, 0, req);
4277	if ((error) || (req->newptr == NULL))
4278		return (error);
4279
4280	/* Checks to validate new value */
4281	if (adapter->advertise == requested) /* no change */
4282		return (0);
4283
4284	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4285	    (hw->phy.multispeed_fiber))) {
4286		device_printf(dev,
4287		    "Advertised speed can only be set on copper or "
4288		    "multispeed fiber media types.\n");
4289		return (EINVAL);
4290	}
4291
4292	if (requested < 0x1 || requested > 0x7) {
4293		device_printf(dev,
4294		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4295		return (EINVAL);
4296	}
4297
4298	if ((requested & 0x1)
4299	    && (hw->mac.type != ixgbe_mac_X540)
4300	    && (hw->mac.type != ixgbe_mac_X550)) {
4301		device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4302		return (EINVAL);
4303	}
4304
4305	/* Set new value and report new advertised mode */
4306	if (requested & 0x1)
4307		speed |= IXGBE_LINK_SPEED_100_FULL;
4308	if (requested & 0x2)
4309		speed |= IXGBE_LINK_SPEED_1GB_FULL;
4310	if (requested & 0x4)
4311		speed |= IXGBE_LINK_SPEED_10GB_FULL;
4312
4313	hw->mac.autotry_restart = TRUE;
4314	hw->mac.ops.setup_link(hw, speed, TRUE);
4315	adapter->advertise = requested;
4316
4317	return (error);
4318}
4319
4320/*
4321 * The following two sysctls are for X550 BaseT devices;
4322 * they deal with the external PHY used in them.
4323 */
4324static int
4325ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4326{
4327	struct adapter	*adapter = (struct adapter *) arg1;
4328	struct ixgbe_hw *hw = &adapter->hw;
4329	u16 reg;
4330
4331	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4332		device_printf(adapter->dev,
4333		    "Device has no supported external thermal sensor.\n");
4334		return (ENODEV);
4335	}
4336
4337	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4338				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4339				      &reg)) {
4340		device_printf(adapter->dev,
4341		    "Error reading from PHY's current temperature register\n");
4342		return (EAGAIN);
4343	}
4344
4345	/* Shift temp for output */
4346	reg = reg >> 8;
4347
4348	return (sysctl_handle_int(oidp, NULL, reg, req));
4349}
4350
4351/*
4352 * Reports whether the current PHY temperature is over
4353 * the overtemp threshold.
4354 *  - This is reported directly from the PHY
4355 */
4356static int
4357ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4358{
4359	struct adapter	*adapter = (struct adapter *) arg1;
4360	struct ixgbe_hw *hw = &adapter->hw;
4361	u16 reg;
4362
4363	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4364		device_printf(adapter->dev,
4365		    "Device has no supported external thermal sensor.\n");
4366		return (ENODEV);
4367	}
4368
4369	if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4370				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4371				      &reg)) {
4372		device_printf(adapter->dev,
4373		    "Error reading from PHY's temperature status register\n");
4374		return (EAGAIN);
4375	}
4376
4377	/* Get occurrence bit */
4378	reg = !!(reg & 0x4000);
4379	return (sysctl_handle_int(oidp, 0, reg, req));
4380}
4381
4382/*
4383** Thermal Shutdown Trigger (internal MAC)
4384**   - Set this to 1 to cause an overtemp event to occur
4385*/
4386static int
4387ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4388{
4389	struct adapter	*adapter = (struct adapter *) arg1;
4390	struct ixgbe_hw *hw = &adapter->hw;
4391	int error, fire = 0;
4392
4393	error = sysctl_handle_int(oidp, &fire, 0, req);
4394	if ((error) || (req->newptr == NULL))
4395		return (error);
4396
4397	if (fire) {
4398		u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4399		reg |= IXGBE_EICR_TS;
4400		IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4401	}
4402
4403	return (0);
4404}
4405
4406/*
4407** Manage DMA Coalescing.
4408** Control values:
4409** 	0/1 - off / on (use default value of 1000)
4410**
4411**	Legal timer values are:
4412**	50,100,250,500,1000,2000,5000,10000
4413**
4414**	Turning off interrupt moderation will also turn this off.
4415*/
4416static int
4417ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4418{
4419	struct adapter *adapter = (struct adapter *) arg1;
4420	struct ixgbe_hw *hw = &adapter->hw;
4421	struct ifnet *ifp = adapter->ifp;
4422	int		error;
4423	u16		oldval;
4424
4425	oldval = adapter->dmac;
4426	error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4427	if ((error) || (req->newptr == NULL))
4428		return (error);
4429
4430	switch (hw->mac.type) {
4431	case ixgbe_mac_X550:
4432	case ixgbe_mac_X550EM_x:
4433		break;
4434	default:
4435		device_printf(adapter->dev,
4436		    "DMA Coalescing is only supported on X550 devices\n");
4437		return (ENODEV);
4438	}
4439
4440	switch (adapter->dmac) {
4441	case 0:
4442		/* Disabled */
4443		break;
4444	case 1: /* Enable and use default */
4445		adapter->dmac = 1000;
4446		break;
4447	case 50:
4448	case 100:
4449	case 250:
4450	case 500:
4451	case 1000:
4452	case 2000:
4453	case 5000:
4454	case 10000:
4455		/* Legal values - allow */
4456		break;
4457	default:
4458		/* Do nothing, illegal value */
4459		adapter->dmac = oldval;
4460		return (EINVAL);
4461	}
4462
4463	/* Re-initialize hardware if it's already running */
4464	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4465		ixgbe_init(adapter);
4466
4467	return (0);
4468}
4469
4470/*
4471 * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4472 * Values:
4473 *	0 - disabled
4474 *	1 - enabled
4475 */
4476static int
4477ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4478{
4479	struct adapter *adapter = (struct adapter *) arg1;
4480	struct ixgbe_hw *hw = &adapter->hw;
4481	int new_wol_enabled;
4482	int error = 0;
4483
4484	new_wol_enabled = hw->wol_enabled;
4485	error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4486	if ((error) || (req->newptr == NULL))
4487		return (error);
4488	if (new_wol_enabled == hw->wol_enabled)
4489		return (0);
4490
4491	if (new_wol_enabled > 0 && !adapter->wol_support)
4492		return (ENODEV);
4493	else
4494		hw->wol_enabled = !!(new_wol_enabled);
4495
4496	return (0);
4497}
4498
4499/*
4500 * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4501 * if supported by the adapter.
4502 * Values:
4503 *	0 - disabled
4504 *	1 - enabled
4505 */
4506static int
4507ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4508{
4509	struct adapter *adapter = (struct adapter *) arg1;
4510	struct ifnet *ifp = adapter->ifp;
4511	int new_eee_enabled, error = 0;
4512
4513	new_eee_enabled = adapter->eee_enabled;
4514	error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4515	if ((error) || (req->newptr == NULL))
4516		return (error);
4517	if (new_eee_enabled == adapter->eee_enabled)
4518		return (0);
4519
4520	if (new_eee_enabled > 0 && !adapter->eee_support)
4521		return (ENODEV);
4522	else
4523		adapter->eee_enabled = !!(new_eee_enabled);
4524
4525	/* Re-initialize hardware if it's already running */
4526	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4527		ixgbe_init(adapter);
4528
4529	return (0);
4530}
4531
4532/*
4533 * Read-only sysctl indicating whether EEE support was negotiated
4534 * on the link.
4535 */
4536static int
4537ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4538{
4539	struct adapter *adapter = (struct adapter *) arg1;
4540	struct ixgbe_hw *hw = &adapter->hw;
4541	bool status;
4542
4543	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4544
4545	return (sysctl_handle_int(oidp, 0, status, req));
4546}
4547
4548/*
4549 * Read-only sysctl indicating whether RX Link is in LPI state.
4550 */
4551static int
4552ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4553{
4554	struct adapter *adapter = (struct adapter *) arg1;
4555	struct ixgbe_hw *hw = &adapter->hw;
4556	bool status;
4557
4558	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4559	    IXGBE_EEE_RX_LPI_STATUS);
4560
4561	return (sysctl_handle_int(oidp, 0, status, req));
4562}
4563
4564/*
4565 * Read-only sysctl indicating whether TX Link is in LPI state.
4566 */
4567static int
4568ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4569{
4570	struct adapter *adapter = (struct adapter *) arg1;
4571	struct ixgbe_hw *hw = &adapter->hw;
4572	bool status;
4573
4574	status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4575	    IXGBE_EEE_TX_LPI_STATUS);
4576
4577	return (sysctl_handle_int(oidp, 0, status, req));
4578}
4579
4580/*
4581 * Sysctl to enable/disable the types of packets that the
4582 * adapter will wake up on upon receipt.
4583 * WUFC - Wake Up Filter Control
4584 * Flags:
4585 *	0x1  - Link Status Change
4586 *	0x2  - Magic Packet
4587 *	0x4  - Direct Exact
4588 *	0x8  - Directed Multicast
4589 *	0x10 - Broadcast
4590 *	0x20 - ARP/IPv4 Request Packet
4591 *	0x40 - Direct IPv4 Packet
4592 *	0x80 - Direct IPv6 Packet
4593 *
4594 * Setting another flag will cause the sysctl to return an
4595 * error.
4596 */
4597static int
4598ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4599{
4600	struct adapter *adapter = (struct adapter *) arg1;
4601	int error = 0;
4602	u32 new_wufc;
4603
4604	new_wufc = adapter->wufc;
4605
4606	error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4607	if ((error) || (req->newptr == NULL))
4608		return (error);
4609	if (new_wufc == adapter->wufc)
4610		return (0);
4611
4612	if (new_wufc & 0xffffff00)
4613		return (EINVAL);
4614	else {
4615		new_wufc &= 0xff;
4616		new_wufc |= (0xffffff & adapter->wufc);
4617		adapter->wufc = new_wufc;
4618	}
4619
4620	return (0);
4621}
4622
4623/*
4624** Enable the hardware to drop packets when the buffer is
4625** full. This is useful when multiqueue,so that no single
4626** queue being full stalls the entire RX engine. We only
4627** enable this when Multiqueue AND when Flow Control is
4628** disabled.
4629*/
4630static void
4631ixgbe_enable_rx_drop(struct adapter *adapter)
4632{
4633        struct ixgbe_hw *hw = &adapter->hw;
4634
4635	for (int i = 0; i < adapter->num_queues; i++) {
4636        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4637        	srrctl |= IXGBE_SRRCTL_DROP_EN;
4638        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4639	}
4640}
4641
4642static void
4643ixgbe_disable_rx_drop(struct adapter *adapter)
4644{
4645        struct ixgbe_hw *hw = &adapter->hw;
4646
4647	for (int i = 0; i < adapter->num_queues; i++) {
4648        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4649        	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4650        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4651	}
4652}
4653
4654static void
4655ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4656{
4657	u32 mask;
4658
4659	switch (adapter->hw.mac.type) {
4660	case ixgbe_mac_82598EB:
4661		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4662		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4663		break;
4664	case ixgbe_mac_82599EB:
4665	case ixgbe_mac_X540:
4666	case ixgbe_mac_X550:
4667	case ixgbe_mac_X550EM_x:
4668		mask = (queues & 0xFFFFFFFF);
4669		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4670		mask = (queues >> 32);
4671		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4672		break;
4673	default:
4674		break;
4675	}
4676}
4677
4678
4679