/* if_ixl.c revision 269198 */
1/******************************************************************************
2
3  Copyright (c) 2013-2014, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/i40e/if_i40e.c 269198 2014-07-28 21:57:09Z jfv $*/
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_inet.h"
37#include "opt_inet6.h"
38#endif
39
40#include "i40e.h"
41#include "i40e_pf.h"
42
43/*********************************************************************
44 *  Driver version
45 *********************************************************************/
46char i40e_driver_version[] = "1.0.0";
47
48/*********************************************************************
49 *  PCI Device ID Table
50 *
51 *  Used by probe to select devices to load on
52 *  Last field stores an index into i40e_strings
53 *  Last entry must be all 0s
54 *
55 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56 *********************************************************************/
57
/*
 * PCI IDs this driver claims.  A zero subvendor/subdevice entry acts as a
 * wildcard in i40e_probe()'s match loop; the final all-zero row terminates
 * the table.
 */
static i40e_vendor_info_t i40e_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
70
71/*********************************************************************
72 *  Table of branding strings
73 *********************************************************************/
74
/* Branding strings, indexed by the last field of i40e_vendor_info_array. */
static char    *i40e_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};
78
79
80/*********************************************************************
81 *  Function prototypes
82 *********************************************************************/
/* Device lifecycle (newbus methods) */
static int      i40e_probe(device_t);
static int      i40e_attach(device_t);
static int      i40e_detach(device_t);
static int      i40e_shutdown(device_t);
static int	i40e_get_hw_capabilities(struct i40e_pf *);
static void	i40e_cap_txcsum_tso(struct i40e_vsi *, struct ifnet *, int);
static int      i40e_ioctl(struct ifnet *, u_long, caddr_t);
static void	i40e_init(void *);
static void	i40e_init_locked(struct i40e_pf *);
static void     i40e_stop(struct i40e_pf *);
static void     i40e_media_status(struct ifnet *, struct ifmediareq *);
static int      i40e_media_change(struct ifnet *);
static void     i40e_update_link_status(struct i40e_pf *);
static int      i40e_allocate_pci_resources(struct i40e_pf *);
static u16	i40e_get_bus_info(struct i40e_hw *, device_t);
static int	i40e_setup_stations(struct i40e_pf *);
static int	i40e_setup_vsi(struct i40e_vsi *);
static int	i40e_initialize_vsi(struct i40e_vsi *);
static int	i40e_assign_vsi_msix(struct i40e_pf *);
static int	i40e_assign_vsi_legacy(struct i40e_pf *);
static int	i40e_init_msix(struct i40e_pf *);
static void	i40e_configure_msix(struct i40e_pf *);
static void	i40e_configure_itr(struct i40e_pf *);
static void	i40e_configure_legacy(struct i40e_pf *);
static void	i40e_free_pci_resources(struct i40e_pf *);
static void	i40e_local_timer(void *);
static int	i40e_setup_interface(device_t, struct i40e_vsi *);
static bool	i40e_config_link(struct i40e_hw *);
static void	i40e_config_rss(struct i40e_vsi *);
static void	i40e_set_queue_rx_itr(struct i40e_queue *);
static void	i40e_set_queue_tx_itr(struct i40e_queue *);

/* Ring and interrupt enable/disable helpers */
static void	i40e_enable_rings(struct i40e_vsi *);
static void	i40e_disable_rings(struct i40e_vsi *);
static void     i40e_enable_intr(struct i40e_vsi *);
static void     i40e_disable_intr(struct i40e_vsi *);

static void     i40e_enable_adminq(struct i40e_hw *);
static void     i40e_disable_adminq(struct i40e_hw *);
static void     i40e_enable_queue(struct i40e_hw *, int);
static void     i40e_disable_queue(struct i40e_hw *, int);
static void     i40e_enable_legacy(struct i40e_hw *);
static void     i40e_disable_legacy(struct i40e_hw *);

/* Receive filter (promisc / multicast / VLAN) management */
static void     i40e_set_promisc(struct i40e_vsi *);
static void     i40e_add_multi(struct i40e_vsi *);
static void     i40e_del_multi(struct i40e_vsi *);
static void	i40e_register_vlan(void *, struct ifnet *, u16);
static void	i40e_unregister_vlan(void *, struct ifnet *, u16);
static void	i40e_setup_vlan_filters(struct i40e_vsi *);

/* Software MAC filter list management */
static void	i40e_init_filters(struct i40e_vsi *);
static void	i40e_add_filter(struct i40e_vsi *, u8 *, s16 vlan);
static void	i40e_del_filter(struct i40e_vsi *, u8 *, s16 vlan);
static void	i40e_add_hw_filters(struct i40e_vsi *, int, int);
static void	i40e_del_hw_filters(struct i40e_vsi *, int);
static struct i40e_mac_filter *
		i40e_find_filter(struct i40e_vsi *, u8 *, s16);
static void	i40e_add_mc_filter(struct i40e_vsi *, u8 *);

/* Sysctl debug interface */
static int	i40e_debug_info(SYSCTL_HANDLER_ARGS);
static void	i40e_print_debug_info(struct i40e_pf *);

/* The MSI/X Interrupt handlers */
static void	i40e_intr(void *);
static void	i40e_msix_que(void *);
static void	i40e_msix_adminq(void *);
static void	i40e_handle_mdd_event(struct i40e_pf *);

/* Deferred interrupt tasklets */
static void	i40e_do_adminq(void *, int);

/* Sysctl handlers */
static int	i40e_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	i40e_set_advertise(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     i40e_add_hw_stats(struct i40e_pf *);
static void	i40e_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	i40e_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	i40e_update_stats_counters(struct i40e_pf *);
static void	i40e_update_eth_stats(struct i40e_vsi *);
static void	i40e_pf_reset_stats(struct i40e_pf *);
static void	i40e_vsi_reset_stats(struct i40e_vsi *);
static void	i40e_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	i40e_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);

#ifdef I40E_DEBUG
static int 	i40e_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	i40e_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	i40e_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	i40e_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS);
static int	i40e_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif
183
184/*********************************************************************
185 *  FreeBSD Device Interface Entry Points
186 *********************************************************************/
187
/* newbus method dispatch table; terminated by the all-zero entry. */
static device_method_t i40e_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, i40e_probe),
	DEVMETHOD(device_attach, i40e_attach),
	DEVMETHOD(device_detach, i40e_detach),
	DEVMETHOD(device_shutdown, i40e_shutdown),
	{0, 0}
};
196
/*
 * NOTE(review): the driver registers under the name "ixl" while the
 * module/devclass symbols use "i40e" — presumably an in-progress rename;
 * confirm against the rest of the tree before changing either.
 */
static driver_t i40e_driver = {
	"ixl", i40e_methods, sizeof(struct i40e_pf),
};

devclass_t i40e_devclass;
DRIVER_MODULE(i40e, pci, i40e_driver, i40e_devclass, 0, 0);

MODULE_DEPEND(i40e, pci, 1, 1, 1);
MODULE_DEPEND(i40e, ether, 1, 1, 1);
206
207/*
208** Global reset mutex
209*/
210static struct mtx i40e_reset_mtx;
211
212/*
213 * MSIX should be the default for best performance,
214 * but this allows it to be forced off for testing.
215 */
216static int i40e_enable_msix = 1;
217TUNABLE_INT("hw.i40e.enable_msix", &i40e_enable_msix);
218
219/*
220** Number of descriptors per ring:
221**   - TX and RX are the same size
222*/
223static int i40e_ringsz = DEFAULT_RING;
224TUNABLE_INT("hw.i40e.ringsz", &i40e_ringsz);
225
226/*
227** This can be set manually, if left as 0 the
228** number of queues will be calculated based
229** on cpus and msix vectors available.
230*/
231int i40e_max_queues = 0;
232TUNABLE_INT("hw.i40e.max_queues", &i40e_max_queues);
233
234/*
235** Controls for Interrupt Throttling
236**	- true/false for dynamic adjustment
237** 	- default values for static ITR
238*/
239int i40e_dynamic_rx_itr = 0;
240TUNABLE_INT("hw.i40e.dynamic_rx_itr", &i40e_dynamic_rx_itr);
241int i40e_dynamic_tx_itr = 0;
242TUNABLE_INT("hw.i40e.dynamic_tx_itr", &i40e_dynamic_tx_itr);
243
244int i40e_rx_itr = I40E_ITR_8K;
245TUNABLE_INT("hw.i40e.rx_itr", &i40e_rx_itr);
246int i40e_tx_itr = I40E_ITR_4K;
247TUNABLE_INT("hw.i40e.tx_itr", &i40e_tx_itr);
248
249#ifdef I40E_FDIR
250static int i40e_enable_fdir = 1;
251TUNABLE_INT("hw.i40e.enable_fdir", &i40e_enable_fdir);
252/* Rate at which we sample */
253int i40e_atr_rate = 20;
254TUNABLE_INT("hw.i40e.atr_rate", &i40e_atr_rate);
255#endif
256
257
/* Human-readable flow-control mode names; indices correspond to the
 * hw fc mode values used by the "fc" sysctl handler. */
static char *i40e_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};
266
267
268/*********************************************************************
269 *  Device identification routine
270 *
271 *  i40e_probe determines if the driver should be loaded on
272 *  the hardware based on PCI vendor/device id of the device.
273 *
274 *  return BUS_PROBE_DEFAULT on success, positive on failure
275 *********************************************************************/
276
277static int
278i40e_probe(device_t dev)
279{
280	i40e_vendor_info_t *ent;
281
282	u16	pci_vendor_id, pci_device_id;
283	u16	pci_subvendor_id, pci_subdevice_id;
284	char	device_name[256];
285	static bool lock_init = FALSE;
286
287	INIT_DEBUGOUT("i40e_probe: begin");
288
289	pci_vendor_id = pci_get_vendor(dev);
290	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
291		return (ENXIO);
292
293	pci_device_id = pci_get_device(dev);
294	pci_subvendor_id = pci_get_subvendor(dev);
295	pci_subdevice_id = pci_get_subdevice(dev);
296
297	ent = i40e_vendor_info_array;
298	while (ent->vendor_id != 0) {
299		if ((pci_vendor_id == ent->vendor_id) &&
300		    (pci_device_id == ent->device_id) &&
301
302		    ((pci_subvendor_id == ent->subvendor_id) ||
303		     (ent->subvendor_id == 0)) &&
304
305		    ((pci_subdevice_id == ent->subdevice_id) ||
306		     (ent->subdevice_id == 0))) {
307			sprintf(device_name, "%s, Version - %s",
308				i40e_strings[ent->index],
309				i40e_driver_version);
310			device_set_desc_copy(dev, device_name);
311			/* One shot mutex init */
312			if (lock_init == FALSE) {
313				lock_init = TRUE;
314				mtx_init(&i40e_reset_mtx,
315				    "i40e_reset",
316				    "I40E RESET Lock", MTX_DEF);
317			}
318			return (BUS_PROBE_DEFAULT);
319		}
320		ent++;
321	}
322	return (ENXIO);
323}
324
325/*********************************************************************
326 *  Device initialization routine
327 *
328 *  The attach entry point is called when the driver is being loaded.
329 *  This routine identifies the type of hardware, allocates all resources
330 *  and initializes the hardware.
331 *
332 *  return 0 on success, positive on failure
333 *********************************************************************/
334
335static int
336i40e_attach(device_t dev)
337{
338	struct i40e_pf	*pf;
339	struct i40e_hw	*hw;
340	struct i40e_vsi *vsi;
341	u16		bus;
342	int             error = 0;
343
344	INIT_DEBUGOUT("i40e_attach: begin");
345
346	/* Allocate, clear, and link in our primary soft structure */
347	pf = device_get_softc(dev);
348	pf->dev = pf->osdep.dev = dev;
349	hw = &pf->hw;
350
351	/*
352	** Note this assumes we have a single embedded VSI,
353	** this could be enhanced later to allocate multiple
354	*/
355	vsi = &pf->vsi;
356	vsi->dev = pf->dev;
357
358	/* Core Lock Init*/
359	I40E_PF_LOCK_INIT(pf, device_get_nameunit(dev));
360
361	/* Set up the timer callout */
362	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
363
364	/* Set up sysctls */
365	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
366	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
367	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
368	    pf, 0, i40e_set_flowcntl, "I", "Flow Control");
369
370	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
371	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
372	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
373	    pf, 0, i40e_set_advertise, "I", "Advertised Speed");
374
375	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
376	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
377	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
378	    &i40e_rx_itr, I40E_ITR_8K, "RX ITR");
379
380	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
381	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
382	    OID_AUTO, "dynamic_rx_itr", CTLTYPE_INT | CTLFLAG_RW,
383	    &i40e_dynamic_rx_itr, 0, "Dynamic RX ITR");
384
385	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
386	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
387	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
388	    &i40e_tx_itr, I40E_ITR_4K, "TX ITR");
389
390	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
391	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
392	    OID_AUTO, "dynamic_tx_itr", CTLTYPE_INT | CTLFLAG_RW,
393	    &i40e_dynamic_tx_itr, 0, "Dynamic TX ITR");
394
395#ifdef I40E_DEBUG
396	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
397	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
398	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
399	    pf, 0, i40e_sysctl_link_status, "A", "Current Link Status");
400
401	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
402	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
403	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
404	    pf, 0, i40e_sysctl_phy_abilities, "A", "PHY Abilities");
405
406	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
407	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
408	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
409	    pf, 0, i40e_sysctl_sw_filter_list, "A", "SW Filter List");
410
411	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
412	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
413	    OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD,
414	    pf, 0, i40e_sysctl_hw_res_info, "A", "HW Resource Allocation");
415
416	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
417	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
418	    OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
419	    pf, 0, i40e_sysctl_dump_txd, "I", "Desc dump");
420#endif
421
422	/* Save off the information about this board */
423	hw->vendor_id = pci_get_vendor(dev);
424	hw->device_id = pci_get_device(dev);
425	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
426	hw->subsystem_vendor_id =
427	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
428	hw->subsystem_device_id =
429	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
430
431	hw->bus.device = pci_get_slot(dev);
432	hw->bus.func = pci_get_function(dev);
433
434	/* Do PCI setup - map BAR0, etc */
435	if (i40e_allocate_pci_resources(pf)) {
436		device_printf(dev, "Allocation of PCI resources failed\n");
437		error = ENXIO;
438		goto err_out;
439	}
440
441	/* Create for initial debugging use */
442	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
443	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
444	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
445	    i40e_debug_info, "I", "Debug Information");
446
447
448	/* Establish a clean starting point */
449	i40e_clear_hw(hw);
450	error = i40e_pf_reset(hw);
451	if (error) {
452		device_printf(dev,"PF reset failure %x\n", error);
453		error = EIO;
454		goto err_out;
455	}
456
457	/* For now always do an initial CORE reset on first device */
458	{
459		static int	i40e_dev_count;
460		static int	i40e_dev_track[32];
461		u32		my_dev;
462		int		i, found = FALSE;
463		u16		bus = pci_get_bus(dev);
464
465		mtx_lock(&i40e_reset_mtx);
466		my_dev = (bus << 8) | hw->bus.device;
467
468		for (i = 0; i < i40e_dev_count; i++) {
469			if (i40e_dev_track[i] == my_dev)
470				found = TRUE;
471		}
472
473                if (!found) {
474                        u32 reg;
475
476                        i40e_dev_track[i40e_dev_count] = my_dev;
477                        i40e_dev_count++;
478
479                        device_printf(dev, "Initial CORE RESET\n");
480                        wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
481                        i40e_flush(hw);
482                        i = 50;
483                        do {
484				i40e_msec_delay(50);
485                                reg = rd32(hw, I40E_GLGEN_RSTAT);
486                                if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
487                                        break;
488                        } while (i--);
489
490                        /* paranoia */
491                        wr32(hw, I40E_PF_ATQLEN, 0);
492                        wr32(hw, I40E_PF_ATQBAL, 0);
493                        wr32(hw, I40E_PF_ATQBAH, 0);
494                        i40e_clear_pxe_mode(hw);
495                }
496                mtx_unlock(&i40e_reset_mtx);
497	}
498
499	/* Set admin queue parameters */
500	hw->aq.num_arq_entries = I40E_AQ_LEN;
501	hw->aq.num_asq_entries = I40E_AQ_LEN;
502	hw->aq.arq_buf_size = I40E_AQ_BUFSZ;
503	hw->aq.asq_buf_size = I40E_AQ_BUFSZ;
504
505	/* Initialize the shared code */
506	error = i40e_init_shared_code(hw);
507	if (error) {
508		device_printf(dev,"Unable to initialize the shared code\n");
509		error = EIO;
510		goto err_out;
511	}
512
513	/* Set up the admin queue */
514	error = i40e_init_adminq(hw);
515	if (error) {
516		device_printf(dev, "The driver for the device stopped "
517		    "because the NVM image is newer than expected.\n"
518		    "You must install the most recent version of "
519		    " the network driver.\n");
520		goto err_out;
521	}
522	device_printf(dev, "%s\n", i40e_fw_version_str(hw));
523
524        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
525	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
526		device_printf(dev, "The driver for the device detected "
527		    "a newer version of the NVM image than expected.\n"
528		    "Please install the most recent version of the network driver.\n");
529	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
530	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
531		device_printf(dev, "The driver for the device detected "
532		    "an older version of the NVM image than expected.\n"
533		    "Please update the NVM image.\n");
534
535	/* Clear PXE mode */
536	i40e_clear_pxe_mode(hw);
537
538	/* Get capabilities from the device */
539	error = i40e_get_hw_capabilities(pf);
540	if (error) {
541		device_printf(dev, "HW capabilities failure!\n");
542		goto err_get_cap;
543	}
544
545	/* Set up host memory cache */
546	error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
547	if (error) {
548		device_printf(dev, "init_lan_hmc failed: %d\n", error);
549		goto err_get_cap;
550	}
551
552	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
553	if (error) {
554		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
555		goto err_mac_hmc;
556	}
557
558	/* Disable LLDP from the firmware */
559	i40e_aq_stop_lldp(hw, TRUE, NULL);
560
561	i40e_get_mac_addr(hw, hw->mac.addr);
562	error = i40e_validate_mac_addr(hw->mac.addr);
563	if (error) {
564		device_printf(dev, "validate_mac_addr failed: %d\n", error);
565		goto err_mac_hmc;
566	}
567	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
568	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
569
570	if (i40e_setup_stations(pf) != 0) {
571		device_printf(dev, "setup stations failed!\n");
572		error = ENOMEM;
573		goto err_mac_hmc;
574	}
575
576	/* Initialize mac filter list for VSI */
577	SLIST_INIT(&vsi->ftl);
578
579	/* Set up interrupt routing here */
580	if (pf->msix > 1)
581		error = i40e_assign_vsi_msix(pf);
582	else
583		error = i40e_assign_vsi_legacy(pf);
584	if (error)
585		goto err_late;
586
587	/* Determine link state */
588	vsi->link_up = i40e_config_link(hw);
589
590	/* Report if Unqualified modules are found */
591	if ((vsi->link_up == FALSE) &&
592	    (pf->hw.phy.link_info.link_info &
593	    I40E_AQ_MEDIA_AVAILABLE) &&
594	    (!(pf->hw.phy.link_info.an_info &
595	    I40E_AQ_QUALIFIED_MODULE)))
596		device_printf(dev, "Link failed because "
597		    "an unqualified module was detected\n");
598
599	/* Setup OS specific network interface */
600	if (i40e_setup_interface(dev, vsi) != 0)
601		goto err_late;
602
603	/* Get the bus configuration and set the shared code */
604	bus = i40e_get_bus_info(hw, dev);
605	i40e_set_pci_config_data(hw, bus);
606
607	/* Initialize statistics */
608	i40e_pf_reset_stats(pf);
609	i40e_update_stats_counters(pf);
610	i40e_add_hw_stats(pf);
611
612	/* Register for VLAN events */
613	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
614	    i40e_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
615	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
616	    i40e_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
617
618
619	INIT_DEBUGOUT("i40e_attach: end");
620	return (0);
621
622err_late:
623	i40e_free_vsi(vsi);
624err_mac_hmc:
625	i40e_shutdown_lan_hmc(hw);
626err_get_cap:
627	i40e_shutdown_adminq(hw);
628err_out:
629	if (vsi->ifp != NULL)
630		if_free(vsi->ifp);
631	i40e_free_pci_resources(pf);
632	I40E_PF_LOCK_DESTROY(pf);
633	return (error);
634}
635
636/*********************************************************************
637 *  Device removal routine
638 *
639 *  The detach entry point is called when the driver is being removed.
640 *  This routine stops the adapter and deallocates all the resources
641 *  that were allocated for driver operation.
642 *
643 *  return 0 on success, positive on failure
644 *********************************************************************/
645
static int
i40e_detach(device_t dev)
{
	struct i40e_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct i40e_vsi		*vsi = &pf->vsi;
	struct i40e_queue	*que = vsi->queues;
	i40e_status		status;
	u32			reg;

	INIT_DEBUGOUT("i40e_detach: begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Quiesce the hardware under the PF lock before tearing down */
	I40E_PF_LOCK(pf);
	i40e_stop(pf);
	I40E_PF_UNLOCK(pf);

	/* Drain and free each queue's taskqueue so no deferred work
	 * runs after the rings are released below */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->task);
			taskqueue_drain(que->tq, &que->tx_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain other tasks here */

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Now force a pf reset so the device comes back clean on reload */
	reg = rd32(hw, I40E_PFGEN_CTRL);
	reg |= I40E_PFGEN_CTRL_PFSWR_MASK;
	wr32(hw, I40E_PFGEN_CTRL, reg);
	//i40e_pf_reset(hw);
	i40e_flush(hw);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Detach from the network stack, then stop the timer */
	ether_ifdetach(vsi->ifp);
	callout_drain(&pf->timer);

	/* Release PCI resources and software state last */
	i40e_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	i40e_free_vsi(vsi);
	I40E_PF_LOCK_DESTROY(pf);
	return (0);
}
714
715/*********************************************************************
716 *
717 *  Shutdown entry point
718 *
719 **********************************************************************/
720
721static int
722i40e_shutdown(device_t dev)
723{
724	struct i40e_pf *pf = device_get_softc(dev);
725	I40E_PF_LOCK(pf);
726	i40e_stop(pf);
727	I40E_PF_UNLOCK(pf);
728	return (0);
729}
730
731
732/*********************************************************************
733 *
734 *  Get the hardware capabilities
735 *
736 **********************************************************************/
737
738static int
739i40e_get_hw_capabilities(struct i40e_pf *pf)
740{
741	struct i40e_aqc_list_capabilities_element_resp *buf;
742	struct i40e_hw	*hw = &pf->hw;
743	device_t 	dev = pf->dev;
744	int             error, len;
745	u16		needed;
746	bool		again = TRUE;
747
748	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
749retry:
750	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
751	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
752		device_printf(dev, "Unable to allocate cap memory\n");
753                return (ENOMEM);
754	}
755
756	/* This populates the hw struct */
757        error = i40e_aq_discover_capabilities(hw, buf, len,
758	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
759	free(buf, M_DEVBUF);
760	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
761	    (again == TRUE)) {
762		/* retry once with a larger buffer */
763		again = FALSE;
764		len = needed;
765		goto retry;
766	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
767		device_printf(dev, "capability discovery failed: %d\n",
768		    pf->hw.aq.asq_last_status);
769		return (ENODEV);
770	}
771
772	/* Capture this PF's starting queue pair */
773	pf->qbase = hw->func_caps.base_queue;
774
775#ifdef I40E_DEBUG
776	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
777	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
778	    hw->pf_id, hw->func_caps.num_vfs,
779	    hw->func_caps.num_msix_vectors,
780	    hw->func_caps.num_msix_vectors_vf,
781	    hw->func_caps.fd_filters_guaranteed,
782	    hw->func_caps.fd_filters_best_effort,
783	    hw->func_caps.num_tx_qp,
784	    hw->func_caps.num_rx_qp,
785	    hw->func_caps.base_queue);
786#endif
787	return (error);
788}
789
/*
 * Apply a SIOCSIFCAP toggle mask to the TX checksum / TSO capability
 * pair, enforcing the hardware's dependency: TSO requires TX checksum
 * offload.  The I40E_FLAGS_KEEP_TSO{4,6} bits in vsi->flags remember
 * that TSO was force-disabled alongside txcsum so it can be restored
 * when txcsum is re-enabled.  The three branches per family cover the
 * (csum off, tso off), (csum on, tso off) and (csum on, tso on) states.
 */
static void
i40e_cap_txcsum_tso(struct i40e_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t 	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & I40E_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~I40E_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			/* TSO implies txcsum: turn both on together */
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~I40E_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			/* disabling txcsum drags TSO down with it; remember */
			vsi->flags |= I40E_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 — mirrors the IPv4 logic above */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & I40E_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~I40E_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~I40E_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= I40E_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}
861
862/*********************************************************************
863 *  Ioctl entry point
864 *
865 *  i40e_ioctl is called when the user wants to configure the
866 *  interface.
867 *
868 *  return 0 on success, positive on failure
869 **********************************************************************/
870
871static int
872i40e_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
873{
874	struct i40e_vsi	*vsi = ifp->if_softc;
875	struct i40e_pf	*pf = (struct i40e_pf *)vsi->back;
876	struct ifreq	*ifr = (struct ifreq *) data;
877#if defined(INET) || defined(INET6)
878	struct ifaddr *ifa = (struct ifaddr *)data;
879	bool		avoid_reset = FALSE;
880#endif
881	int             error = 0;
882
883	switch (command) {
884
885        case SIOCSIFADDR:
886#ifdef INET
887		if (ifa->ifa_addr->sa_family == AF_INET)
888			avoid_reset = TRUE;
889#endif
890#ifdef INET6
891		if (ifa->ifa_addr->sa_family == AF_INET6)
892			avoid_reset = TRUE;
893#endif
894#if defined(INET) || defined(INET6)
895		/*
896		** Calling init results in link renegotiation,
897		** so we avoid doing it when possible.
898		*/
899		if (avoid_reset) {
900			ifp->if_flags |= IFF_UP;
901			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
902				i40e_init(pf);
903			if (!(ifp->if_flags & IFF_NOARP))
904				arp_ifinit(ifp, ifa);
905		} else
906			error = ether_ioctl(ifp, command, data);
907		break;
908#endif
909	case SIOCSIFMTU:
910		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
911		if (ifr->ifr_mtu > I40E_MAX_FRAME -
912		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
913			error = EINVAL;
914		} else {
915			I40E_PF_LOCK(pf);
916			ifp->if_mtu = ifr->ifr_mtu;
917			vsi->max_frame_size =
918				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
919			    + ETHER_VLAN_ENCAP_LEN;
920			i40e_init_locked(pf);
921			I40E_PF_UNLOCK(pf);
922		}
923		break;
924	case SIOCSIFFLAGS:
925		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
926		I40E_PF_LOCK(pf);
927		if (ifp->if_flags & IFF_UP) {
928			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
929				if ((ifp->if_flags ^ pf->if_flags) &
930				    (IFF_PROMISC | IFF_ALLMULTI)) {
931					i40e_set_promisc(vsi);
932				}
933			} else
934				i40e_init_locked(pf);
935		} else
936			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
937				i40e_stop(pf);
938		pf->if_flags = ifp->if_flags;
939		I40E_PF_UNLOCK(pf);
940		break;
941	case SIOCADDMULTI:
942		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
943		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
944			I40E_PF_LOCK(pf);
945			i40e_disable_intr(vsi);
946			i40e_add_multi(vsi);
947			i40e_enable_intr(vsi);
948			I40E_PF_UNLOCK(pf);
949		}
950		break;
951	case SIOCDELMULTI:
952		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
953		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
954			I40E_PF_LOCK(pf);
955			i40e_disable_intr(vsi);
956			i40e_del_multi(vsi);
957			i40e_enable_intr(vsi);
958			I40E_PF_UNLOCK(pf);
959		}
960		break;
961	case SIOCSIFMEDIA:
962	case SIOCGIFMEDIA:
963		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
964		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
965		break;
966	case SIOCSIFCAP:
967	{
968		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
969		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
970
971		i40e_cap_txcsum_tso(vsi, ifp, mask);
972
973		if (mask & IFCAP_RXCSUM)
974			ifp->if_capenable ^= IFCAP_RXCSUM;
975		if (mask & IFCAP_RXCSUM_IPV6)
976			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
977		if (mask & IFCAP_LRO)
978			ifp->if_capenable ^= IFCAP_LRO;
979		if (mask & IFCAP_VLAN_HWTAGGING)
980			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
981		if (mask & IFCAP_VLAN_HWFILTER)
982			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
983		if (mask & IFCAP_VLAN_HWTSO)
984			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
985		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
986			I40E_PF_LOCK(pf);
987			i40e_init_locked(pf);
988			I40E_PF_UNLOCK(pf);
989		}
990		VLAN_CAPABILITIES(ifp);
991
992		break;
993	}
994
995	default:
996		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
997		error = ether_ioctl(ifp, command, data);
998		break;
999	}
1000
1001	return (error);
1002}
1003
1004
1005/*********************************************************************
1006 *  Init entry point
1007 *
1008 *  This routine is used in two ways. It is used by the stack as
1009 *  init entry point in network interface structure. It is also used
1010 *  by the driver as a hw/sw initialization routine to get to a
1011 *  consistent state.
1012 *
 *  This routine is void; failures are reported via device_printf.
1014 **********************************************************************/
1015
1016static void
1017i40e_init_locked(struct i40e_pf *pf)
1018{
1019	struct i40e_hw	*hw = &pf->hw;
1020	struct i40e_vsi	*vsi = &pf->vsi;
1021	struct ifnet	*ifp = vsi->ifp;
1022	device_t 	dev = pf->dev;
1023	struct i40e_filter_control_settings	filter;
1024	u8		tmpaddr[ETHER_ADDR_LEN];
1025	int		ret;
1026
1027	mtx_assert(&pf->pf_mtx, MA_OWNED);
1028	INIT_DEBUGOUT("i40e_init: begin");
1029	i40e_stop(pf);
1030
1031	/* Get the latest mac address... User might use a LAA */
1032	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
1033	      I40E_ETH_LENGTH_OF_ADDRESS);
1034	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
1035	    i40e_validate_mac_addr(tmpaddr)) {
1036		bcopy(tmpaddr, hw->mac.addr,
1037		    I40E_ETH_LENGTH_OF_ADDRESS);
1038		ret = i40e_aq_mac_address_write(hw,
1039		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
1040		    hw->mac.addr, NULL);
1041		if (ret) {
1042			device_printf(dev, "LLA address"
1043			 "change failed!!\n");
1044			return;
1045		}
1046	}
1047
1048	/* Set the various hardware offload abilities */
1049	ifp->if_hwassist = 0;
1050	if (ifp->if_capenable & IFCAP_TSO)
1051		ifp->if_hwassist |= CSUM_TSO;
1052	if (ifp->if_capenable & IFCAP_TXCSUM)
1053		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1054	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1055		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
1056
1057	/* Set up the device filtering */
1058	bzero(&filter, sizeof(filter));
1059	filter.enable_ethtype = TRUE;
1060	filter.enable_macvlan = TRUE;
1061#ifdef I40E_FDIR
1062	filter.enable_fdir = TRUE;
1063#endif
1064	if (i40e_set_filter_control(hw, &filter))
1065		device_printf(dev, "set_filter_control() failed\n");
1066
1067	/* Set up RSS */
1068	i40e_config_rss(vsi);
1069
1070	/* Setup the VSI */
1071	i40e_setup_vsi(vsi);
1072
1073	/*
1074	** Prepare the rings, hmc contexts, etc...
1075	*/
1076	if (i40e_initialize_vsi(vsi)) {
1077		device_printf(dev,"initialize vsi failed!!\n");
1078		return;
1079	}
1080
1081	/* Add protocol filters to list */
1082	i40e_init_filters(vsi);
1083
1084	/* Setup vlan's if needed */
1085	i40e_setup_vlan_filters(vsi);
1086
1087	/* Start the local timer */
1088	callout_reset(&pf->timer, hz, i40e_local_timer, pf);
1089
1090	/* Set up MSI/X routing and the ITR settings */
1091	if (i40e_enable_msix) {
1092		i40e_configure_msix(pf);
1093		i40e_configure_itr(pf);
1094	} else
1095		i40e_configure_legacy(pf);
1096
1097	i40e_enable_rings(vsi);
1098
1099	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
1100
1101	/* Flow control setup */
1102	/* NOTE: flow control currently doesn't work correctly */
1103	// i40e_set_fc_mode(pf, I40E_FC_FULL);
1104
1105	/* Set MTU in hardware*/
1106	if (ifp->if_mtu > ETHERMTU) {
1107		int aq_error =
1108			i40e_aq_set_mac_config(hw, vsi->max_frame_size,
1109				TRUE, 0, NULL);
1110		if (aq_error)
1111			device_printf(vsi->dev,
1112				"aq_set_mac_config in init error, code %d\n",
1113			    aq_error);
1114	}
1115
1116	/* And now turn on interrupts */
1117	i40e_enable_intr(vsi);
1118
1119	/* Now inform the stack we're ready */
1120	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1121	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1122
1123	return;
1124}
1125
/*
** Unlocked init entry point handed to the network stack via
** if_init; simply takes the PF lock around i40e_init_locked().
*/
static void
i40e_init(void *arg)
{
	struct i40e_pf *pf = arg;

	I40E_PF_LOCK(pf);
	i40e_init_locked(pf);
	I40E_PF_UNLOCK(pf);
}
1136
1137/*
1138**
1139** MSIX Interrupt Handlers and Tasklets
1140**
1141*/
/*
** Deferred (taskqueue) per-queue service routine: cleans RX and TX,
** restarts any stack-queued transmits, and either re-schedules itself
** (when RX work remains) or re-enables the queue's interrupt.
*/
static void
i40e_handle_que(void *context, int pending)
{
	struct i40e_queue *que = context;
	struct i40e_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* more == TRUE means RX hit the limit; work remains */
		more = i40e_rxeof(que, I40E_RX_LIMIT);
		I40E_TX_LOCK(txr);
		i40e_txeof(que);
		/* Push out anything the stack queued while we were busy */
		if (!drbr_empty(ifp, txr->br))
			i40e_mq_start_locked(ifp, txr);
		I40E_TX_UNLOCK(txr);
		if (more) {
			/* Leave the interrupt masked and run again */
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Reenable this interrupt - hmmm */
	i40e_enable_queue(hw, que->me);
	return;
}
1169
1170
1171/*********************************************************************
1172 *
1173 *  Legacy Interrupt Service routine
1174 *
1175 **********************************************************************/
/*
** Legacy/MSI interrupt handler: a single vector covers the admin
** queue and queue 0. Admin-queue events are deferred to the adminq
** task; otherwise RX/TX are serviced inline and the shared causes
** are re-armed.
*/
void
i40e_intr(void *arg)
{
	struct i40e_pf		*pf = arg;
	struct i40e_hw		*hw =  &pf->hw;
	struct i40e_vsi		*vsi = &pf->vsi;
	struct i40e_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
        u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Reading ICR0 latches (and clears) the pending causes */
	icr0 = rd32(hw, I40E_PFINT_ICR0);

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

        mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/*
	** Admin queue work takes precedence; RX/TX for this pass is
	** skipped and will be picked up on the next interrupt.
	*/
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = i40e_rxeof(que, I40E_RX_LIMIT);

	I40E_TX_LOCK(txr);
	more_tx = i40e_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	I40E_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	/*
	** NOTE(review): this clears an ICR0 bit definition in a TQCTL
	** register value — looks like the wrong mask family; verify
	** against the QINT_TQCTL register layout in the datasheet.
	*/
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	i40e_enable_legacy(hw);

	return;
}
1232
1233
1234/*********************************************************************
1235 *
1236 *  MSIX VSI Interrupt Service routine
1237 *
1238 **********************************************************************/
/*
** Per-queue MSI-X interrupt handler: cleans RX and TX for the
** queue, updates the adaptive ITR settings, and either defers
** remaining work to the queue task or re-enables the interrupt.
*/
void
i40e_msix_que(void *arg)
{
	struct i40e_queue	*que = arg;
	struct i40e_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = i40e_rxeof(que, I40E_RX_LIMIT);

	I40E_TX_LOCK(txr);
	more_tx = i40e_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	I40E_TX_UNLOCK(txr);

	/* Re-tune interrupt moderation based on observed traffic */
	i40e_set_queue_rx_itr(que);
	i40e_set_queue_tx_itr(que);

	/* Defer leftover work; otherwise re-arm the vector */
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		i40e_enable_queue(hw, que->me);

	return;
}
1277
1278
1279/*********************************************************************
1280 *
1281 *  MSIX Admin Queue Interrupt Service routine
1282 *
1283 **********************************************************************/
/*
** Admin queue MSI-X interrupt handler: records the cause bits and
** defers all actual processing to the adminq taskqueue.
*/
static void
i40e_msix_adminq(void *arg)
{
	struct i40e_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	/* Reading ICR0 latches the pending "other" causes */
	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	/* Malicious driver detection is handled inline */
	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		i40e_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_VFLR_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;

	/*
	** NOTE(review): `mask` is trimmed above but never written back
	** to I40E_PFINT_ICR0_ENA, so the handled causes are not
	** actually masked off here — confirm whether the adminq task
	** re-programs the enable register.
	*/
	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}
1315
1316/*********************************************************************
1317 *
1318 *  Media Ioctl callback
1319 *
1320 *  This routine is called whenever the user queries the status of
1321 *  the interface using ifconfig.
1322 *
1323 **********************************************************************/
/*
** ifmedia status callback: refreshes link state under the PF lock
** and translates the PHY type reported by firmware into ifmedia
** active-media flags. Bails out early (IFM_AVALID only) when the
** link is down.
*/
static void
i40e_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct i40e_vsi	*vsi = ifp->if_softc;
	struct i40e_pf	*pf = (struct i40e_pf *)vsi->back;
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("i40e_media_status: begin");
	I40E_PF_LOCK(pf);

	i40e_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report only that status is valid */
	if (!vsi->link_up) {
		I40E_PF_UNLOCK(pf);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	/* Map firmware PHY type onto the matching ifmedia subtype */
	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;

	I40E_PF_UNLOCK(pf);

	return;
}
1399
1400/*********************************************************************
1401 *
1402 *  Media Ioctl callback
1403 *
1404 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt option with ifconfig.
1406 *
1407 **********************************************************************/
1408static int
1409i40e_media_change(struct ifnet * ifp)
1410{
1411	struct i40e_vsi *vsi = ifp->if_softc;
1412	struct ifmedia *ifm = &vsi->media;
1413
1414	INIT_DEBUGOUT("i40e_media_change: begin");
1415
1416	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1417		return (EINVAL);
1418
1419	if_printf(ifp, "Media change is currently not supported.\n");
1420
1421	return (ENODEV);
1422}
1423
1424
1425#ifdef I40E_FDIR
1426/*
1427** ATR: Application Targetted Receive - creates a filter
1428**	based on TX flow info that will keep the receive
1429**	portion of the flow on the same queue. Based on the
1430**	implementation this is only available for TCP connections
1431*/
/*
** Build a Flow Director programming descriptor in the TX ring for
** this TCP flow. SYN/FIN packets are always sampled; otherwise a
** descriptor is emitted every atr_rate transmits. FIN triggers
** filter removal, anything else adds/updates the filter.
*/
void
i40e_atr(struct i40e_queue *que, struct tcphdr *th, int etype)
{
	struct i40e_vsi			*vsi = que->vsi;
	struct tx_ring			*txr = &que->txr;
	struct i40e_filter_program_desc	*FDIR;
	u32				ptype, dtype;
	int				idx;

	/* check if ATR is enabled and sample rate */
	if ((!i40e_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
                return;
	txr->atr_count = 0;

	/* Get a descriptor to use */
	idx = txr->next_avail;
	/* The programming descriptor occupies a normal TX slot */
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;

	/* Steer matching RX traffic back to this queue */
	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	/* Select the v4 or v6 TCP packet classifier type */
	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise its an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	/* Descriptors are little-endian as seen by the hardware */
	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
1495#endif
1496
1497
1498static void
1499i40e_set_promisc(struct i40e_vsi *vsi)
1500{
1501	struct ifnet	*ifp = vsi->ifp;
1502	struct i40e_hw	*hw = vsi->hw;
1503	int		err, mcnt = 0;
1504	bool		uni = FALSE, multi = FALSE;
1505
1506	if (ifp->if_flags & IFF_ALLMULTI)
1507                multi = TRUE;
1508	else { /* Need to count the multicast addresses */
1509		struct  ifmultiaddr *ifma;
1510		if_maddr_rlock(ifp);
1511		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1512                        if (ifma->ifma_addr->sa_family != AF_LINK)
1513                                continue;
1514                        if (mcnt == MAX_MULTICAST_ADDR)
1515                                break;
1516                        mcnt++;
1517		}
1518		if_maddr_runlock(ifp);
1519	}
1520
1521	if (mcnt >= MAX_MULTICAST_ADDR)
1522                multi = TRUE;
1523        if (ifp->if_flags & IFF_PROMISC)
1524		uni = TRUE;
1525
1526	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1527	    vsi->seid, uni, NULL);
1528	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1529	    vsi->seid, multi, NULL);
1530	return;
1531}
1532
1533/*********************************************************************
1534 * 	Filter Routines
1535 *
1536 *	Routines for multicast and vlan filter management.
1537 *
1538 *********************************************************************/
/*
** Sync the hardware multicast filter list with the interface's
** multicast membership. Falls back to multicast promiscuous mode
** when there are more groups than filter slots.
*/
static void
i40e_add_multi(struct i40e_vsi *vsi)
{
	struct	ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("i40e_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	/* Too many groups: drop individual filters, go promiscuous */
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		i40e_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	/* Second pass: queue a filter entry for each group address */
	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		i40e_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	/* Push the queued filters down to the hardware in one batch */
	if (mcnt > 0) {
		flags = (I40E_FILTER_ADD | I40E_FILTER_USED | I40E_FILTER_MC);
		i40e_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("i40e_add_multi: end");
	return;
}
1587
/*
** Remove hardware multicast filters for addresses the stack no
** longer subscribes to: each driver-tracked MC filter is checked
** against the interface's current multicast list; unmatched ones
** are flagged for deletion and then removed in one batch.
*/
static void
i40e_del_multi(struct i40e_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct i40e_mac_filter	*f;
	int			mcnt = 0;
	bool		match = FALSE;

	IOCTL_DEBUGOUT("i40e_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		/* Only consider active multicast filter entries */
		if ((f->flags & I40E_FILTER_USED) && (f->flags & I40E_FILTER_MC)) {
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			/* Not in the stack's list anymore: mark for removal */
			if (match == FALSE) {
				f->flags |= I40E_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		i40e_del_hw_filters(vsi, mcnt);
}
1624
1625
1626/*********************************************************************
1627 *  Timer routine
1628 *
 *  This routine checks for link status, updates statistics,
1630 *  and runs the watchdog check.
1631 *
1632 **********************************************************************/
1633
/*
** Periodic (1 Hz) watchdog, run with the PF lock held via the
** callout. Kicks the adminq task, refreshes statistics, nudges
** busy queues with a software interrupt, and tracks per-queue
** hang state. A full reinit happens only when every queue is hung.
*/
static void
i40e_local_timer(void *arg)
{
	struct i40e_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct i40e_vsi		*vsi = &pf->vsi;
	struct i40e_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;
	u32			mask;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	i40e_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++,que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == I40E_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= I40E_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->busy = I40E_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	/* Re-arm for the next tick */
	callout_reset(&pf->timer, hz, i40e_local_timer, pf);
	return;

hung:
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	/* i40e_init_locked restarts the callout itself */
	i40e_init_locked(pf);
}
1696
1697/*
1698** Note: this routine updates the OS on the link state
1699**	the real check of the hardware only happens with
1700**	a link interrupt.
1701*/
1702static void
1703i40e_update_link_status(struct i40e_pf *pf)
1704{
1705	struct i40e_vsi		*vsi = &pf->vsi;
1706	struct i40e_hw		*hw = &pf->hw;
1707	struct ifnet		*ifp = vsi->ifp;
1708	device_t		dev = pf->dev;
1709	enum i40e_fc_mode 	fc;
1710
1711
1712	if (vsi->link_up){
1713		if (vsi->link_active == FALSE) {
1714			i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1715			if (bootverbose) {
1716				fc = hw->fc.current_mode;
1717				device_printf(dev,"Link is up %d Gbps %s,"
1718				    " Flow Control: %s\n",
1719				    ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
1720				    "Full Duplex", i40e_fc_string[fc]);
1721			}
1722			vsi->link_active = TRUE;
1723			if_link_state_change(ifp, LINK_STATE_UP);
1724		}
1725	} else { /* Link down */
1726		if (vsi->link_active == TRUE) {
1727			if (bootverbose)
1728				device_printf(dev,"Link is Down\n");
1729			if_link_state_change(ifp, LINK_STATE_DOWN);
1730			vsi->link_active = FALSE;
1731		}
1732	}
1733
1734	return;
1735}
1736
1737/*********************************************************************
1738 *
1739 *  This routine disables all traffic on the adapter by issuing a
1740 *  global reset on the MAC and deallocates TX/RX buffers.
1741 *
1742 **********************************************************************/
1743
1744static void
1745i40e_stop(struct i40e_pf *pf)
1746{
1747	struct i40e_vsi	*vsi = &pf->vsi;
1748	struct ifnet	*ifp = vsi->ifp;
1749
1750	mtx_assert(&pf->pf_mtx, MA_OWNED);
1751
1752	INIT_DEBUGOUT("i40e_stop: begin\n");
1753	i40e_disable_intr(vsi);
1754	i40e_disable_rings(vsi);
1755
1756	/* Tell the stack that the interface is no longer active */
1757	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1758
1759	/* Stop the local timer */
1760	callout_stop(&pf->timer);
1761
1762	return;
1763}
1764
1765
1766/*********************************************************************
1767 *
 *  Setup the legacy/MSI interrupt resources and handler for the VSI
1769 *
1770 **********************************************************************/
/*
** Allocate and wire up the single legacy/MSI interrupt: one IRQ
** resource, the i40e_intr handler, and taskqueues for deferred
** queue work and adminq processing.
** Returns 0 on success, ENXIO/error on failure.
*/
static int
i40e_assign_vsi_legacy(struct i40e_pf *pf)
{
	device_t        dev = pf->dev;
	struct 		i40e_vsi *vsi = &pf->vsi;
	struct		i40e_queue *que = vsi->queues;
	int 		error, rid = 0;

	/* MSI uses rid 1; pure legacy INTx uses rid 0 */
	if (pf->msix == 1)
		rid = 1;
	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (pf->res == NULL) {
		device_printf(dev,"Unable to allocate"
		    " bus resource: vsi legacy/msi interrupt\n");
		return (ENXIO);
	}

	/* Set the handler function */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    i40e_intr, pf, &pf->tag);
	if (error) {
		pf->res = NULL;
		device_printf(dev, "Failed to register legacy/msi handler");
		return (error);
	}
	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
	/* Deferred-work tasks for the single queue */
	TASK_INIT(&que->tx_task, 0, i40e_deferred_mq_start, que);
	TASK_INIT(&que->task, 0, i40e_handle_que, que);
	que->tq = taskqueue_create_fast("i40e_que", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
	    device_get_nameunit(dev));
	/* Separate taskqueue for admin queue processing */
	TASK_INIT(&pf->adminq, 0, i40e_do_adminq, pf);
	pf->tq = taskqueue_create_fast("i40e_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
	    device_get_nameunit(dev));

	return (0);
}
1813
1814
1815/*********************************************************************
1816 *
1817 *  Setup MSIX Interrupt resources and handlers for the VSI
1818 *
1819 **********************************************************************/
/*
** Allocate and wire up the MSI-X vectors: vector 0 is dedicated
** to the admin queue, then one vector per RX/TX queue pair, each
** bound to a CPU and given its own fast taskqueue.
** Returns 0 on success, ENXIO/error on failure.
*/
static int
i40e_assign_vsi_msix(struct i40e_pf *pf)
{
	device_t	dev = pf->dev;
	struct 		i40e_vsi *vsi = &pf->vsi;
	struct 		i40e_queue *que = vsi->queues;
	struct		tx_ring	 *txr;
	int 		error, rid, vector = 0;

	/* Admin Que is vector 0*/
	rid = vector + 1;
	pf->res = bus_alloc_resource_any(dev,
    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!pf->res) {
		device_printf(dev,"Unable to allocate"
    	    " bus resource: Adminq interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the adminq vector and handler */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    i40e_msix_adminq, pf, &pf->tag);
	if (error) {
		pf->res = NULL;
		device_printf(dev, "Failed to register Admin que handler");
		return (error);
	}
	bus_describe_intr(dev, pf->res, pf->tag, "aq");
	pf->admvec = vector;
	/* Tasklet for Admin Queue */
	TASK_INIT(&pf->adminq, 0, i40e_do_adminq, pf);
	pf->tq = taskqueue_create_fast("i40e_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &pf->tq);
	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
	    device_get_nameunit(pf->dev));
	++vector;

	/* Now set up the stations */
	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		/* rid is 1-based; vector numbering starts at 1 here */
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,"Unable to allocate"
		    	    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    i40e_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register que handler");
			return (error);
		}
		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
		/* Bind the vector to a CPU */
		bus_bind_intr(dev, que->res, i);
		que->msix = vector;
		/* Per-queue deferred-work tasks and taskqueue thread */
		TASK_INIT(&que->tx_task, 0, i40e_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, i40e_handle_que, que);
		que->tq = taskqueue_create_fast("i40e_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(pf->dev));
	}

	return (0);
}
1891
1892
1893/*
1894 * Allocate MSI/X vectors
1895 */
/*
** Decide how many MSI-X vectors to use (one per queue plus one
** for the admin queue) and allocate them, falling back to MSI or
** legacy INTx when MSI-X is unavailable or disabled by tunable.
** Returns the number of vectors allocated, or 0 for legacy setup.
*/
static int
i40e_init_msix(struct i40e_pf *pf)
{
	device_t dev = pf->dev;
	int rid, want, vectors, queues, available;

	/* Override by tuneable */
	if (i40e_enable_msix == 0)
		goto msi;

	/*
	** When used in a virtualized environment
	** PCI BUSMASTER capability may not be set
	** so explicity set it here and rewrite
	** the ENABLE in the MSIX control register
	** at this point to cause the host to
	** successfully initialize us.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;
		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* First try MSI/X */
	rid = PCIR_BAR(I40E_BAR);
	pf->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
       	if (!pf->msix_mem) {
		/* May not be enabled */
		device_printf(pf->dev,
		    "Unable to map MSIX table \n");
		goto msi;
	}

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, pf->msix_mem);
		pf->msix_mem = NULL;
		goto msi;
	}

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((i40e_max_queues != 0) && (i40e_max_queues <= queues))
		queues = i40e_max_queues;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
               	device_printf(pf->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		/*
		** NOTE(review): this path returns without releasing
		** pf->msix_mem, unlike the msix-disabled path above —
		** confirm the detach path frees it.
		*/
		return (0); /* Will go to Legacy setup */
	}

	if (pci_alloc_msix(dev, &vectors) == 0) {
               	device_printf(pf->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		pf->msix = vectors;
		pf->vsi.num_queues = queues;
		return (vectors);
	}
msi:
	/* Fall back: single queue with MSI, or legacy INTx */
       	vectors = pci_msi_count(dev);
	pf->vsi.num_queues = 1;
	pf->msix = 1;
	i40e_max_queues = 1;
	i40e_enable_msix = 0;
       	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
               	device_printf(pf->dev,"Using an MSI interrupt\n");
	else {
		pf->msix = 0;
               	device_printf(pf->dev,"Using a Legacy interrupt\n");
	}
	return (vectors);
}
1989
1990
1991/*
1992 * Plumb MSI/X vectors
1993 */
/*
** Program the hardware interrupt routing for MSI-X: vector 0 is
** set up for the "other"/adminq causes, then each queue pair is
** chained (RX cause -> TX queue, TX cause -> next RX queue, with
** EOL terminating the last queue's list).
*/
static void
i40e_configure_msix(struct i40e_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct i40e_vsi *vsi = &pf->vsi;
	u32		reg;
	u16		vector = 1;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	/* Enable the "other cause" interrupts we care about */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/* 0x7FF marks an empty cause list for vector 0 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), 0x003E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);

	/* Next configure the queues */
	for (int i = 0; i < vsi->num_queues; i++, vector++) {
		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
		wr32(hw, I40E_PFINT_LNKLSTN(i), i);

		/* RX cause: fires vector, next item is this TX queue */
		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
		(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
		wr32(hw, I40E_QINT_RQCTL(i), reg);

		/* TX cause: next item is the following RX queue (or EOL) */
		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
		if (i == (vsi->num_queues - 1))
			reg |= (I40E_QUEUE_EOL
			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
		wr32(hw, I40E_QINT_TQCTL(i), reg);
	}
}
2047
2048/*
2049 * Configure for MSI single vector operation
2050 */
2051static void
2052i40e_configure_legacy(struct i40e_pf *pf)
2053{
2054	struct i40e_hw	*hw = &pf->hw;
2055	u32		reg;
2056
2057
2058	wr32(hw, I40E_PFINT_ITR0(0), 0);
2059	wr32(hw, I40E_PFINT_ITR0(1), 0);
2060
2061
2062	/* Setup "other" causes */
2063	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2064	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2065	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2066	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2067	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2068	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2069	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2070	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2071	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2072	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2073	    ;
2074	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2075
2076	/* SW_ITR_IDX = 0, but don't change INTENA */
2077	wr32(hw, I40E_PFINT_DYN_CTL0,
2078	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2079	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2080	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2081	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2082
2083	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2084	wr32(hw, I40E_PFINT_LNKLST0, 0);
2085
2086	/* Associate the queue pair to the vector and enable the q int */
2087	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2088	    | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2089	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2090	wr32(hw, I40E_QINT_RQCTL(0), reg);
2091
2092	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2093	    | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2094	    | (I40E_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2095	wr32(hw, I40E_QINT_TQCTL(0), reg);
2096
2097	/* Next enable the queue pair */
2098	reg = rd32(hw, I40E_QTX_ENA(0));
2099	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2100	wr32(hw, I40E_QTX_ENA(0), reg);
2101
2102	reg = rd32(hw, I40E_QRX_ENA(0));
2103	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2104	wr32(hw, I40E_QRX_ENA(0), reg);
2105}
2106
2107
2108/*
2109 * Set the Initial ITR state
2110 */
2111static void
2112i40e_configure_itr(struct i40e_pf *pf)
2113{
2114	struct i40e_hw		*hw = &pf->hw;
2115	struct i40e_vsi		*vsi = &pf->vsi;
2116	struct i40e_queue	*que = vsi->queues;
2117
2118	vsi->rx_itr_setting = i40e_rx_itr;
2119	if (i40e_dynamic_rx_itr)
2120		vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
2121	vsi->tx_itr_setting = i40e_tx_itr;
2122	if (i40e_dynamic_tx_itr)
2123		vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
2124
2125	for (int i = 0; i < vsi->num_queues; i++, que++) {
2126		struct tx_ring	*txr = &que->txr;
2127		struct rx_ring 	*rxr = &que->rxr;
2128
2129		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, i),
2130		    vsi->rx_itr_setting);
2131		rxr->itr = vsi->rx_itr_setting;
2132		rxr->latency = I40E_AVE_LATENCY;
2133		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, i),
2134		    vsi->tx_itr_setting);
2135		txr->itr = vsi->tx_itr_setting;
2136		txr->latency = I40E_AVE_LATENCY;
2137	}
2138}
2139
2140
2141static int
2142i40e_allocate_pci_resources(struct i40e_pf *pf)
2143{
2144	int             rid;
2145	device_t        dev = pf->dev;
2146
2147	rid = PCIR_BAR(0);
2148	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2149	    &rid, RF_ACTIVE);
2150
2151	if (!(pf->pci_mem)) {
2152		device_printf(dev,"Unable to allocate bus resource: memory\n");
2153		return (ENXIO);
2154	}
2155
2156	pf->osdep.mem_bus_space_tag =
2157		rman_get_bustag(pf->pci_mem);
2158	pf->osdep.mem_bus_space_handle =
2159		rman_get_bushandle(pf->pci_mem);
2160	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2161
2162	pf->hw.back = &pf->osdep;
2163
2164	/*
2165	** Now setup MSI or MSI/X, should
2166	** return us the number of supported
2167	** vectors. (Will be 1 for MSI)
2168	*/
2169	pf->msix = i40e_init_msix(pf);
2170	return (0);
2171}
2172
2173static void
2174i40e_free_pci_resources(struct i40e_pf * pf)
2175{
2176	struct i40e_vsi		*vsi = &pf->vsi;
2177	struct i40e_queue	*que = vsi->queues;
2178	device_t		dev = pf->dev;
2179	int			rid, memrid;
2180
2181	memrid = PCIR_BAR(I40E_BAR);
2182
2183	/* We may get here before stations are setup */
2184	if ((!i40e_enable_msix) || (que == NULL))
2185		goto early;
2186
2187	/*
2188	**  Release all msix VSI resources:
2189	*/
2190	for (int i = 0; i < vsi->num_queues; i++, que++) {
2191		rid = que->msix + 1;
2192		if (que->tag != NULL) {
2193			bus_teardown_intr(dev, que->res, que->tag);
2194			que->tag = NULL;
2195		}
2196		if (que->res != NULL)
2197			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2198	}
2199
2200early:
2201	/* Clean the AdminQ interrupt last */
2202	if (pf->admvec) /* we are doing MSIX */
2203		rid = pf->admvec + 1;
2204	else
2205		(pf->msix != 0) ? (rid = 1):(rid = 0);
2206
2207	if (pf->tag != NULL) {
2208		bus_teardown_intr(dev, pf->res, pf->tag);
2209		pf->tag = NULL;
2210	}
2211	if (pf->res != NULL)
2212		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2213
2214	if (pf->msix)
2215		pci_release_msi(dev);
2216
2217	if (pf->msix_mem != NULL)
2218		bus_release_resource(dev, SYS_RES_MEMORY,
2219		    memrid, pf->msix_mem);
2220
2221	if (pf->pci_mem != NULL)
2222		bus_release_resource(dev, SYS_RES_MEMORY,
2223		    PCIR_BAR(0), pf->pci_mem);
2224
2225	return;
2226}
2227
2228
2229/*********************************************************************
2230 *
2231 *  Setup networking device structure and register an interface.
2232 *
2233 **********************************************************************/
2234static int
2235i40e_setup_interface(device_t dev, struct i40e_vsi *vsi)
2236{
2237	struct ifnet		*ifp;
2238	struct i40e_hw		*hw = vsi->hw;
2239	struct i40e_queue	*que = vsi->queues;
2240	struct i40e_aq_get_phy_abilities_resp abilities_resp;
2241	enum i40e_status_code aq_error = 0;
2242
2243	INIT_DEBUGOUT("i40e_setup_interface: begin");
2244
2245	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2246	if (ifp == NULL) {
2247		device_printf(dev, "can not allocate ifnet structure\n");
2248		return (-1);
2249	}
2250	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2251	ifp->if_mtu = ETHERMTU;
2252	ifp->if_baudrate = 4000000000;  // ??
2253	ifp->if_init = i40e_init;
2254	ifp->if_softc = vsi;
2255	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2256	ifp->if_ioctl = i40e_ioctl;
2257
2258	ifp->if_transmit = i40e_mq_start;
2259
2260	ifp->if_qflush = i40e_qflush;
2261
2262	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2263
2264	ether_ifattach(ifp, hw->mac.addr);
2265
2266	vsi->max_frame_size =
2267	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2268	    + ETHER_VLAN_ENCAP_LEN;
2269
2270	/*
2271	 * Tell the upper layer(s) we support long frames.
2272	 */
2273	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2274
2275	ifp->if_capabilities |= IFCAP_HWCSUM;
2276	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2277	ifp->if_capabilities |= IFCAP_TSO;
2278	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2279	ifp->if_capabilities |= IFCAP_LRO;
2280
2281	/* VLAN capabilties */
2282	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2283			     |  IFCAP_VLAN_HWTSO
2284			     |  IFCAP_VLAN_MTU
2285			     |  IFCAP_VLAN_HWCSUM;
2286	ifp->if_capenable = ifp->if_capabilities;
2287
2288	/*
2289	** Don't turn this on by default, if vlans are
2290	** created on another pseudo device (eg. lagg)
2291	** then vlan events are not passed thru, breaking
2292	** operation, but with HW FILTER off it works. If
2293	** using vlans directly on the i40e driver you can
2294	** enable this and get full hardware tag filtering.
2295	*/
2296	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2297
2298	/*
2299	 * Specify the media types supported by this adapter and register
2300	 * callbacks to update media and link information
2301	 */
2302	ifmedia_init(&vsi->media, IFM_IMASK, i40e_media_change,
2303		     i40e_media_status);
2304
2305	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2306	if (aq_error) {
2307		printf("Error getting supported media types, AQ error %d\n", aq_error);
2308		return (EPERM);
2309	}
2310
2311	/* Display supported media types */
2312	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2313		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2314
2315	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2316		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2317
2318	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2319	    abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2320		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2321	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2322		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2323	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2324		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2325
2326	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2327	    abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2328		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2329	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2330		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2331	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2332		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2333
2334	/* Use autoselect media by default */
2335	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2336	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2337
2338	return (0);
2339}
2340
2341static bool
2342i40e_config_link(struct i40e_hw *hw)
2343{
2344	bool check;
2345
2346	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2347	check = i40e_get_link_status(hw);
2348#ifdef I40E_DEBUG
2349	printf("Link is %s\n", check ? "up":"down");
2350#endif
2351	return (check);
2352}
2353
2354/*********************************************************************
2355 *
2356 *  Initialize this VSI
2357 *
2358 **********************************************************************/
2359static int
2360i40e_setup_vsi(struct i40e_vsi *vsi)
2361{
2362	struct i40e_hw	*hw = vsi->hw;
2363	device_t 	dev = vsi->dev;
2364	struct i40e_aqc_get_switch_config_resp *sw_config;
2365	struct i40e_vsi_context	ctxt;
2366	u8	aq_buf[I40E_AQ_LARGE_BUF];
2367	int	ret = I40E_SUCCESS;
2368	u16	next = 0;
2369
2370	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2371	ret = i40e_aq_get_switch_config(hw, sw_config,
2372	    sizeof(aq_buf), &next, NULL);
2373	if (ret) {
2374		device_printf(dev,"aq_get_switch_config failed!!\n");
2375		return (ret);
2376	}
2377#ifdef I40E_DEBUG
2378	printf("Switch config: header reported: %d in structure, %d total\n",
2379    	    sw_config->header.num_reported, sw_config->header.num_total);
2380	printf("type=%d seid=%d uplink=%d downlink=%d\n",
2381	    sw_config->element[0].element_type,
2382	    sw_config->element[0].seid,
2383	    sw_config->element[0].uplink_seid,
2384	    sw_config->element[0].downlink_seid);
2385#endif
2386	/* Save off this important value */
2387	vsi->seid = sw_config->element[0].seid;
2388
2389	memset(&ctxt, 0, sizeof(ctxt));
2390	ctxt.seid = vsi->seid;
2391	ctxt.pf_num = hw->pf_id;
2392	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2393	if (ret) {
2394		device_printf(dev,"get vsi params failed %x!!\n", ret);
2395		return (ret);
2396	}
2397#ifdef I40E_DEBUG
2398	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2399	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2400	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2401	    ctxt.uplink_seid, ctxt.vsi_number,
2402	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2403	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2404	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2405#endif
2406	/*
2407	** Set the queue and traffic class bits
2408	**  - when multiple traffic classes are supported
2409	**    this will need to be more robust.
2410	*/
2411	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2412	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2413	ctxt.info.queue_mapping[0] = 0;
2414	ctxt.info.tc_mapping[0] = 0x0800;
2415
2416	/* Set VLAN receive stripping mode */
2417	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2418	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2419	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2420	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2421	else
2422	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2423
2424	/* Keep copy of VSI info in VSI for statistic counters */
2425	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2426
2427	/* Reset VSI statistics */
2428	i40e_vsi_reset_stats(vsi);
2429	vsi->hw_filters_add = 0;
2430	vsi->hw_filters_del = 0;
2431
2432	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2433	if (ret)
2434		device_printf(dev,"update vsi params failed %x!!\n",
2435		   hw->aq.asq_last_status);
2436	return (ret);
2437}
2438
2439
2440/*********************************************************************
2441 *
2442 *  Initialize the VSI:  this handles contexts, which means things
2443 *  			 like the number of descriptors, buffer size,
2444 *			 plus we init the rings thru this function.
2445 *
2446 **********************************************************************/
2447static int
2448i40e_initialize_vsi(struct i40e_vsi *vsi)
2449{
2450	struct i40e_queue	*que = vsi->queues;
2451	device_t		dev = vsi->dev;
2452	struct i40e_hw		*hw = vsi->hw;
2453	int			err = 0;
2454
2455
2456	for (int i = 0; i < vsi->num_queues; i++, que++) {
2457		struct tx_ring		*txr = &que->txr;
2458		struct rx_ring 		*rxr = &que->rxr;
2459		struct i40e_hmc_obj_txq tctx;
2460		struct i40e_hmc_obj_rxq rctx;
2461		u32			txctl;
2462		u16			size;
2463
2464
2465		/* Setup the HMC TX Context  */
2466		size = que->num_desc * sizeof(struct i40e_tx_desc);
2467		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2468		tctx.new_context = 1;
2469		tctx.base = (txr->dma.pa/128);
2470		tctx.qlen = que->num_desc;
2471		tctx.fc_ena = 0;
2472		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2473		/* Enable HEAD writeback */
2474		tctx.head_wb_ena = 1;
2475		tctx.head_wb_addr = txr->dma.pa +
2476		    (que->num_desc * sizeof(struct i40e_tx_desc));
2477		tctx.rdylist_act = 0;
2478		err = i40e_clear_lan_tx_queue_context(hw, i);
2479		if (err) {
2480			device_printf(dev, "Unable to clear TX context\n");
2481			break;
2482		}
2483		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2484		if (err) {
2485			device_printf(dev, "Unable to set TX context\n");
2486			break;
2487		}
2488		/* Associate the ring with this PF */
2489		txctl = I40E_QTX_CTL_PF_QUEUE;
2490		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2491		    I40E_QTX_CTL_PF_INDX_MASK);
2492		wr32(hw, I40E_QTX_CTL(i), txctl);
2493		i40e_flush(hw);
2494
2495		/* Do ring (re)init */
2496		i40e_init_tx_ring(que);
2497
2498		/* Next setup the HMC RX Context  */
2499		if (vsi->max_frame_size <= 2048)
2500			rxr->mbuf_sz = MCLBYTES;
2501		else
2502			rxr->mbuf_sz = MJUMPAGESIZE;
2503
2504		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2505
2506		/* Set up an RX context for the HMC */
2507		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2508		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2509		/* ignore header split for now */
2510		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2511		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2512		    vsi->max_frame_size : max_rxmax;
2513		rctx.dtype = 0;
2514		rctx.dsize = 1;	/* do 32byte descriptors */
2515		rctx.hsplit_0 = 0;  /* no HDR split initially */
2516		rctx.base = (rxr->dma.pa/128);
2517		rctx.qlen = que->num_desc;
2518		rctx.tphrdesc_ena = 1;
2519		rctx.tphwdesc_ena = 1;
2520		rctx.tphdata_ena = 0;
2521		rctx.tphhead_ena = 0;
2522		rctx.lrxqthresh = 2;
2523		rctx.crcstrip = 1;
2524		rctx.l2tsel = 1;
2525		rctx.showiv = 1;
2526		rctx.fc_ena = 0;
2527		rctx.prefena = 1;
2528
2529		err = i40e_clear_lan_rx_queue_context(hw, i);
2530		if (err) {
2531			device_printf(dev,
2532			    "Unable to clear RX context %d\n", i);
2533			break;
2534		}
2535		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2536		if (err) {
2537			device_printf(dev, "Unable to set RX context %d\n", i);
2538			break;
2539		}
2540		err = i40e_init_rx_ring(que);
2541		if (err) {
2542			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2543			break;
2544		}
2545		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2546		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2547	}
2548	return (err);
2549}
2550
2551
2552/*********************************************************************
2553 *
2554 *  Free all VSI structs.
2555 *
2556 **********************************************************************/
2557void
2558i40e_free_vsi(struct i40e_vsi *vsi)
2559{
2560	struct i40e_pf		*pf = (struct i40e_pf *)vsi->back;
2561	struct i40e_queue	*que = vsi->queues;
2562	struct i40e_mac_filter *f;
2563
2564	/* Free station queues */
2565	for (int i = 0; i < vsi->num_queues; i++, que++) {
2566		struct tx_ring *txr = &que->txr;
2567		struct rx_ring *rxr = &que->rxr;
2568
2569		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2570			continue;
2571		I40E_TX_LOCK(txr);
2572		i40e_free_que_tx(que);
2573		if (txr->base)
2574			i40e_free_dma(&pf->hw, &txr->dma);
2575		I40E_TX_UNLOCK(txr);
2576		I40E_TX_LOCK_DESTROY(txr);
2577
2578		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2579			continue;
2580		I40E_RX_LOCK(rxr);
2581		i40e_free_que_rx(que);
2582		if (rxr->base)
2583			i40e_free_dma(&pf->hw, &rxr->dma);
2584		I40E_RX_UNLOCK(rxr);
2585		I40E_RX_LOCK_DESTROY(rxr);
2586
2587	}
2588	free(vsi->queues, M_DEVBUF);
2589
2590	/* Free VSI filter list */
2591	while (!SLIST_EMPTY(&vsi->ftl)) {
2592		f = SLIST_FIRST(&vsi->ftl);
2593		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2594		free(f, M_DEVBUF);
2595	}
2596}
2597
2598
2599/*********************************************************************
2600 *
2601 *  Allocate memory for the VSI (virtual station interface) and their
2602 *  associated queues, rings and the descriptors associated with each,
2603 *  called only once at attach.
2604 *
2605 **********************************************************************/
2606static int
2607i40e_setup_stations(struct i40e_pf *pf)
2608{
2609	device_t		dev = pf->dev;
2610	struct i40e_vsi		*vsi;
2611	struct i40e_queue	*que;
2612	struct tx_ring		*txr;
2613	struct rx_ring		*rxr;
2614	int 			rsize, tsize;
2615	int			error = I40E_SUCCESS;
2616
2617	vsi = &pf->vsi;
2618	vsi->back = (void *)pf;
2619	vsi->hw = &pf->hw;
2620	vsi->id = 0;
2621	vsi->num_vlans = 0;
2622
2623	/* Get memory for the station queues */
2624        if (!(vsi->queues =
2625            (struct i40e_queue *) malloc(sizeof(struct i40e_queue) *
2626            vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2627                device_printf(dev, "Unable to allocate queue memory\n");
2628                error = ENOMEM;
2629                goto early;
2630        }
2631
2632	for (int i = 0; i < vsi->num_queues; i++) {
2633		que = &vsi->queues[i];
2634		que->num_desc = i40e_ringsz;
2635		que->me = i;
2636		que->vsi = vsi;
2637		/* mark the queue as active */
2638		vsi->active_queues |= (u64)1 << que->me;
2639		txr = &que->txr;
2640		txr->que = que;
2641		txr->tail = I40E_QTX_TAIL(que->me);
2642
2643		/* Initialize the TX lock */
2644		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2645		    device_get_nameunit(dev), que->me);
2646		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2647		/* Create the TX descriptor ring */
2648		tsize = roundup2((que->num_desc *
2649		    sizeof(struct i40e_tx_desc)) +
2650		    sizeof(u32), DBA_ALIGN);
2651		if (i40e_allocate_dma(&pf->hw,
2652		    &txr->dma, tsize, DBA_ALIGN)) {
2653			device_printf(dev,
2654			    "Unable to allocate TX Descriptor memory\n");
2655			error = ENOMEM;
2656			goto fail;
2657		}
2658		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2659		bzero((void *)txr->base, tsize);
2660       		/* Now allocate transmit soft structs for the ring */
2661       		if (i40e_allocate_tx_data(que)) {
2662			device_printf(dev,
2663			    "Critical Failure setting up TX structures\n");
2664			error = ENOMEM;
2665			goto fail;
2666       		}
2667		/* Allocate a buf ring */
2668		txr->br = buf_ring_alloc(4096, M_DEVBUF,
2669		    M_WAITOK, &txr->mtx);
2670		if (txr->br == NULL) {
2671			device_printf(dev,
2672			    "Critical Failure setting up TX buf ring\n");
2673			error = ENOMEM;
2674			goto fail;
2675       		}
2676
2677		/*
2678		 * Next the RX queues...
2679		 */
2680		rsize = roundup2(que->num_desc *
2681		    sizeof(union i40e_rx_desc), DBA_ALIGN);
2682		rxr = &que->rxr;
2683		rxr->que = que;
2684		rxr->tail = I40E_QRX_TAIL(que->me);
2685
2686		/* Initialize the RX side lock */
2687		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2688		    device_get_nameunit(dev), que->me);
2689		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2690
2691		if (i40e_allocate_dma(&pf->hw,
2692		    &rxr->dma, rsize, 4096)) {
2693			device_printf(dev,
2694			    "Unable to allocate RX Descriptor memory\n");
2695			error = ENOMEM;
2696			goto fail;
2697		}
2698		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2699		bzero((void *)rxr->base, rsize);
2700
2701        	/* Allocate receive soft structs for the ring*/
2702		if (i40e_allocate_rx_data(que)) {
2703			device_printf(dev,
2704			    "Critical Failure setting up receive structs\n");
2705			error = ENOMEM;
2706			goto fail;
2707		}
2708	}
2709
2710	return (0);
2711
2712fail:
2713	for (int i = 0; i < vsi->num_queues; i++) {
2714		que = &vsi->queues[i];
2715		rxr = &que->rxr;
2716		txr = &que->txr;
2717		if (rxr->base)
2718			i40e_free_dma(&pf->hw, &rxr->dma);
2719		if (txr->base)
2720			i40e_free_dma(&pf->hw, &txr->dma);
2721	}
2722
2723early:
2724	return (error);
2725}
2726
2727/*
2728** Provide a update to the queue RX
2729** interrupt moderation value.
2730*/
2731static void
2732i40e_set_queue_rx_itr(struct i40e_queue *que)
2733{
2734	struct i40e_vsi	*vsi = que->vsi;
2735	struct i40e_hw	*hw = vsi->hw;
2736	struct rx_ring	*rxr = &que->rxr;
2737	u16		rx_itr;
2738	u16		rx_latency = 0;
2739	int		rx_bytes;
2740
2741
2742	/* Idle, do nothing */
2743	if (rxr->bytes == 0)
2744		return;
2745
2746	if (i40e_dynamic_rx_itr) {
2747		rx_bytes = rxr->bytes/rxr->itr;
2748		rx_itr = rxr->itr;
2749
2750		/* Adjust latency range */
2751		switch (rxr->latency) {
2752		case I40E_LOW_LATENCY:
2753			if (rx_bytes > 10) {
2754				rx_latency = I40E_AVE_LATENCY;
2755				rx_itr = I40E_ITR_20K;
2756			}
2757			break;
2758		case I40E_AVE_LATENCY:
2759			if (rx_bytes > 20) {
2760				rx_latency = I40E_BULK_LATENCY;
2761				rx_itr = I40E_ITR_8K;
2762			} else if (rx_bytes <= 10) {
2763				rx_latency = I40E_LOW_LATENCY;
2764				rx_itr = I40E_ITR_100K;
2765			}
2766			break;
2767		case I40E_BULK_LATENCY:
2768			if (rx_bytes <= 20) {
2769				rx_latency = I40E_AVE_LATENCY;
2770				rx_itr = I40E_ITR_20K;
2771			}
2772			break;
2773       		 }
2774
2775		rxr->latency = rx_latency;
2776
2777		if (rx_itr != rxr->itr) {
2778			/* do an exponential smoothing */
2779			rx_itr = (10 * rx_itr * rxr->itr) /
2780			    ((9 * rx_itr) + rxr->itr);
2781			rxr->itr = rx_itr & I40E_MAX_ITR;
2782			wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR,
2783			    que->me), rxr->itr);
2784		}
2785	} else { /* We may have have toggled to non-dynamic */
2786		if (vsi->rx_itr_setting & I40E_ITR_DYNAMIC)
2787			vsi->rx_itr_setting = i40e_rx_itr;
2788		/* Update the hardware if needed */
2789		if (rxr->itr != vsi->rx_itr_setting) {
2790			rxr->itr = vsi->rx_itr_setting;
2791			wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR,
2792			    que->me), rxr->itr);
2793		}
2794	}
2795	rxr->bytes = 0;
2796	rxr->packets = 0;
2797	return;
2798}
2799
2800
2801/*
2802** Provide a update to the queue TX
2803** interrupt moderation value.
2804*/
2805static void
2806i40e_set_queue_tx_itr(struct i40e_queue *que)
2807{
2808	struct i40e_vsi	*vsi = que->vsi;
2809	struct i40e_hw	*hw = vsi->hw;
2810	struct tx_ring	*txr = &que->txr;
2811	u16		tx_itr;
2812	u16		tx_latency = 0;
2813	int		tx_bytes;
2814
2815
2816	/* Idle, do nothing */
2817	if (txr->bytes == 0)
2818		return;
2819
2820	if (i40e_dynamic_tx_itr) {
2821		tx_bytes = txr->bytes/txr->itr;
2822		tx_itr = txr->itr;
2823
2824		switch (txr->latency) {
2825		case I40E_LOW_LATENCY:
2826			if (tx_bytes > 10) {
2827				tx_latency = I40E_AVE_LATENCY;
2828				tx_itr = I40E_ITR_20K;
2829			}
2830			break;
2831		case I40E_AVE_LATENCY:
2832			if (tx_bytes > 20) {
2833				tx_latency = I40E_BULK_LATENCY;
2834				tx_itr = I40E_ITR_8K;
2835			} else if (tx_bytes <= 10) {
2836				tx_latency = I40E_LOW_LATENCY;
2837				tx_itr = I40E_ITR_100K;
2838			}
2839			break;
2840		case I40E_BULK_LATENCY:
2841			if (tx_bytes <= 20) {
2842				tx_latency = I40E_AVE_LATENCY;
2843				tx_itr = I40E_ITR_20K;
2844			}
2845			break;
2846		}
2847
2848		txr->latency = tx_latency;
2849
2850		if (tx_itr != txr->itr) {
2851       	         /* do an exponential smoothing */
2852			tx_itr = (10 * tx_itr * txr->itr) /
2853			    ((9 * tx_itr) + txr->itr);
2854			txr->itr = tx_itr & I40E_MAX_ITR;
2855			wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR,
2856			    que->me), txr->itr);
2857		}
2858
2859	} else { /* We may have have toggled to non-dynamic */
2860		if (vsi->tx_itr_setting & I40E_ITR_DYNAMIC)
2861			vsi->tx_itr_setting = i40e_tx_itr;
2862		/* Update the hardware if needed */
2863		if (txr->itr != vsi->tx_itr_setting) {
2864			txr->itr = vsi->tx_itr_setting;
2865			wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR,
2866			    que->me), txr->itr);
2867		}
2868	}
2869	txr->bytes = 0;
2870	txr->packets = 0;
2871	return;
2872}
2873
2874
2875static void
2876i40e_add_hw_stats(struct i40e_pf *pf)
2877{
2878	device_t dev = pf->dev;
2879	struct i40e_vsi *vsi = &pf->vsi;
2880	struct i40e_queue *queues = vsi->queues;
2881	struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
2882	struct i40e_hw_port_stats *pf_stats = &pf->stats;
2883
2884	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2885	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2886	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2887
2888	struct sysctl_oid *vsi_node, *queue_node;
2889	struct sysctl_oid_list *vsi_list, *queue_list;
2890
2891	struct tx_ring *txr;
2892	struct rx_ring *rxr;
2893
2894	/* Driver statistics */
2895	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2896			CTLFLAG_RD, &pf->watchdog_events,
2897			"Watchdog timeouts");
2898	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2899			CTLFLAG_RD, &pf->admin_irq,
2900			"Admin Queue IRQ Handled");
2901
2902	/* VSI statistics */
2903#define QUEUE_NAME_LEN 32
2904	char queue_namebuf[QUEUE_NAME_LEN];
2905
2906	// ERJ: Only one vsi now, re-do when >1 VSI enabled
2907	// snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
2908	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2909				   CTLFLAG_RD, NULL, "VSI-specific stats");
2910	vsi_list = SYSCTL_CHILDREN(vsi_node);
2911
2912	i40e_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
2913
2914	/* Queue statistics */
2915	for (int q = 0; q < vsi->num_queues; q++) {
2916		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2917		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2918					     CTLFLAG_RD, NULL, "Queue #");
2919		queue_list = SYSCTL_CHILDREN(queue_node);
2920
2921		txr = &(queues[q].txr);
2922		rxr = &(queues[q].rxr);
2923
2924		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2925				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2926				"m_defrag() failed");
2927		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2928				CTLFLAG_RD, &(queues[q].dropped_pkts),
2929				"Driver dropped packets");
2930		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2931				CTLFLAG_RD, &(queues[q].irqs),
2932				"irqs on this queue");
2933		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2934				CTLFLAG_RD, &(queues[q].tso),
2935				"TSO");
2936		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2937				CTLFLAG_RD, &(queues[q].tx_dma_setup),
2938				"Driver tx dma failure in xmit");
2939		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2940				CTLFLAG_RD, &(txr->no_desc),
2941				"Queue No Descriptor Available");
2942		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2943				CTLFLAG_RD, &(txr->total_packets),
2944				"Queue Packets Transmitted");
2945		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2946				CTLFLAG_RD, &(txr->bytes),
2947				"Queue Bytes Transmitted");
2948		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2949				CTLFLAG_RD, &(rxr->rx_packets),
2950				"Queue Packets Received");
2951		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2952				CTLFLAG_RD, &(rxr->rx_bytes),
2953				"Queue Bytes Received");
2954	}
2955
2956	/* MAC stats */
2957	i40e_add_sysctls_mac_stats(ctx, child, pf_stats);
2958}
2959
2960static void
2961i40e_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2962	struct sysctl_oid_list *child,
2963	struct i40e_eth_stats *eth_stats)
2964{
2965	struct i40e_sysctl_info ctls[] =
2966	{
2967		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2968		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2969			"Unicast Packets Received"},
2970		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2971			"Multicast Packets Received"},
2972		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
2973			"Broadcast Packets Received"},
2974		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
2975		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2976		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2977		{&eth_stats->tx_multicast, "mcast_pkts_txd",
2978			"Multicast Packets Transmitted"},
2979		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
2980			"Broadcast Packets Transmitted"},
2981		{&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
2982		// end
2983		{0,0,0}
2984	};
2985
2986	struct i40e_sysctl_info *entry = ctls;
2987	while (entry->stat != 0)
2988	{
2989		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
2990				CTLFLAG_RD, entry->stat,
2991				entry->description);
2992		entry++;
2993	}
2994}
2995
2996static void
2997i40e_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
2998	struct sysctl_oid_list *child,
2999	struct i40e_hw_port_stats *stats)
3000{
3001	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3002				    CTLFLAG_RD, NULL, "Mac Statistics");
3003	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3004
3005	struct i40e_eth_stats *eth_stats = &stats->eth;
3006	i40e_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3007
3008	struct i40e_sysctl_info ctls[] =
3009	{
3010		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3011		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3012		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3013		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3014		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3015		/* Packet Reception Stats */
3016		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3017		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3018		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3019		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3020		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3021		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3022		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3023		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3024		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3025		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3026		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3027		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3028		/* Packet Transmission Stats */
3029		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3030		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3031		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3032		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3033		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3034		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3035		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3036		/* Flow control */
3037		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3038		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3039		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3040		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3041		/* End */
3042		{0,0,0}
3043	};
3044
3045	struct i40e_sysctl_info *entry = ctls;
3046	while (entry->stat != 0)
3047	{
3048		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3049				CTLFLAG_RD, entry->stat,
3050				entry->description);
3051		entry++;
3052	}
3053}
3054
3055/*
3056** i40e_config_rss - setup RSS
3057**  - note this is done for the single vsi
3058*/
static void i40e_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf	*pf = (struct i40e_pf *)vsi->back;
	struct i40e_hw	*hw = vsi->hw;
	u32		lut = 0;
	u64		set_hena, hena;
	int		i, j;

	/* Fixed RSS hash key: I40E_PFQF_HKEY_MAX_INDEX + 1 (13) 32-bit words */
	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
	    0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
	    0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
	    0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};

	/* Fill out hash function seed */
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
                wr32(hw, I40E_PFQF_HKEY(i), seed[i]);

	/* Enable PCTYPES for RSS: */
	set_hena =
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);

	/*
	** The 64-bit HENA bitmap is split across two 32-bit registers;
	** OR the new packet-classifier types into whatever is already set.
	*/
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j &
		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	i40e_flush(hw);
}
3109
3110
3111/*
3112** This routine is run via an vlan config EVENT,
3113** it enables us to use the HW Filter table since
3114** we can get the vlan id. This just creates the
3115** entry in the soft version of the VFTA, init will
3116** repopulate the real table.
3117*/
3118static void
3119i40e_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3120{
3121	struct i40e_vsi	*vsi = ifp->if_softc;
3122	struct i40e_hw	*hw = vsi->hw;
3123	struct i40e_pf	*pf = (struct i40e_pf *)vsi->back;
3124
3125	if (ifp->if_softc !=  arg)   /* Not our event */
3126		return;
3127
3128	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3129		return;
3130
3131	I40E_PF_LOCK(pf);
3132	++vsi->num_vlans;
3133	i40e_add_filter(vsi, hw->mac.addr, vtag);
3134	I40E_PF_UNLOCK(pf);
3135}
3136
3137/*
3138** This routine is run via an vlan
3139** unconfig EVENT, remove our entry
3140** in the soft vfta.
3141*/
3142static void
3143i40e_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3144{
3145	struct i40e_vsi	*vsi = ifp->if_softc;
3146	struct i40e_hw	*hw = vsi->hw;
3147	struct i40e_pf	*pf = (struct i40e_pf *)vsi->back;
3148
3149	if (ifp->if_softc !=  arg)
3150		return;
3151
3152	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3153		return;
3154
3155	I40E_PF_LOCK(pf);
3156	--vsi->num_vlans;
3157	i40e_del_filter(vsi, hw->mac.addr, vtag);
3158	I40E_PF_UNLOCK(pf);
3159}
3160
3161/*
3162** This routine updates vlan filters, called by init
3163** it scans the filter table and then updates the hw
3164** after a soft reset.
3165*/
3166static void
3167i40e_setup_vlan_filters(struct i40e_vsi *vsi)
3168{
3169	struct i40e_mac_filter	*f;
3170	int			cnt = 0, flags;
3171
3172	if (vsi->num_vlans == 0)
3173		return;
3174	/*
3175	** Scan the filter list for vlan entries,
3176	** mark them for addition and then call
3177	** for the AQ update.
3178	*/
3179	SLIST_FOREACH(f, &vsi->ftl, next) {
3180		if (f->flags & I40E_FILTER_VLAN) {
3181			f->flags |=
3182			    (I40E_FILTER_ADD |
3183			    I40E_FILTER_USED);
3184			cnt++;
3185		}
3186	}
3187	if (cnt == 0) {
3188		printf("setup vlan: no filters found!\n");
3189		return;
3190	}
3191	flags = I40E_FILTER_VLAN;
3192	flags |= (I40E_FILTER_ADD | I40E_FILTER_USED);
3193	i40e_add_hw_filters(vsi, flags, cnt);
3194	return;
3195}
3196
3197/*
3198** Initialize filter list and add filters that the hardware
3199** needs to know about.
3200*/
3201static void
3202i40e_init_filters(struct i40e_vsi *vsi)
3203{
3204	/* Add broadcast address */
3205	u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3206	i40e_add_filter(vsi, bc, I40E_VLAN_ANY);
3207}
3208
3209/*
3210** This routine adds mulicast filters
3211*/
3212static void
3213i40e_add_mc_filter(struct i40e_vsi *vsi, u8 *macaddr)
3214{
3215	struct i40e_mac_filter *f;
3216
3217	/* Does one already exist */
3218	f = i40e_find_filter(vsi, macaddr, I40E_VLAN_ANY);
3219	if (f != NULL)
3220		return;
3221
3222	f = i40e_get_filter(vsi);
3223	if (f == NULL) {
3224		printf("WARNING: no filter available!!\n");
3225		return;
3226	}
3227	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3228	f->vlan = I40E_VLAN_ANY;
3229	f->flags |= (I40E_FILTER_ADD | I40E_FILTER_USED
3230	    | I40E_FILTER_MC);
3231
3232	return;
3233}
3234
3235/*
3236** This routine adds macvlan filters
3237*/
3238static void
3239i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan)
3240{
3241	struct i40e_mac_filter	*f, *tmp;
3242	device_t		dev = vsi->dev;
3243
3244	DEBUGOUT("i40e_add_filter: begin");
3245
3246	/* Does one already exist */
3247	f = i40e_find_filter(vsi, macaddr, vlan);
3248	if (f != NULL)
3249		return;
3250	/*
3251	** Is this the first vlan being registered, if so we
3252	** need to remove the ANY filter that indicates we are
3253	** not in a vlan, and replace that with a 0 filter.
3254	*/
3255	if ((vlan != I40E_VLAN_ANY) && (vsi->num_vlans == 1)) {
3256		tmp = i40e_find_filter(vsi, macaddr, I40E_VLAN_ANY);
3257		if (tmp != NULL) {
3258			i40e_del_filter(vsi, macaddr, I40E_VLAN_ANY);
3259			i40e_add_filter(vsi, macaddr, 0);
3260		}
3261	}
3262
3263	f = i40e_get_filter(vsi);
3264	if (f == NULL) {
3265		device_printf(dev, "WARNING: no filter available!!\n");
3266		return;
3267	}
3268	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3269	f->vlan = vlan;
3270	f->flags |= (I40E_FILTER_ADD | I40E_FILTER_USED);
3271	if (f->vlan != I40E_VLAN_ANY)
3272		f->flags |= I40E_FILTER_VLAN;
3273
3274	i40e_add_hw_filters(vsi, f->flags, 1);
3275	return;
3276}
3277
3278static void
3279i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan)
3280{
3281	struct i40e_mac_filter *f;
3282
3283	f = i40e_find_filter(vsi, macaddr, vlan);
3284	if (f == NULL)
3285		return;
3286
3287	f->flags |= I40E_FILTER_DEL;
3288	i40e_del_hw_filters(vsi, 1);
3289
3290	/* Check if this is the last vlan removal */
3291	if (vlan != I40E_VLAN_ANY && vsi->num_vlans == 0) {
3292		/* Switch back to a non-vlan filter */
3293		i40e_del_filter(vsi, macaddr, 0);
3294		i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
3295	}
3296	return;
3297}
3298
3299/*
3300** Find the filter with both matching mac addr and vlan id
3301*/
3302static struct i40e_mac_filter *
3303i40e_find_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan)
3304{
3305	struct i40e_mac_filter	*f;
3306	bool			match = FALSE;
3307
3308	SLIST_FOREACH(f, &vsi->ftl, next) {
3309		if (!cmp_etheraddr(f->macaddr, macaddr))
3310			continue;
3311		if (f->vlan == vlan) {
3312			match = TRUE;
3313			break;
3314		}
3315	}
3316
3317	if (!match)
3318		f = NULL;
3319	return (f);
3320}
3321
3322/*
3323** This routine takes additions to the vsi filter
3324** table and creates an Admin Queue call to create
3325** the filters in the hardware.
3326*/
static void
i40e_add_hw_filters(struct i40e_vsi *vsi, int flags, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct i40e_mac_filter	*f;
	struct i40e_hw	*hw = vsi->hw;
	device_t	dev = vsi->dev;
	int		err, j = 0;

	/* AQ element array for up to cnt filters; M_NOWAIT may fail */
	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add hw filter failed to get memory\n");
		return;
	}

	/*
	** Scan the filter list, each time we find one
	** we add it to the admin queue array and turn off
	** the add bit.
	**
	** NOTE(review): this is an exact (==) flags comparison, not a
	** mask test, so only entries whose flags equal the caller's
	** value verbatim are collected — confirm intended.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if (f->flags == flags) {
			b = &a[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
			/* VLAN_ANY is a software sentinel; hw gets tag 0 */
			b->vlan_tag =
			    (f->vlan == I40E_VLAN_ANY ? 0 : f->vlan);
			b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			f->flags &= ~I40E_FILTER_ADD;
			j++;
		}
		if (j == cnt)
			break;
	}
	/* Hand the batch to firmware in a single admin queue command */
	if (j > 0) {
		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
		if (err)
			device_printf(dev, "aq_add_macvlan failure %d\n",
			    hw->aq.asq_last_status);
		else
			vsi->hw_filters_add += j;
	}
	free(a, M_DEVBUF);
	return;
}
3372
3373/*
3374** This routine takes removals in the vsi filter
3375** table and creates an Admin Queue call to delete
3376** the filters in the hardware.
3377*/
static void
i40e_del_hw_filters(struct i40e_vsi *vsi, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct i40e_hw		*hw = vsi->hw;
	device_t		dev = vsi->dev;
	struct i40e_mac_filter	*f, *f_temp;
	int			err, j = 0;

	DEBUGOUT("i40e_del_hw_filters: begin\n");

	/* AQ element array for up to cnt removals; M_NOWAIT may fail */
	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		printf("del hw filter failed to get memory\n");
		return;
	}

	/*
	** Collect every entry flagged I40E_FILTER_DEL into the AQ array
	** and unlink/free it from the soft list as we go (SAFE variant
	** allows removal during traversal).
	** NOTE(review): the soft entry is freed before the AQ call below,
	** so a firmware-side removal failure leaves the two views out of
	** sync — confirm acceptable.
	*/
	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
		if (f->flags & I40E_FILTER_DEL) {
			e = &d[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
			/* VLAN_ANY is a software sentinel; hw gets tag 0 */
			e->vlan_tag = (f->vlan == I40E_VLAN_ANY ? 0 : f->vlan);
			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			/* delete entry from vsi list */
			SLIST_REMOVE(&vsi->ftl, f, i40e_mac_filter, next);
			free(f, M_DEVBUF);
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
		/* NOTE: returns ENOENT every time but seems to work fine,
		   so we'll ignore that specific error. */
		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
			/* Count per-element successes to report partial failure */
			int sc = 0;
			for (int i = 0; i < j; i++)
				sc += (!d[i].error_code);
			vsi->hw_filters_del += sc;
			device_printf(dev,
			    "Failed to remove %d/%d filters, aq error %d\n",
			    j - sc, j, hw->aq.asq_last_status);
		} else
			vsi->hw_filters_del += j;
	}
	free(d, M_DEVBUF);

	DEBUGOUT("i40e_del_hw_filters: end\n");
	return;
}
3430
3431
3432static void
3433i40e_enable_rings(struct i40e_vsi *vsi)
3434{
3435	struct i40e_hw	*hw = vsi->hw;
3436	u32		reg;
3437
3438	for (int i = 0; i < vsi->num_queues; i++) {
3439		i40e_pre_tx_queue_cfg(hw, i, TRUE);
3440
3441		reg = rd32(hw, I40E_QTX_ENA(i));
3442		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3443		    I40E_QTX_ENA_QENA_STAT_MASK;
3444		wr32(hw, I40E_QTX_ENA(i), reg);
3445		/* Verify the enable took */
3446		for (int j = 0; j < 10; j++) {
3447			reg = rd32(hw, I40E_QTX_ENA(i));
3448			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3449				break;
3450			i40e_msec_delay(10);
3451		}
3452		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3453			printf("TX queue %d disabled!\n", i);
3454
3455		reg = rd32(hw, I40E_QRX_ENA(i));
3456		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3457		    I40E_QRX_ENA_QENA_STAT_MASK;
3458		wr32(hw, I40E_QRX_ENA(i), reg);
3459		/* Verify the enable took */
3460		for (int j = 0; j < 10; j++) {
3461			reg = rd32(hw, I40E_QRX_ENA(i));
3462			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3463				break;
3464			i40e_msec_delay(10);
3465		}
3466		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3467			printf("RX queue %d disabled!\n", i);
3468	}
3469}
3470
3471static void
3472i40e_disable_rings(struct i40e_vsi *vsi)
3473{
3474	struct i40e_hw	*hw = vsi->hw;
3475	u32		reg;
3476
3477	for (int i = 0; i < vsi->num_queues; i++) {
3478		i40e_pre_tx_queue_cfg(hw, i, FALSE);
3479		i40e_usec_delay(500);
3480
3481		reg = rd32(hw, I40E_QTX_ENA(i));
3482		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3483		wr32(hw, I40E_QTX_ENA(i), reg);
3484		/* Verify the disable took */
3485		for (int j = 0; j < 10; j++) {
3486			reg = rd32(hw, I40E_QTX_ENA(i));
3487			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3488				break;
3489			i40e_msec_delay(10);
3490		}
3491		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3492			printf("TX queue %d still enabled!\n", i);
3493
3494		reg = rd32(hw, I40E_QRX_ENA(i));
3495		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3496		wr32(hw, I40E_QRX_ENA(i), reg);
3497		/* Verify the disable took */
3498		for (int j = 0; j < 10; j++) {
3499			reg = rd32(hw, I40E_QRX_ENA(i));
3500			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3501				break;
3502			i40e_msec_delay(10);
3503		}
3504		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3505			printf("RX queue %d still enabled!\n", i);
3506	}
3507}
3508
3509/**
3510 * i40e_handle_mdd_event
3511 *
3512 * Called from interrupt handler to identify possibly malicious vfs
3513 * (But also detects events from the PF, as well)
3514 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	u32 reg;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		/* Decode offending PF number, event code and TX queue */
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT;
		device_printf(dev,
			 "Malicious Driver Detection event 0x%02x"
			 " on TX queue %d pf number 0x%02x\n",
			 event, queue, pf_num);
		/* Write-1-to-clear the global TX MDD latch */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		/* Decode offending function, event code and RX queue */
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT;
		device_printf(dev,
			 "Malicious Driver Detection event 0x%02x"
			 " on RX queue %d of function 0x%02x\n",
			 event, queue, func);
		/* Write-1-to-clear the global RX MDD latch */
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	/* Check the per-PF latches to see if this function caused it */
	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			device_printf(dev,
				 "MDD TX event is for this function 0x%08x",
				 reg);
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			device_printf(dev,
				 "MDD RX event is for this function 0x%08x",
				 reg);
			pf_mdd_detected = true;
		}
	}

	/* re-enable mdd interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
3580
3581static void
3582i40e_enable_intr(struct i40e_vsi *vsi)
3583{
3584	struct i40e_hw		*hw = vsi->hw;
3585	struct i40e_queue	*que = vsi->queues;
3586
3587	if (i40e_enable_msix) {
3588		i40e_enable_adminq(hw);
3589		for (int i = 0; i < vsi->num_queues; i++, que++)
3590			i40e_enable_queue(hw, que->me);
3591	} else
3592		i40e_enable_legacy(hw);
3593}
3594
3595static void
3596i40e_disable_intr(struct i40e_vsi *vsi)
3597{
3598	struct i40e_hw		*hw = vsi->hw;
3599	struct i40e_queue	*que = vsi->queues;
3600
3601	if (i40e_enable_msix) {
3602		i40e_disable_adminq(hw);
3603		for (int i = 0; i < vsi->num_queues; i++, que++)
3604			i40e_disable_queue(hw, que->me);
3605	} else
3606		i40e_disable_legacy(hw);
3607}
3608
3609static void
3610i40e_enable_adminq(struct i40e_hw *hw)
3611{
3612	u32		reg;
3613
3614	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3615	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3616	    (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3617	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3618	i40e_flush(hw);
3619	return;
3620}
3621
3622static void
3623i40e_disable_adminq(struct i40e_hw *hw)
3624{
3625	u32		reg;
3626
3627	reg = I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3628	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3629
3630	return;
3631}
3632
3633static void
3634i40e_enable_queue(struct i40e_hw *hw, int id)
3635{
3636	u32		reg;
3637
3638	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3639	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3640	    (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3641	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3642}
3643
3644static void
3645i40e_disable_queue(struct i40e_hw *hw, int id)
3646{
3647	u32		reg;
3648
3649	reg = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3650	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3651
3652	return;
3653}
3654
3655static void
3656i40e_enable_legacy(struct i40e_hw *hw)
3657{
3658	u32		reg;
3659	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3660	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3661	    (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3662	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3663}
3664
3665static void
3666i40e_disable_legacy(struct i40e_hw *hw)
3667{
3668	u32		reg;
3669
3670	reg = I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3671	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3672
3673	return;
3674}
3675
/*
** Pull the full set of port-level hardware statistics into pf->stats,
** using pf->stats_offsets as the baseline the i40e_stat_update* helpers
** diff against; the 48-bit counters are split across H/L registers.
** Also refreshes the VSI ethernet stats and the ifnet error counter.
*/
static void
i40e_update_stats_counters(struct i40e_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct i40e_vsi *vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;

	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;

	/* Update hw stats */
	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);
	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_discards,
			   &nsd->eth.tx_discards);
	/* Unicast / multicast / broadcast packet counts, both directions */
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	/* Link-down drops and MAC fault counters */
	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);
	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);
	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* Flow control (LFC) stats */
	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/* Priority flow control stats */
	/* NOTE(review): per-priority (PFC) counters are compiled out here */
#if 0
	for (int i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}
#endif

	/* Packet size stats rx */
	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* Packet size stats tx */
	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	/* Receive anomaly counters (runts, fragments, giants, jabbers) */
	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);
	/* Subsequent calls diff against the values captured above */
	pf->stat_offsets_loaded = true;
	/* End hw stats */

	/* Update vsi stats */
	i40e_update_eth_stats(vsi);

	/* OS statistics */
	// ERJ - these are per-port, update all vsis?
	ifp->if_ierrors = nsd->crc_errors + nsd->illegal_bytes;
}
3876
3877/*
3878** Tasklet handler for MSIX Adminq interrupts
3879**  - do outside interrupt since it might sleep
3880*/
static void
i40e_do_adminq(void *context, int pending)
{
	struct i40e_pf			*pf = context;
	struct i40e_hw			*hw = &pf->hw;
	struct i40e_vsi			*vsi = &pf->vsi;
	struct i40e_arq_event_info	event;
	i40e_status			ret;
	u32				reg, loop = 0;
	u16				opcode, result;

	/* Buffer the firmware copies each event's payload into */
	event.msg_size = I40E_AQ_BUF_SZ;
	event.msg_buf = malloc(event.msg_size,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		printf("Unable to allocate adminq memory\n");
		return;
	}

	/* clean and process any events */
	do {
		/* result is the count of events still pending after this one */
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			vsi->link_up = i40e_config_link(hw);
			i40e_update_link_status(pf);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			/* process pf/vf communication here */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			break;
		default:
#ifdef I40E_DEBUG
			printf("AdminQ unknown event %x\n", opcode);
#endif
			break;
		}

	/* loop guard keeps a flood of events from starving the taskqueue */
	} while (result && (loop++ < I40E_ADM_LIMIT));

	/* Re-arm the adminq interrupt cause before releasing the buffer */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	free(event.msg_buf, M_DEVBUF);

	/* Re-enable the appropriate interrupt path (MSIX vs legacy) */
	if (pf->msix > 1)
		i40e_enable_adminq(&pf->hw);
	else
		i40e_enable_intr(vsi);
}
3935
3936static int
3937i40e_debug_info(SYSCTL_HANDLER_ARGS)
3938{
3939	struct i40e_pf	*pf;
3940	int		error, input = 0;
3941
3942	error = sysctl_handle_int(oidp, &input, 0, req);
3943
3944	if (error || !req->newptr)
3945		return (error);
3946
3947	if (input == 1) {
3948		pf = (struct i40e_pf *)arg1;
3949		i40e_print_debug_info(pf);
3950	}
3951
3952	return (error);
3953}
3954
3955static void
3956i40e_print_debug_info(struct i40e_pf *pf)
3957{
3958	struct i40e_hw		*hw = &pf->hw;
3959	struct i40e_vsi		*vsi = &pf->vsi;
3960	struct i40e_queue	*que = vsi->queues;
3961	struct rx_ring		*rxr = &que->rxr;
3962	struct tx_ring		*txr = &que->txr;
3963	u32			reg;
3964
3965
3966	printf("Queue irqs = %lx\n", que->irqs);
3967	printf("AdminQ irqs = %lx\n", pf->admin_irq);
3968	printf("RX next check = %x\n", rxr->next_check);
3969	printf("RX not ready = %lx\n", rxr->not_done);
3970	printf("RX packets = %lx\n", rxr->rx_packets);
3971	printf("TX desc avail = %x\n", txr->avail);
3972
3973	reg = rd32(hw, I40E_GLV_GORCL(0xc));
3974	 printf("RX Bytes = %x\n", reg);
3975	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
3976	 printf("Port RX Bytes = %x\n", reg);
3977	reg = rd32(hw, I40E_GLV_RDPC(0xc));
3978	 printf("RX discard = %x\n", reg);
3979	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
3980	 printf("Port RX discard = %x\n", reg);
3981
3982	reg = rd32(hw, I40E_GLV_TEPC(0xc));
3983	 printf("TX errors = %x\n", reg);
3984	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
3985	 printf("TX Bytes = %x\n", reg);
3986
3987	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
3988	 printf("RX undersize = %x\n", reg);
3989	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
3990	 printf("RX fragments = %x\n", reg);
3991	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
3992	 printf("RX oversize = %x\n", reg);
3993	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
3994	 printf("RX length error = %x\n", reg);
3995	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
3996	 printf("mac remote fault = %x\n", reg);
3997	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
3998	 printf("mac local fault = %x\n", reg);
3999}
4000
4001/**
4002 * Update VSI-specific ethernet statistics counters.
4003 **/
4004void i40e_update_eth_stats(struct i40e_vsi *vsi)
4005{
4006	struct i40e_pf *pf = (struct i40e_pf *)vsi->back;
4007	struct i40e_hw *hw = &pf->hw;
4008	struct ifnet *ifp = vsi->ifp;
4009	struct i40e_eth_stats *es;
4010	struct i40e_eth_stats *oes;
4011	u16 stat_idx = vsi->info.stat_counter_idx;
4012
4013	es = &vsi->eth_stats;
4014	oes = &vsi->eth_stats_offsets;
4015
4016	/* Gather up the stats that the hw collects */
4017	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4018			   vsi->stat_offsets_loaded,
4019			   &oes->tx_errors, &es->tx_errors);
4020	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4021			   vsi->stat_offsets_loaded,
4022			   &oes->rx_discards, &es->rx_discards);
4023
4024	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4025			   I40E_GLV_GORCL(stat_idx),
4026			   vsi->stat_offsets_loaded,
4027			   &oes->rx_bytes, &es->rx_bytes);
4028	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4029			   I40E_GLV_UPRCL(stat_idx),
4030			   vsi->stat_offsets_loaded,
4031			   &oes->rx_unicast, &es->rx_unicast);
4032	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4033			   I40E_GLV_MPRCL(stat_idx),
4034			   vsi->stat_offsets_loaded,
4035			   &oes->rx_multicast, &es->rx_multicast);
4036	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4037			   I40E_GLV_BPRCL(stat_idx),
4038			   vsi->stat_offsets_loaded,
4039			   &oes->rx_broadcast, &es->rx_broadcast);
4040
4041	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4042			   I40E_GLV_GOTCL(stat_idx),
4043			   vsi->stat_offsets_loaded,
4044			   &oes->tx_bytes, &es->tx_bytes);
4045	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4046			   I40E_GLV_UPTCL(stat_idx),
4047			   vsi->stat_offsets_loaded,
4048			   &oes->tx_unicast, &es->tx_unicast);
4049	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4050			   I40E_GLV_MPTCL(stat_idx),
4051			   vsi->stat_offsets_loaded,
4052			   &oes->tx_multicast, &es->tx_multicast);
4053	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4054			   I40E_GLV_BPTCL(stat_idx),
4055			   vsi->stat_offsets_loaded,
4056			   &oes->tx_broadcast, &es->tx_broadcast);
4057	vsi->stat_offsets_loaded = true;
4058
4059	/* Update ifnet stats */
4060	ifp->if_ipackets = es->rx_unicast +
4061	                   es->rx_multicast +
4062			   es->rx_broadcast;
4063	ifp->if_opackets = es->tx_unicast +
4064	                   es->tx_multicast +
4065			   es->tx_broadcast;
4066	ifp->if_ibytes = es->rx_bytes;
4067	ifp->if_obytes = es->tx_bytes;
4068	ifp->if_imcasts = es->rx_multicast;
4069	ifp->if_omcasts = es->tx_multicast;
4070
4071	ifp->if_oerrors = es->tx_errors;
4072	ifp->if_iqdrops = es->rx_discards;
4073	ifp->if_noproto = es->rx_unknown_protocol;
4074	ifp->if_collisions = 0;
4075}
4076
4077/**
4078 * Reset all of the stats for the given pf
4079 **/
4080void i40e_pf_reset_stats(struct i40e_pf *pf)
4081{
4082	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4083	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4084	pf->stat_offsets_loaded = false;
4085}
4086
4087/**
4088 * Resets all stats of the given vsi
4089 **/
4090void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
4091{
4092	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4093	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4094	vsi->stat_offsets_loaded = false;
4095}
4096
4097/**
4098 * Read and update a 48 bit stat from the hw
4099 *
4100 * Since the device stats are not reset at PFReset, they likely will not
4101 * be zeroed when the driver starts.  We'll save the first values read
4102 * and use them as offsets to be subtracted from the raw values in order
4103 * to report stats that count from zero.
4104 **/
4105static void
4106i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4107	bool offset_loaded, u64 *offset, u64 *stat)
4108{
4109	u64 new_data;
4110
4111#if __FreeBSD__ >= 10 && __amd64__
4112	new_data = rd64(hw, loreg);
4113#else
4114	/*
4115	 * Use two rd32's instead of one rd64; FreeBSD versions before
4116	 * 10 don't support 8 byte bus reads/writes.
4117	 */
4118	new_data = rd32(hw, loreg);
4119	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4120#endif
4121
4122	if (!offset_loaded)
4123		*offset = new_data;
4124	if (new_data >= *offset)
4125		*stat = new_data - *offset;
4126	else
4127		*stat = (new_data + ((u64)1 << 48)) - *offset;
4128	*stat &= 0xFFFFFFFFFFFFULL;
4129}
4130
4131/**
4132 * Read and update a 32 bit stat from the hw
4133 **/
4134static void
4135i40e_stat_update32(struct i40e_hw *hw, u32 reg,
4136	bool offset_loaded, u64 *offset, u64 *stat)
4137{
4138	u32 new_data;
4139
4140	new_data = rd32(hw, reg);
4141	if (!offset_loaded)
4142		*offset = new_data;
4143	if (new_data >= *offset)
4144		*stat = (u32)(new_data - *offset);
4145	else
4146		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4147}
4148
4149/*
4150** Set flow control using sysctl:
4151** 	0 - off
4152**	1 - rx pause
4153**	2 - tx pause
4154**	3 - full
4155*/
4156static int
4157i40e_set_flowcntl(SYSCTL_HANDLER_ARGS)
4158{
4159	/*
4160	 * TODO: ensure flow control is disabled if
4161	 * priority flow control is enabled
4162	 *
4163	 * TODO: ensure tx CRC by hardware should be enabled
4164	 * if tx flow control is enabled.
4165	 */
4166	struct i40e_pf *pf = (struct i40e_pf *)arg1;
4167	struct i40e_hw *hw = &pf->hw;
4168	device_t dev = pf->dev;
4169	int requested_fc = 0, error = 0;
4170	enum i40e_status_code aq_error = 0;
4171	u8 fc_aq_err = 0;
4172
4173	aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
4174	if (aq_error) {
4175		device_printf(dev,
4176		    "%s: Error retrieving link info from aq, %d\n",
4177		    __func__, aq_error);
4178		return (EAGAIN);
4179	}
4180
4181	/* Read in new mode */
4182	requested_fc = hw->fc.current_mode;
4183	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4184	if ((error) || (req->newptr == NULL))
4185		return (error);
4186	if (requested_fc < 0 || requested_fc > 3) {
4187		device_printf(dev,
4188		    "Invalid fc mode; valid modes are 0 through 3\n");
4189		return (EINVAL);
4190	}
4191
4192	/*
4193	** Changing flow control mode currently does not work on
4194	** 40GBASE-CR4 PHYs
4195	*/
4196	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4197	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4198		device_printf(dev, "Changing flow control mode unsupported"
4199		    " on 40GBase-CR4 media.\n");
4200		return (ENODEV);
4201	}
4202
4203	/* Set fc ability for port */
4204	hw->fc.requested_mode = requested_fc;
4205	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4206	if (aq_error) {
4207		device_printf(dev,
4208		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4209		    __func__, aq_error, fc_aq_err);
4210		return (EAGAIN);
4211	}
4212
4213	if (hw->fc.current_mode != hw->fc.requested_mode) {
4214		device_printf(dev, "%s: FC set failure:\n", __func__);
4215		device_printf(dev, "%s: Current: %s / Requested: %s\n",
4216		    __func__,
4217		    i40e_fc_string[hw->fc.current_mode],
4218		    i40e_fc_string[hw->fc.requested_mode]);
4219	}
4220
4221	return (0);
4222}
4223
4224/*
4225** Control link advertise speed:
4226**	1 - advertise 1G only
4227**	2 - advertise 10G only
4228**	3 - advertise 1 and 10G
4229**
4230** Does not work on 40G devices.
4231*/
4232static int
4233i40e_set_advertise(SYSCTL_HANDLER_ARGS)
4234{
4235	struct i40e_pf *pf = (struct i40e_pf *)arg1;
4236	struct i40e_hw *hw = &pf->hw;
4237	device_t dev = pf->dev;
4238	struct i40e_aq_get_phy_abilities_resp abilities;
4239	struct i40e_aq_set_phy_config config;
4240	int current_ls = 0, requested_ls = 0;
4241	enum i40e_status_code aq_error = 0;
4242	int error = 0;
4243
4244	/*
4245	** FW doesn't support changing advertised speed
4246	** for 40G devices; speed is always 40G.
4247	*/
4248	if (i40e_is_40G_device(hw->device_id))
4249		return (ENODEV);
4250
4251	/* Get current capability information */
4252	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
4253	if (aq_error) {
4254		device_printf(dev, "%s: Error getting phy capabilities %d,"
4255		    " aq error: %d\n", __func__, aq_error,
4256		    hw->aq.asq_last_status);
4257		return (EAGAIN);
4258	}
4259
4260	/* Figure out current mode */
4261	else if (abilities.link_speed & I40E_LINK_SPEED_10GB
4262	    && abilities.link_speed & I40E_LINK_SPEED_1GB)
4263		current_ls = 3;
4264	else if (abilities.link_speed & I40E_LINK_SPEED_10GB)
4265		current_ls = 2;
4266	else if (abilities.link_speed & I40E_LINK_SPEED_1GB)
4267		current_ls = 1;
4268	else
4269		current_ls = 0;
4270
4271	/* Read in new mode */
4272	requested_ls = current_ls;
4273	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4274	if ((error) || (req->newptr == NULL))
4275		return (error);
4276	if (requested_ls < 1 || requested_ls > 3) {
4277		device_printf(dev,
4278		    "Invalid advertised speed; valid modes are 1 through 3\n");
4279		return (EINVAL);
4280	}
4281
4282	/* Exit if no change */
4283	if (current_ls == requested_ls)
4284		return (0);
4285
4286	/* Prepare new config */
4287	bzero(&config, sizeof(config));
4288	config.phy_type = abilities.phy_type;
4289	config.abilities = abilities.abilities
4290	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4291	config.eee_capability = abilities.eee_capability;
4292	config.eeer = abilities.eeer_val;
4293	config.low_power_ctrl = abilities.d3_lpan;
4294	/* Translate into aq cmd link_speed */
4295	switch (requested_ls) {
4296	case 3:
4297		config.link_speed = I40E_LINK_SPEED_10GB
4298		    | I40E_LINK_SPEED_1GB;
4299	case 2:
4300		config.link_speed = I40E_LINK_SPEED_10GB;
4301	case 1:
4302		config.link_speed = I40E_LINK_SPEED_1GB;
4303	default:
4304		// nothing should get here
4305		break;
4306	}
4307
4308	/* Do aq command & restart link */
4309	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4310	if (aq_error) {
4311		device_printf(dev, "%s: Error setting new phy config %d,"
4312		    " aq error: %d\n", __func__, aq_error,
4313		    hw->aq.asq_last_status);
4314		return (EAGAIN);
4315	}
4316
4317	i40e_update_link_status(pf);
4318	return (0);
4319}
4320
4321/*
4322** Get the width and transaction speed of
4323** the bus this adapter is plugged into.
4324*/
4325static u16
4326i40e_get_bus_info(struct i40e_hw *hw, device_t dev)
4327{
4328        u16                     link;
4329        u32                     offset;
4330
4331
4332        /* Get the PCI Express Capabilities offset */
4333        pci_find_cap(dev, PCIY_EXPRESS, &offset);
4334
4335        /* ...and read the Link Status Register */
4336        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4337
4338        switch (link & I40E_PCI_LINK_WIDTH) {
4339        case I40E_PCI_LINK_WIDTH_1:
4340                hw->bus.width = i40e_bus_width_pcie_x1;
4341                break;
4342        case I40E_PCI_LINK_WIDTH_2:
4343                hw->bus.width = i40e_bus_width_pcie_x2;
4344                break;
4345        case I40E_PCI_LINK_WIDTH_4:
4346                hw->bus.width = i40e_bus_width_pcie_x4;
4347                break;
4348        case I40E_PCI_LINK_WIDTH_8:
4349                hw->bus.width = i40e_bus_width_pcie_x8;
4350                break;
4351        default:
4352                hw->bus.width = i40e_bus_width_unknown;
4353                break;
4354        }
4355
4356        switch (link & I40E_PCI_LINK_SPEED) {
4357        case I40E_PCI_LINK_SPEED_2500:
4358                hw->bus.speed = i40e_bus_speed_2500;
4359                break;
4360        case I40E_PCI_LINK_SPEED_5000:
4361                hw->bus.speed = i40e_bus_speed_5000;
4362                break;
4363        case I40E_PCI_LINK_SPEED_8000:
4364                hw->bus.speed = i40e_bus_speed_8000;
4365                break;
4366        default:
4367                hw->bus.speed = i40e_bus_speed_unknown;
4368                break;
4369        }
4370
4371
4372        device_printf(dev,"PCI Express Bus: Speed %s %s\n",
4373            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4374            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4375            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4376            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4377            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4378            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4379            ("Unknown"));
4380
4381        if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4382            (hw->bus.speed < i40e_bus_speed_8000)) {
4383                device_printf(dev, "PCI-Express bandwidth available"
4384                    " for this device\n     is not sufficient for"
4385                    " normal operation.\n");
4386                device_printf(dev, "For expected performance a x8 "
4387                    "PCIE Gen3 slot is required.\n");
4388        }
4389
4390        return (link);
4391}
4392
4393#ifdef I40E_DEBUG
4394static int
4395i40e_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4396{
4397	struct i40e_pf *pf = (struct i40e_pf *)arg1;
4398	struct i40e_hw *hw = &pf->hw;
4399	struct i40e_link_status link_status;
4400	char buf[512];
4401
4402	enum i40e_status_code aq_error = 0;
4403
4404	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4405	if (aq_error) {
4406		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4407		return (EPERM);
4408	}
4409
4410	sprintf(buf, "\n"
4411	    "PHY Type : %#04x\n"
4412	    "Speed    : %#04x\n"
4413	    "Link info: %#04x\n"
4414	    "AN info  : %#04x\n"
4415	    "Ext info : %#04x",
4416	    link_status.phy_type, link_status.link_speed,
4417	    link_status.link_info, link_status.an_info,
4418	    link_status.ext_info);
4419
4420	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4421}
4422
4423static int
4424i40e_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4425{
4426	struct i40e_pf *pf = (struct i40e_pf *)arg1;
4427	struct i40e_hw *hw = &pf->hw;
4428	struct i40e_aq_get_phy_abilities_resp abilities_resp;
4429	char buf[512];
4430
4431	enum i40e_status_code aq_error = 0;
4432
4433	// TODO: Print out list of qualified modules as well?
4434	aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4435	if (aq_error) {
4436		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4437		return (EPERM);
4438	}
4439
4440	sprintf(buf, "\n"
4441	    "PHY Type : %#010x\n"
4442	    "Speed    : %#04x\n"
4443	    "Abilities: %#04x\n"
4444	    "EEE cap  : %#06x\n"
4445	    "EEER reg : %#010x\n"
4446	    "D3 Lpan  : %#04x",
4447	    abilities_resp.phy_type, abilities_resp.link_speed,
4448	    abilities_resp.abilities, abilities_resp.eee_capability,
4449	    abilities_resp.eeer_val, abilities_resp.d3_lpan);
4450
4451	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4452}
4453
4454static int
4455i40e_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4456{
4457	struct i40e_pf *pf = (struct i40e_pf *)arg1;
4458	struct i40e_vsi *vsi = &pf->vsi;
4459	struct i40e_mac_filter *f;
4460	char *buf, *buf_i;
4461
4462	int error = 0;
4463	int ftl_len = 0;
4464	int ftl_counter = 0;
4465	int buf_len = 0;
4466	int entry_len = 42;
4467
4468	SLIST_FOREACH(f, &vsi->ftl, next) {
4469		ftl_len++;
4470	}
4471
4472	if (ftl_len < 1) {
4473		sysctl_handle_string(oidp, "(none)", 6, req);
4474		return (0);
4475	}
4476
4477	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4478	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
4479
4480	sprintf(buf_i++, "\n");
4481	SLIST_FOREACH(f, &vsi->ftl, next) {
4482		sprintf(buf_i,
4483		    MAC_FORMAT ", vlan %4d, flags %#06x",
4484		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4485		buf_i += entry_len;
4486		/* don't print '\n' for last entry */
4487		if (++ftl_counter != ftl_len) {
4488			sprintf(buf_i, "\n");
4489			buf_i++;
4490		}
4491	}
4492
4493	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4494	if (error)
4495		printf("sysctl error: %d\n", error);
4496	free(buf, M_DEVBUF);
4497	return error;
4498}
4499
#define I40E_SW_RES_SIZE 0x14
/*
** Sysctl: report the hardware switch resource allocation table.
**
** Queries the admin queue for up to I40E_SW_RES_SIZE resource
** entries and formats them into a table drained through an sbuf.
*/
static int
i40e_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
{
	struct i40e_pf *pf = (struct i40e_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	u8 num_entries;
	struct i40e_aqc_switch_resource_alloc_element_resp resp[I40E_SW_RES_SIZE];

	/* Auto-extending sbuf, drained through the sysctl request */
	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
				resp,
				I40E_SW_RES_SIZE,
				NULL);
	if (error) {
		device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		/* sbuf must be freed on every exit path */
		sbuf_delete(buf);
		return error;
	}
	device_printf(dev, "Num_entries: %d\n", num_entries);

	/* Table header */
	sbuf_cat(buf, "\n");
	sbuf_printf(buf,
	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
	    "     | (this)     | (all) | (this) | (all)       \n");
	for (int i = 0; i < num_entries; i++) {
		sbuf_printf(buf,
		    "%#4x | %10d   %5d   %6d   %12d",
		    resp[i].resource_type,
		    resp[i].guaranteed,
		    resp[i].total,
		    resp[i].used,
		    resp[i].total_unalloced);
		/* Newline between rows, but not after the last one */
		if (i < num_entries - 1)
			sbuf_cat(buf, "\n");
	}

	error = sbuf_finish(buf);
	if (error) {
		device_printf(dev, "Error finishing sbuf: %d\n", error);
		sbuf_delete(buf);
		return error;
	}

	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
	if (error)
		device_printf(dev, "sysctl error: %d\n", error);
	sbuf_delete(buf);
	return error;

}
4561
4562/*
4563** Dump TX desc given index.
4564** Doesn't work; don't use.
4565** TODO: Also needs a queue index input!
4566**/
4567static int
4568i40e_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4569{
4570	struct i40e_pf *pf = (struct i40e_pf *)arg1;
4571	device_t dev = pf->dev;
4572	struct sbuf *buf;
4573	int error = 0;
4574
4575	u16 desc_idx = 0;
4576
4577	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4578	if (!buf) {
4579		device_printf(dev, "Could not allocate sbuf for output.\n");
4580		return (ENOMEM);
4581	}
4582
4583	/* Read in index */
4584	error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4585	if (error)
4586		return (error);
4587	if (req->newptr == NULL)
4588		return (EIO); // fix
4589	if (desc_idx > 1024) { // fix
4590		device_printf(dev,
4591		    "Invalid descriptor index, needs to be < 1024\n"); // fix
4592		return (EINVAL);
4593	}
4594
4595	// Don't use this sysctl yet
4596	if (TRUE)
4597		return (ENODEV);
4598
4599	sbuf_cat(buf, "\n");
4600
4601	// set to queue 1?
4602	struct i40e_queue *que = pf->vsi.queues;
4603	struct tx_ring *txr = &(que[1].txr);
4604	struct i40e_tx_desc *txd = &txr->base[desc_idx];
4605
4606	sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4607	sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4608	sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4609
4610	error = sbuf_finish(buf);
4611	if (error) {
4612		device_printf(dev, "Error finishing sbuf: %d\n", error);
4613		sbuf_delete(buf);
4614		return error;
4615	}
4616
4617	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4618	if (error)
4619		device_printf(dev, "sysctl error: %d\n", error);
4620	sbuf_delete(buf);
4621	return error;
4622}
4623#endif
4624
4625