/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixl/if_ixl.c 272227 2014-09-27 20:54:57Z glebius $*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "ixl.h"
#include "ixl_pf.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.2.2";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_setup_vsi(struct ixl_vsi *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static bool	ixl_config_link(struct i40e_hw *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);

static void	ixl_enable_rings(struct ixl_vsi *);
static void	ixl_disable_rings(struct ixl_vsi *);
static void     ixl_enable_intr(struct ixl_vsi *);
static void     ixl_disable_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);

/* Sysctl debug interface */
static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);

/* The MSI/X Interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);

#ifdef IXL_DEBUG
static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
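
/*
** Note: in the stock FreeBSD build this driver becomes the if_ixl
** kernel module (see ixl(4)); loading at boot is typically done with
** a loader.conf line such as if_ixl_load="YES" (module name assumed
** from the stock build, adjust if your configuration differs).
*/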

/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");
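
/*
** The CTLFLAG_RDTUN knobs below are boot-time loader tunables rather
** than runtime sysctls, so they are set from /boot/loader.conf, for
** example (illustrative values only):
**
**   hw.ixl.enable_msix=0
**   hw.ixl.ringsz=2048
**   hw.ixl.max_queues=4
*/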

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually; if left as 0 the
** number of queues will be calculated based
** on the CPUs and MSI-X vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");
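
/*
** Note: IXL_ITR_8K/IXL_ITR_4K are throttling constants from ixl.h;
** going by the names they cap a queue at roughly 8K/4K interrupts
** per second (interpretation assumed from the naming, see ixl.h for
** the actual register encoding).
*/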

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};
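
/*
** The table above is indexed by the shared code's enum i40e_fc_mode
** (assumed ordering: none, rx pause, tx pause, full, priority,
** default); see its use in ixl_update_link_status().
*/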


/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi *vsi;
	u16		bus;
	int             error = 0;

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init */
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");
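
	/*
	** The "fc" node above (and the nodes that follow) are
	** per-device, runtime-writable sysctls; e.g. a hypothetical
	** "sysctl dev.ixl.0.fc=3" would ask ixl_set_flowcntl() for
	** full flow control (the value encoding is defined by that
	** handler; 3 meaning full is an assumption here).
	*/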

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_info, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Create for initial debugging use */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");


	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev,"PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* For now always do an initial CORE reset on first device */
	{
		static int	ixl_dev_count;
		static int	ixl_dev_track[32];
		u32		my_dev;
		int		i, found = FALSE;
		u16		bus = pci_get_bus(dev);

		mtx_lock(&ixl_reset_mtx);
		my_dev = (bus << 8) | hw->bus.device;

		for (i = 0; i < ixl_dev_count; i++) {
			if (ixl_dev_track[i] == my_dev)
				found = TRUE;
		}

		if (!found) {
			u32 reg;

			ixl_dev_track[ixl_dev_count] = my_dev;
			ixl_dev_count++;

			INIT_DEBUGOUT("Initial CORE RESET\n");
			wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
			ixl_flush(hw);
			i = 50;
			do {
				i40e_msec_delay(50);
				reg = rd32(hw, I40E_GLGEN_RSTAT);
				if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
					break;
			} while (i--);

			/* paranoia */
			wr32(hw, I40E_PF_ATQLEN, 0);
			wr32(hw, I40E_PF_ATQBAL, 0);
			wr32(hw, I40E_PF_ATQBAH, 0);
			i40e_clear_pxe_mode(hw);
		}
		mtx_unlock(&ixl_reset_mtx);
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		error = ixl_assign_vsi_msix(pf);
	else
		error = ixl_assign_vsi_legacy(pf);
	if (error)
		goto err_late;

	i40e_msec_delay(75);
	error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
	if (error) {
		device_printf(dev, "link restart failed, aq_err=%d\n",
		    pf->hw.aq.asq_last_status);
	}

	/* Determine link state */
	vsi->link_up = ixl_config_link(hw);

	/* Report if Unqualified modules are found */
	if ((vsi->link_up == FALSE) &&
	    (pf->hw.phy.link_info.link_info &
	    I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(pf->hw.phy.link_info.an_info &
	    I40E_AQ_QUALIFIED_MODULE)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected\n");

	/* Setup OS specific network interface */
	if (ixl_setup_interface(dev, vsi) != 0)
		goto err_late;

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize statistics */
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	ixl_free_vsi(vsi);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
	ixl_free_pci_resources(pf);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	i40e_status		status;

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->task);
			taskqueue_drain(que->tq, &que->tx_task);
			taskqueue_free(que->tq);
		}
	}

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	ether_ifdetach(vsi->ifp);
	callout_drain(&pf->timer);

	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);
	return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	int             error, len;
	u16		needed;
	bool		again = TRUE;

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}

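/*
** ixl_cap_txcsum_tso: helper for the SIOCSIFCAP path in ixl_ioctl()
** that keeps the TXCSUM/TSO4 and TXCSUM_IPV6/TSO6 capability pairs
** consistent, since TSO requires the matching checksum offload. The
** IXL_FLAGS_KEEP_TSO* flags remember a TSO setting that was forced
** off so it can be restored when checksum offload is re-enabled.
*/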
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t 	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		} else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else
				ixl_init_locked(pf);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_stop(pf);
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways: the stack calls it as the
 *  init entry point of the network interface, and the driver uses
 *  it as a hw/sw initialization routine to get to a consistent
 *  state.
 *
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t 	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");
	ixl_stop(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    i40e_validate_mac_addr(tmpaddr)) {
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address "
			    "change failed!!\n");
			return;
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/* Setup the VSI */
	ixl_setup_vsi(vsi);

	/*
	** Prepare the rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Set MTU in hardware */
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
		    "aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
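
/*
** ixl_handle_que is the deferred (taskqueue) half of a queue
** interrupt: it cleans the RX/TX rings, restarts transmission if the
** buf ring has work queued, and either reschedules itself (more RX
** pending) or re-enables the queue interrupt.
*/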
static void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Reenable this interrupt - hmmm */
	ixl_enable_queue(hw, que->me);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw =  &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_VFLR_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");
	IXL_PF_LOCK(pf);

	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!vsi->link_up) {
		IXL_PF_UNLOCK(pf);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;

	IXL_PF_UNLOCK(pf);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixl_media_change(struct ifnet * ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(ifp, "Media change is currently not supported.\n");

	return (ENODEV);
}


#ifdef IXL_FDIR
/*
** ATR: Application Targeted Receive - creates a filter
**	based on TX flow info that will keep the receive
**	portion of the flow on the same queue. Based on the
**	implementation this is only available for TCP connections
*/
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
	struct ixl_vsi			*vsi = que->vsi;
	struct tx_ring			*txr = &que->txr;
	struct i40e_filter_program_desc	*FDIR;
	u32				ptype, dtype;
	int				idx;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
		return;
	txr->atr_count = 0;

	/* Get a descriptor to use */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;

	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise it's an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
#endif


static void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct  ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_MULTICAST_ADDR)
				break;
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

	if (mcnt >= MAX_MULTICAST_ADDR)
		multi = TRUE;
	if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
static void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct	ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if
	** we simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
	return;
}

static void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool		match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;
	u32			mask;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, while there
		** are uncleaned descriptors, it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	ixl_init_locked(pf);
}

/*
** Note: this routine updates the OS on the link state
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	struct i40e_hw		*hw = &pf->hw;
	struct ifnet		*ifp = vsi->ifp;
	device_t		dev = pf->dev;
	enum i40e_fc_mode 	fc;


	if (vsi->link_up) {
		if (vsi->link_active == FALSE) {
			i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
			if (bootverbose) {
				fc = hw->fc.current_mode;
				device_printf(dev,"Link is up %d Gbps %s,"
				    " Flow Control: %s\n",
				    ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
				    "Full Duplex", ixl_fc_string[fc]);
			}
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev,"Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}

	return;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixl_stop(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixl_stop: begin\n");
	ixl_disable_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Stop the local timer */
	callout_stop(&pf->timer);

	return;
}


/*********************************************************************
 *
 *  Setup legacy/MSI Interrupt resources and handlers for the VSI
 *
 **********************************************************************/
static int
ixl_assign_vsi_legacy(struct ixl_pf *pf)
{
	device_t        dev = pf->dev;
	struct 		ixl_vsi *vsi = &pf->vsi;
	struct		ixl_queue *que = vsi->queues;
	int 		error, rid = 0;

	if (pf->msix == 1)
		rid = 1;
	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (pf->res == NULL) {
		device_printf(dev,"Unable to allocate"
		    " bus resource: vsi legacy/msi interrupt\n");
		return (ENXIO);
	}

	/* Set the handler function */
	error = bus_setup_intr(dev, pf->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixl_intr, pf, &pf->tag);
1812	if (error) {
1813		pf->res = NULL;
1814		device_printf(dev, "Failed to register legacy/msi handler\n");
1815		return (error);
1816	}
1817	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1818	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1819	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1820	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1821	    taskqueue_thread_enqueue, &que->tq);
1822	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1823	    device_get_nameunit(dev));
1824	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1825	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1826	    taskqueue_thread_enqueue, &pf->tq);
1827	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1828	    device_get_nameunit(dev));
1829
1830	return (0);
1831}
1832
1833
1834/*********************************************************************
1835 *
1836 *  Setup MSIX Interrupt resources and handlers for the VSI
1837 *
1838 **********************************************************************/
1839static int
1840ixl_assign_vsi_msix(struct ixl_pf *pf)
1841{
1842	device_t	dev = pf->dev;
1843	struct 		ixl_vsi *vsi = &pf->vsi;
1844	struct 		ixl_queue *que = vsi->queues;
1845	struct		tx_ring	 *txr;
1846	int 		error, rid, vector = 0;
1847
1848	/* Admin Queue is vector 0 */
1849	rid = vector + 1;
1850	pf->res = bus_alloc_resource_any(dev,
1851    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1852	if (!pf->res) {
1853		device_printf(dev,"Unable to allocate"
1854    	    " bus resource: Adminq interrupt [%d]\n", rid);
1855		return (ENXIO);
1856	}
1857	/* Set the adminq vector and handler */
1858	error = bus_setup_intr(dev, pf->res,
1859	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1860	    ixl_msix_adminq, pf, &pf->tag);
1861	if (error) {
1862		pf->res = NULL;
1863		device_printf(dev, "Failed to register Admin queue handler\n");
1864		return (error);
1865	}
1866	bus_describe_intr(dev, pf->res, pf->tag, "aq");
1867	pf->admvec = vector;
1868	/* Tasklet for Admin Queue */
1869	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1870	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1871	    taskqueue_thread_enqueue, &pf->tq);
1872	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1873	    device_get_nameunit(pf->dev));
1874	++vector;
1875
1876	/* Now set up the stations */
1877	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1878		rid = vector + 1;
1879		txr = &que->txr;
1880		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1881		    RF_SHAREABLE | RF_ACTIVE);
1882		if (que->res == NULL) {
1883			device_printf(dev,"Unable to allocate"
1884		    	    " bus resource: que interrupt [%d]\n", vector);
1885			return (ENXIO);
1886		}
1887		/* Set the handler function */
1888		error = bus_setup_intr(dev, que->res,
1889		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1890		    ixl_msix_que, que, &que->tag);
1891		if (error) {
1892			que->res = NULL;
1893			device_printf(dev, "Failed to register queue handler\n");
1894			return (error);
1895		}
1896		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1897		/* Bind the vector to a CPU */
1898		bus_bind_intr(dev, que->res, i);
1899		que->msix = vector;
1900		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1901		TASK_INIT(&que->task, 0, ixl_handle_que, que);
1902		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1903		    taskqueue_thread_enqueue, &que->tq);
1904		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1905		    device_get_nameunit(pf->dev));
1906	}
1907
1908	return (0);
1909}
1910
1911
1912/*
1913 * Allocate MSI/X vectors
1914 */
1915static int
1916ixl_init_msix(struct ixl_pf *pf)
1917{
1918	device_t dev = pf->dev;
1919	int rid, want, vectors, queues, available;
1920
1921	/* Override by tunable */
1922	if (ixl_enable_msix == 0)
1923		goto msi;
1924
1925	/*
1926	** When used in a virtualized environment
1927	** PCI BUSMASTER capability may not be set
1928	** so explicitly set it here and rewrite
1929	** the ENABLE in the MSIX control register
1930	** at this point to cause the host to
1931	** successfully initialize us.
1932	*/
1933	{
1934		u16 pci_cmd_word;
1935		int msix_ctrl;
1936		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1937		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1938		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1939		pci_find_cap(dev, PCIY_MSIX, &rid);
1940		rid += PCIR_MSIX_CTRL;
1941		msix_ctrl = pci_read_config(dev, rid, 2);
1942		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1943		pci_write_config(dev, rid, msix_ctrl, 2);
1944	}
1945
1946	/* First try MSI/X */
1947	rid = PCIR_BAR(IXL_BAR);
1948	pf->msix_mem = bus_alloc_resource_any(dev,
1949	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
1950	if (!pf->msix_mem) {
1951		/* May not be enabled */
1952		device_printf(pf->dev,
1953		    "Unable to map MSIX table\n");
1954		goto msi;
1955	}
1956
1957	available = pci_msix_count(dev);
1958	if (available == 0) { /* system has msix disabled */
1959		bus_release_resource(dev, SYS_RES_MEMORY,
1960		    rid, pf->msix_mem);
1961		pf->msix_mem = NULL;
1962		goto msi;
1963	}
1964
1965	/* Figure out a reasonable auto config value */
1966	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1967
1968	/* Override with hardcoded value if sane */
1969	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
1970		queues = ixl_max_queues;
1971
1972	/*
1973	** Want one vector (RX/TX pair) per queue
1974	** plus an additional for the admin queue.
1975	*/
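	/* e.g. with 8 CPUs and 16 vectors available: queues = 8, want = 9 */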
1976	want = queues + 1;
1977	if (want <= available)	/* Have enough */
1978		vectors = want;
1979	else {
1980		device_printf(pf->dev,
1981		    "MSIX Configuration Problem, "
1982		    "%d vectors available but %d wanted!\n",
1983		    available, want);
1984		return (0); /* Will go to Legacy setup */
1985	}
1986
1987	if (pci_alloc_msix(dev, &vectors) == 0) {
1988		device_printf(pf->dev,
1989		    "Using MSIX interrupts with %d vectors\n", vectors);
1990		pf->msix = vectors;
1991		pf->vsi.num_queues = queues;
1992		return (vectors);
1993	}
1994msi:
1995	vectors = pci_msi_count(dev);
1996	pf->vsi.num_queues = 1;
1997	pf->msix = 1;
1998	ixl_max_queues = 1;
1999	ixl_enable_msix = 0;
2000	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2001		device_printf(pf->dev, "Using an MSI interrupt\n");
2002	else {
2003		pf->msix = 0;
2004		device_printf(pf->dev, "Using a Legacy interrupt\n");
2005	}
2006	return (vectors);
2007}
2008
2009
2010/*
2011 * Plumb MSI/X vectors
2012 */
2013static void
2014ixl_configure_msix(struct ixl_pf *pf)
2015{
2016	struct i40e_hw	*hw = &pf->hw;
2017	struct ixl_vsi *vsi = &pf->vsi;
2018	u32		reg;
2019	u16		vector = 1;
2020
2021	/* First set up the adminq - vector 0 */
2022	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2023	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2024
2025	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2026	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2027	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2028	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2029	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2030	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2031	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2032	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2033
2034	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
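	/*
	** 0x7FF (IXL_QUEUE_EOL) marks the cause list for vector 0 as
	** empty; the admin queue vector handles no queue causes.
	*/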
2035	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2036
2037	wr32(hw, I40E_PFINT_DYN_CTL0,
2038	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2039	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2040
2041	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2042
2043	/* Next configure the queues */
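	/*
	** LNKLSTN(i) starts each vector's cause list at RX queue i;
	** RQCTL chains RX -> TX and TQCTL chains on to the next RX
	** queue, with the last TX entry terminated by IXL_QUEUE_EOL.
	*/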
2044	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2045		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2046		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2047
2048		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2049		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2050		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2051		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2052		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2053		wr32(hw, I40E_QINT_RQCTL(i), reg);
2054
2055		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2056		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2057		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2058		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2059		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2060		if (i == (vsi->num_queues - 1))
2061			reg |= (IXL_QUEUE_EOL
2062			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2063		wr32(hw, I40E_QINT_TQCTL(i), reg);
2064	}
2065}
2066
2067/*
2068 * Configure for MSI single vector operation
2069 */
2070static void
2071ixl_configure_legacy(struct ixl_pf *pf)
2072{
2073	struct i40e_hw	*hw = &pf->hw;
2074	u32		reg;
2075
2076
2077	wr32(hw, I40E_PFINT_ITR0(0), 0);
2078	wr32(hw, I40E_PFINT_ITR0(1), 0);
2079
2080
2081	/* Setup "other" causes */
2082	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2083	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2084	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2085	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2086	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2087	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2088	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2089	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2090	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2091	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2092	    ;
2093	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2094
2095	/* SW_ITR_IDX = 0, but don't change INTENA */
2096	wr32(hw, I40E_PFINT_DYN_CTL0,
2097	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2098	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2099	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2100	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2101
2102	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2103	wr32(hw, I40E_PFINT_LNKLST0, 0);
2104
2105	/* Associate the queue pair to the vector and enable the q int */
2106	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2107	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2108	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2109	wr32(hw, I40E_QINT_RQCTL(0), reg);
2110
2111	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2112	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2113	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2114	wr32(hw, I40E_QINT_TQCTL(0), reg);
2115
2116	/* Next enable the queue pair */
2117	reg = rd32(hw, I40E_QTX_ENA(0));
2118	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2119	wr32(hw, I40E_QTX_ENA(0), reg);
2120
2121	reg = rd32(hw, I40E_QRX_ENA(0));
2122	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2123	wr32(hw, I40E_QRX_ENA(0), reg);
2124}
2125
2126
2127/*
2128 * Set the Initial ITR state
2129 */
2130static void
2131ixl_configure_itr(struct ixl_pf *pf)
2132{
2133	struct i40e_hw		*hw = &pf->hw;
2134	struct ixl_vsi		*vsi = &pf->vsi;
2135	struct ixl_queue	*que = vsi->queues;
2136
2137	vsi->rx_itr_setting = ixl_rx_itr;
2138	if (ixl_dynamic_rx_itr)
2139		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2140	vsi->tx_itr_setting = ixl_tx_itr;
2141	if (ixl_dynamic_tx_itr)
2142		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2143
2144	for (int i = 0; i < vsi->num_queues; i++, que++) {
2145		struct tx_ring	*txr = &que->txr;
2146		struct rx_ring 	*rxr = &que->rxr;
2147
2148		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2149		    vsi->rx_itr_setting);
2150		rxr->itr = vsi->rx_itr_setting;
2151		rxr->latency = IXL_AVE_LATENCY;
2152		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2153		    vsi->tx_itr_setting);
2154		txr->itr = vsi->tx_itr_setting;
2155		txr->latency = IXL_AVE_LATENCY;
2156	}
2157}
2158
2159
2160static int
2161ixl_allocate_pci_resources(struct ixl_pf *pf)
2162{
2163	int             rid;
2164	device_t        dev = pf->dev;
2165
2166	rid = PCIR_BAR(0);
2167	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2168	    &rid, RF_ACTIVE);
2169
2170	if (!(pf->pci_mem)) {
2171		device_printf(dev,"Unable to allocate bus resource: memory\n");
2172		return (ENXIO);
2173	}
2174
2175	pf->osdep.mem_bus_space_tag =
2176		rman_get_bustag(pf->pci_mem);
2177	pf->osdep.mem_bus_space_handle =
2178		rman_get_bushandle(pf->pci_mem);
2179	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2180	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2181
2182	pf->hw.back = &pf->osdep;
2183
2184	/*
2185	** Now set up MSI or MSI/X; this should
2186	** return the number of supported
2187	** vectors (it will be 1 for MSI).
2188	*/
2189	pf->msix = ixl_init_msix(pf);
2190	return (0);
2191}
2192
2193static void
2194ixl_free_pci_resources(struct ixl_pf * pf)
2195{
2196	struct ixl_vsi		*vsi = &pf->vsi;
2197	struct ixl_queue	*que = vsi->queues;
2198	device_t		dev = pf->dev;
2199	int			rid, memrid;
2200
2201	memrid = PCIR_BAR(IXL_BAR);
2202
2203	/* We may get here before stations are setup */
2204	if ((!ixl_enable_msix) || (que == NULL))
2205		goto early;
2206
2207	/*
2208	**  Release all msix VSI resources:
2209	*/
2210	for (int i = 0; i < vsi->num_queues; i++, que++) {
2211		rid = que->msix + 1;
2212		if (que->tag != NULL) {
2213			bus_teardown_intr(dev, que->res, que->tag);
2214			que->tag = NULL;
2215		}
2216		if (que->res != NULL)
2217			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2218	}
2219
2220early:
2221	/* Clean the AdminQ interrupt last */
2222	if (pf->admvec) /* we are doing MSIX */
2223		rid = pf->admvec + 1;
2224	else
2225		rid = (pf->msix != 0) ? 1 : 0;
2226
2227	if (pf->tag != NULL) {
2228		bus_teardown_intr(dev, pf->res, pf->tag);
2229		pf->tag = NULL;
2230	}
2231	if (pf->res != NULL)
2232		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2233
2234	if (pf->msix)
2235		pci_release_msi(dev);
2236
2237	if (pf->msix_mem != NULL)
2238		bus_release_resource(dev, SYS_RES_MEMORY,
2239		    memrid, pf->msix_mem);
2240
2241	if (pf->pci_mem != NULL)
2242		bus_release_resource(dev, SYS_RES_MEMORY,
2243		    PCIR_BAR(0), pf->pci_mem);
2244
2245	return;
2246}
2247
2248
2249/*********************************************************************
2250 *
2251 *  Setup networking device structure and register an interface.
2252 *
2253 **********************************************************************/
2254static int
2255ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2256{
2257	struct ifnet		*ifp;
2258	struct i40e_hw		*hw = vsi->hw;
2259	struct ixl_queue	*que = vsi->queues;
2260	struct i40e_aq_get_phy_abilities_resp abilities_resp;
2261	enum i40e_status_code aq_error = 0;
2262
2263	INIT_DEBUGOUT("ixl_setup_interface: begin");
2264
2265	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2266	if (ifp == NULL) {
2267		device_printf(dev, "can not allocate ifnet structure\n");
2268		return (-1);
2269	}
2270	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2271	ifp->if_mtu = ETHERMTU;
2272	ifp->if_baudrate = 4000000000;	/* XXX: placeholder, not the real link rate */
2273	ifp->if_init = ixl_init;
2274	ifp->if_softc = vsi;
2275	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2276	ifp->if_ioctl = ixl_ioctl;
2277
2278#if __FreeBSD_version >= 1100000
2279	if_setgetcounterfn(ifp, ixl_get_counter);
2280#endif
2281
2282	ifp->if_transmit = ixl_mq_start;
2283
2284	ifp->if_qflush = ixl_qflush;
2285
2286	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2287
2288	ether_ifattach(ifp, hw->mac.addr);
2289
2290	vsi->max_frame_size =
2291	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2292	    + ETHER_VLAN_ENCAP_LEN;
2293
2294	/*
2295	 * Tell the upper layer(s) we support long frames.
2296	 */
2297	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2298
2299	ifp->if_capabilities |= IFCAP_HWCSUM;
2300	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2301	ifp->if_capabilities |= IFCAP_TSO;
2302	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2303	ifp->if_capabilities |= IFCAP_LRO;
2304
2305	/* VLAN capabilities */
2306	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2307			     |  IFCAP_VLAN_HWTSO
2308			     |  IFCAP_VLAN_MTU
2309			     |  IFCAP_VLAN_HWCSUM;
2310	ifp->if_capenable = ifp->if_capabilities;
2311
2312	/*
2313	** Don't turn this on by default: if vlans are
2314	** created on another pseudo device (e.g. lagg)
2315	** then vlan events are not passed through, breaking
2316	** operation. With HW FILTER off it works. If you
2317	** use vlans directly on the ixl driver you can
2318	** enable this and get full hardware tag filtering.
2319	*/
2320	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2321
2322	/*
2323	 * Specify the media types supported by this adapter and register
2324	 * callbacks to update media and link information
2325	 */
2326	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2327		     ixl_media_status);
2328
2329	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2330	if (aq_error) {
2331		printf("Error getting supported media types, AQ error %d\n", aq_error);
2332		return (EPERM);
2333	}
2334
2335	/* Display supported media types */
2336	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2337		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2338
2339	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2340		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2341
2342	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2343	    abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2344		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2345	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2346		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2347	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2348		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2349	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2350		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2351
2352	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2353	    abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2354		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2355	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2356		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2357	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2358		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2359
2360	/* Use autoselect media by default */
2361	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2362	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2363
2364	return (0);
2365}
2366
2367static bool
2368ixl_config_link(struct i40e_hw *hw)
2369{
2370	bool check;
2371
2372	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2373	check = i40e_get_link_status(hw);
2374#ifdef IXL_DEBUG
2375	printf("Link is %s\n", check ? "up":"down");
2376#endif
2377	return (check);
2378}
2379
2380/*********************************************************************
2381 *
2382 *  Initialize this VSI
2383 *
2384 **********************************************************************/
2385static int
2386ixl_setup_vsi(struct ixl_vsi *vsi)
2387{
2388	struct i40e_hw	*hw = vsi->hw;
2389	device_t 	dev = vsi->dev;
2390	struct i40e_aqc_get_switch_config_resp *sw_config;
2391	struct i40e_vsi_context	ctxt;
2392	u8	aq_buf[I40E_AQ_LARGE_BUF];
2393	int	ret = I40E_SUCCESS;
2394	u16	next = 0;
2395
2396	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2397	ret = i40e_aq_get_switch_config(hw, sw_config,
2398	    sizeof(aq_buf), &next, NULL);
2399	if (ret) {
2400		device_printf(dev,"aq_get_switch_config failed!!\n");
2401		return (ret);
2402	}
2403#ifdef IXL_DEBUG
2404	printf("Switch config: header reported: %d in structure, %d total\n",
2405    	    sw_config->header.num_reported, sw_config->header.num_total);
2406	printf("type=%d seid=%d uplink=%d downlink=%d\n",
2407	    sw_config->element[0].element_type,
2408	    sw_config->element[0].seid,
2409	    sw_config->element[0].uplink_seid,
2410	    sw_config->element[0].downlink_seid);
2411#endif
2412	/* Save off this important value */
2413	vsi->seid = sw_config->element[0].seid;
2414
2415	memset(&ctxt, 0, sizeof(ctxt));
2416	ctxt.seid = vsi->seid;
2417	ctxt.pf_num = hw->pf_id;
2418	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2419	if (ret) {
2420		device_printf(dev,"get vsi params failed %x!!\n", ret);
2421		return (ret);
2422	}
2423#ifdef IXL_DEBUG
2424	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2425	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2426	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2427	    ctxt.uplink_seid, ctxt.vsi_number,
2428	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2429	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2430	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2431#endif
2432	/*
2433	** Set the queue and traffic class bits
2434	**  - when multiple traffic classes are supported
2435	**    this will need to be more robust.
2436	*/
2437	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2438	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2439	ctxt.info.queue_mapping[0] = 0;
2440	ctxt.info.tc_mapping[0] = 0x0800;
2441
2442	/* Set VLAN receive stripping mode */
2443	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2444	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2445	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2446	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2447	else
2448	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2449
2450	/* Keep copy of VSI info in VSI for statistic counters */
2451	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2452
2453	/* Reset VSI statistics */
2454	ixl_vsi_reset_stats(vsi);
2455	vsi->hw_filters_add = 0;
2456	vsi->hw_filters_del = 0;
2457
2458	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2459	if (ret)
2460		device_printf(dev,"update vsi params failed %x!!\n",
2461		   hw->aq.asq_last_status);
2462	return (ret);
2463}
2464
2465
2466/*********************************************************************
2467 *
2468 *  Initialize the VSI:  this handles contexts, which means things
2469 *  			 like the number of descriptors, buffer size,
2470 *			 plus we init the rings thru this function.
2471 *
2472 **********************************************************************/
2473static int
2474ixl_initialize_vsi(struct ixl_vsi *vsi)
2475{
2476	struct ixl_queue	*que = vsi->queues;
2477	device_t		dev = vsi->dev;
2478	struct i40e_hw		*hw = vsi->hw;
2479	int			err = 0;
2480
2481
2482	for (int i = 0; i < vsi->num_queues; i++, que++) {
2483		struct tx_ring		*txr = &que->txr;
2484		struct rx_ring 		*rxr = &que->rxr;
2485		struct i40e_hmc_obj_txq tctx;
2486		struct i40e_hmc_obj_rxq rctx;
2487		u32			txctl;
2488		u16			size;
2489
2490
2491		/* Setup the HMC TX Context  */
2492		size = que->num_desc * sizeof(struct i40e_tx_desc);
2493		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2494		tctx.new_context = 1;
2495		tctx.base = (txr->dma.pa/128);
2496		tctx.qlen = que->num_desc;
2497		tctx.fc_ena = 0;
2498		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2499		/* Enable HEAD writeback */
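		/*
		** The writeback slot is the u32 placed directly after the
		** descriptors; ixl_setup_stations() reserved it by adding
		** sizeof(u32) to the ring allocation size.
		*/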
2500		tctx.head_wb_ena = 1;
2501		tctx.head_wb_addr = txr->dma.pa +
2502		    (que->num_desc * sizeof(struct i40e_tx_desc));
2503		tctx.rdylist_act = 0;
2504		err = i40e_clear_lan_tx_queue_context(hw, i);
2505		if (err) {
2506			device_printf(dev, "Unable to clear TX context\n");
2507			break;
2508		}
2509		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2510		if (err) {
2511			device_printf(dev, "Unable to set TX context\n");
2512			break;
2513		}
2514		/* Associate the ring with this PF */
2515		txctl = I40E_QTX_CTL_PF_QUEUE;
2516		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2517		    I40E_QTX_CTL_PF_INDX_MASK);
2518		wr32(hw, I40E_QTX_CTL(i), txctl);
2519		ixl_flush(hw);
2520
2521		/* Do ring (re)init */
2522		ixl_init_tx_ring(que);
2523
2524		/* Next setup the HMC RX Context  */
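		/*
		** A standard 2K cluster suffices for default MTUs, a
		** page-sized cluster otherwise; hardware may chain up to
		** rx_buf_chain_len buffers, so rxmax is clamped below.
		*/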
2525		if (vsi->max_frame_size <= 2048)
2526			rxr->mbuf_sz = MCLBYTES;
2527		else
2528			rxr->mbuf_sz = MJUMPAGESIZE;
2529
2530		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2531
2532		/* Set up an RX context for the HMC */
2533		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2534		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2535		/* ignore header split for now */
2536		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2537		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2538		    vsi->max_frame_size : max_rxmax;
2539		rctx.dtype = 0;
2540		rctx.dsize = 1;	/* do 32byte descriptors */
2541		rctx.hsplit_0 = 0;  /* no HDR split initially */
2542		rctx.base = (rxr->dma.pa/128);
2543		rctx.qlen = que->num_desc;
2544		rctx.tphrdesc_ena = 1;
2545		rctx.tphwdesc_ena = 1;
2546		rctx.tphdata_ena = 0;
2547		rctx.tphhead_ena = 0;
2548		rctx.lrxqthresh = 2;
2549		rctx.crcstrip = 1;
2550		rctx.l2tsel = 1;
2551		rctx.showiv = 1;
2552		rctx.fc_ena = 0;
2553		rctx.prefena = 1;
2554
2555		err = i40e_clear_lan_rx_queue_context(hw, i);
2556		if (err) {
2557			device_printf(dev,
2558			    "Unable to clear RX context %d\n", i);
2559			break;
2560		}
2561		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2562		if (err) {
2563			device_printf(dev, "Unable to set RX context %d\n", i);
2564			break;
2565		}
2566		err = ixl_init_rx_ring(que);
2567		if (err) {
2568			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2569			break;
2570		}
2571		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2572		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2573	}
2574	return (err);
2575}
2576
2577
2578/*********************************************************************
2579 *
2580 *  Free all VSI structs.
2581 *
2582 **********************************************************************/
2583void
2584ixl_free_vsi(struct ixl_vsi *vsi)
2585{
2586	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2587	struct ixl_queue	*que = vsi->queues;
2588	struct ixl_mac_filter *f;
2589
2590	/* Free station queues */
2591	for (int i = 0; i < vsi->num_queues; i++, que++) {
2592		struct tx_ring *txr = &que->txr;
2593		struct rx_ring *rxr = &que->rxr;
2594
2595		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2596			continue;
2597		IXL_TX_LOCK(txr);
2598		ixl_free_que_tx(que);
2599		if (txr->base)
2600			i40e_free_dma_mem(&pf->hw, &txr->dma);
2601		IXL_TX_UNLOCK(txr);
2602		IXL_TX_LOCK_DESTROY(txr);
2603
2604		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2605			continue;
2606		IXL_RX_LOCK(rxr);
2607		ixl_free_que_rx(que);
2608		if (rxr->base)
2609			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2610		IXL_RX_UNLOCK(rxr);
2611		IXL_RX_LOCK_DESTROY(rxr);
2612
2613	}
2614	free(vsi->queues, M_DEVBUF);
2615
2616	/* Free VSI filter list */
2617	while (!SLIST_EMPTY(&vsi->ftl)) {
2618		f = SLIST_FIRST(&vsi->ftl);
2619		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2620		free(f, M_DEVBUF);
2621	}
2622}
2623
2624
2625/*********************************************************************
2626 *
2627 *  Allocate memory for the VSI (virtual station interface) and their
2628 *  associated queues, rings and the descriptors associated with each,
2629 *  called only once at attach.
2630 *
2631 **********************************************************************/
2632static int
2633ixl_setup_stations(struct ixl_pf *pf)
2634{
2635	device_t		dev = pf->dev;
2636	struct ixl_vsi		*vsi;
2637	struct ixl_queue	*que;
2638	struct tx_ring		*txr;
2639	struct rx_ring		*rxr;
2640	int 			rsize, tsize;
2641	int			error = I40E_SUCCESS;
2642
2643	vsi = &pf->vsi;
2644	vsi->back = (void *)pf;
2645	vsi->hw = &pf->hw;
2646	vsi->id = 0;
2647	vsi->num_vlans = 0;
2648
2649	/* Get memory for the station queues */
2650	if (!(vsi->queues =
2651	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2652	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2653		device_printf(dev, "Unable to allocate queue memory\n");
2654		error = ENOMEM;
2655		goto early;
2656	}
2657
2658	for (int i = 0; i < vsi->num_queues; i++) {
2659		que = &vsi->queues[i];
2660		que->num_desc = ixl_ringsz;
2661		que->me = i;
2662		que->vsi = vsi;
2663		/* mark the queue as active */
2664		vsi->active_queues |= (u64)1 << que->me;
2665		txr = &que->txr;
2666		txr->que = que;
2667		txr->tail = I40E_QTX_TAIL(que->me);
2668
2669		/* Initialize the TX lock */
2670		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2671		    device_get_nameunit(dev), que->me);
2672		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2673		/* Create the TX descriptor ring */
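		/*
		** The extra u32 past the descriptors is the HEAD writeback
		** slot programmed into the TX context in ixl_initialize_vsi().
		*/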
2674		tsize = roundup2((que->num_desc *
2675		    sizeof(struct i40e_tx_desc)) +
2676		    sizeof(u32), DBA_ALIGN);
2677		if (i40e_allocate_dma_mem(&pf->hw,
2678		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
2679			device_printf(dev,
2680			    "Unable to allocate TX Descriptor memory\n");
2681			error = ENOMEM;
2682			goto fail;
2683		}
2684		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2685		bzero((void *)txr->base, tsize);
2686		/* Now allocate transmit soft structs for the ring */
2687		if (ixl_allocate_tx_data(que)) {
2688			device_printf(dev,
2689			    "Critical Failure setting up TX structures\n");
2690			error = ENOMEM;
2691			goto fail;
2692		}
2693		/* Allocate a buf ring */
2694		txr->br = buf_ring_alloc(4096, M_DEVBUF,
2695		    M_WAITOK, &txr->mtx);
2696		if (txr->br == NULL) {
2697			device_printf(dev,
2698			    "Critical Failure setting up TX buf ring\n");
2699			error = ENOMEM;
2700			goto fail;
2701		}
2702
2703		/*
2704		 * Next the RX queues...
2705		 */
2706		rsize = roundup2(que->num_desc *
2707		    sizeof(union i40e_rx_desc), DBA_ALIGN);
2708		rxr = &que->rxr;
2709		rxr->que = que;
2710		rxr->tail = I40E_QRX_TAIL(que->me);
2711
2712		/* Initialize the RX side lock */
2713		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2714		    device_get_nameunit(dev), que->me);
2715		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2716
2717		if (i40e_allocate_dma_mem(&pf->hw,
2718		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
2719			device_printf(dev,
2720			    "Unable to allocate RX Descriptor memory\n");
2721			error = ENOMEM;
2722			goto fail;
2723		}
2724		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2725		bzero((void *)rxr->base, rsize);
2726
2727		/* Allocate receive soft structs for the ring */
2728		if (ixl_allocate_rx_data(que)) {
2729			device_printf(dev,
2730			    "Critical Failure setting up receive structs\n");
2731			error = ENOMEM;
2732			goto fail;
2733		}
2734	}
2735
2736	return (0);
2737
2738fail:
2739	for (int i = 0; i < vsi->num_queues; i++) {
2740		que = &vsi->queues[i];
2741		rxr = &que->rxr;
2742		txr = &que->txr;
2743		if (rxr->base)
2744			i40e_free_dma_mem(&pf->hw, &rxr->dma);
2745		if (txr->base)
2746			i40e_free_dma_mem(&pf->hw, &txr->dma);
2747	}
2748
2749early:
2750	return (error);
2751}
2752
2753/*
2754** Provide an update to the queue RX
2755** interrupt moderation value.
2756*/
2757static void
2758ixl_set_queue_rx_itr(struct ixl_queue *que)
2759{
2760	struct ixl_vsi	*vsi = que->vsi;
2761	struct i40e_hw	*hw = vsi->hw;
2762	struct rx_ring	*rxr = &que->rxr;
2763	u16		rx_itr;
2764	u16		rx_latency = 0;
2765	int		rx_bytes;
2766
2767
2768	/* Idle, do nothing */
2769	if (rxr->bytes == 0)
2770		return;
2771
2772	if (ixl_dynamic_rx_itr) {
2773		rx_bytes = rxr->bytes/rxr->itr;
2774		rx_itr = rxr->itr;
2775
2776		/* Adjust latency range */
2777		switch (rxr->latency) {
2778		case IXL_LOW_LATENCY:
2779			if (rx_bytes > 10) {
2780				rx_latency = IXL_AVE_LATENCY;
2781				rx_itr = IXL_ITR_20K;
2782			}
2783			break;
2784		case IXL_AVE_LATENCY:
2785			if (rx_bytes > 20) {
2786				rx_latency = IXL_BULK_LATENCY;
2787				rx_itr = IXL_ITR_8K;
2788			} else if (rx_bytes <= 10) {
2789				rx_latency = IXL_LOW_LATENCY;
2790				rx_itr = IXL_ITR_100K;
2791			}
2792			break;
2793		case IXL_BULK_LATENCY:
2794			if (rx_bytes <= 20) {
2795				rx_latency = IXL_AVE_LATENCY;
2796				rx_itr = IXL_ITR_20K;
2797			}
2798			break;
2799		}
2800
2801		rxr->latency = rx_latency;
2802
2803		if (rx_itr != rxr->itr) {
2804			/* do an exponential smoothing */
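			/*
			** A weighted harmonic mean, 10ab/(9a + b): roughly
			** 90% of the old ITR and 10% of the new sample.
			*/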
2805			rx_itr = (10 * rx_itr * rxr->itr) /
2806			    ((9 * rx_itr) + rxr->itr);
2807			rxr->itr = rx_itr & IXL_MAX_ITR;
2808			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2809			    que->me), rxr->itr);
2810		}
2811	} else { /* We may have toggled to non-dynamic */
2812		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2813			vsi->rx_itr_setting = ixl_rx_itr;
2814		/* Update the hardware if needed */
2815		if (rxr->itr != vsi->rx_itr_setting) {
2816			rxr->itr = vsi->rx_itr_setting;
2817			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2818			    que->me), rxr->itr);
2819		}
2820	}
2821	rxr->bytes = 0;
2822	rxr->packets = 0;
2823	return;
2824}
2825
2826
2827/*
2828** Provide a update to the queue TX
2829** Provide an update to the queue TX
2830*/
2831static void
2832ixl_set_queue_tx_itr(struct ixl_queue *que)
2833{
2834	struct ixl_vsi	*vsi = que->vsi;
2835	struct i40e_hw	*hw = vsi->hw;
2836	struct tx_ring	*txr = &que->txr;
2837	u16		tx_itr;
2838	u16		tx_latency = 0;
2839	int		tx_bytes;
2840
2841
2842	/* Idle, do nothing */
2843	if (txr->bytes == 0)
2844		return;
2845
2846	if (ixl_dynamic_tx_itr) {
2847		tx_bytes = txr->bytes/txr->itr;
2848		tx_itr = txr->itr;
2849
2850		switch (txr->latency) {
2851		case IXL_LOW_LATENCY:
2852			if (tx_bytes > 10) {
2853				tx_latency = IXL_AVE_LATENCY;
2854				tx_itr = IXL_ITR_20K;
2855			}
2856			break;
2857		case IXL_AVE_LATENCY:
2858			if (tx_bytes > 20) {
2859				tx_latency = IXL_BULK_LATENCY;
2860				tx_itr = IXL_ITR_8K;
2861			} else if (tx_bytes <= 10) {
2862				tx_latency = IXL_LOW_LATENCY;
2863				tx_itr = IXL_ITR_100K;
2864			}
2865			break;
2866		case IXL_BULK_LATENCY:
2867			if (tx_bytes <= 20) {
2868				tx_latency = IXL_AVE_LATENCY;
2869				tx_itr = IXL_ITR_20K;
2870			}
2871			break;
2872		}
2873
2874		txr->latency = tx_latency;
2875
2876		if (tx_itr != txr->itr) {
2877			/* do an exponential smoothing */
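			/* Same 90/10 weighted harmonic smoothing as the RX path */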
2878			tx_itr = (10 * tx_itr * txr->itr) /
2879			    ((9 * tx_itr) + txr->itr);
2880			txr->itr = tx_itr & IXL_MAX_ITR;
2881			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2882			    que->me), txr->itr);
2883		}
2884
2885	} else { /* We may have toggled to non-dynamic */
2886		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2887			vsi->tx_itr_setting = ixl_tx_itr;
2888		/* Update the hardware if needed */
2889		if (txr->itr != vsi->tx_itr_setting) {
2890			txr->itr = vsi->tx_itr_setting;
2891			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2892			    que->me), txr->itr);
2893		}
2894	}
2895	txr->bytes = 0;
2896	txr->packets = 0;
2897	return;
2898}
2899
2900
2901static void
2902ixl_add_hw_stats(struct ixl_pf *pf)
2903{
2904	device_t dev = pf->dev;
2905	struct ixl_vsi *vsi = &pf->vsi;
2906	struct ixl_queue *queues = vsi->queues;
2907	struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
2908	struct i40e_hw_port_stats *pf_stats = &pf->stats;
2909
2910	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2911	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2912	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2913
2914	struct sysctl_oid *vsi_node, *queue_node;
2915	struct sysctl_oid_list *vsi_list, *queue_list;
2916
2917	struct tx_ring *txr;
2918	struct rx_ring *rxr;
2919
2920	/* Driver statistics */
2921	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2922			CTLFLAG_RD, &pf->watchdog_events,
2923			"Watchdog timeouts");
2924	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2925			CTLFLAG_RD, &pf->admin_irq,
2926			"Admin Queue IRQ Handled");
2927
2928	/* VSI statistics */
2929#define QUEUE_NAME_LEN 32
2930	char queue_namebuf[QUEUE_NAME_LEN];
2931
2932	// ERJ: Only one vsi now, re-do when >1 VSI enabled
2933	// snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
2934	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2935				   CTLFLAG_RD, NULL, "VSI-specific stats");
2936	vsi_list = SYSCTL_CHILDREN(vsi_node);
2937
2938	ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
2939
2940	/* Queue statistics */
2941	for (int q = 0; q < vsi->num_queues; q++) {
2942		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2943		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2944					     CTLFLAG_RD, NULL, "Queue #");
2945		queue_list = SYSCTL_CHILDREN(queue_node);
2946
2947		txr = &(queues[q].txr);
2948		rxr = &(queues[q].rxr);
2949
2950		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2951				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2952				"m_defrag() failed");
2953		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2954				CTLFLAG_RD, &(queues[q].dropped_pkts),
2955				"Driver dropped packets");
2956		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2957				CTLFLAG_RD, &(queues[q].irqs),
2958				"irqs on this queue");
2959		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2960				CTLFLAG_RD, &(queues[q].tso),
2961				"TSO");
2962		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2963				CTLFLAG_RD, &(queues[q].tx_dma_setup),
2964				"Driver tx dma failure in xmit");
2965		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2966				CTLFLAG_RD, &(txr->no_desc),
2967				"Queue No Descriptor Available");
2968		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2969				CTLFLAG_RD, &(txr->total_packets),
2970				"Queue Packets Transmitted");
2971		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2972				CTLFLAG_RD, &(txr->tx_bytes),
2973				"Queue Bytes Transmitted");
2974		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2975				CTLFLAG_RD, &(rxr->rx_packets),
2976				"Queue Packets Received");
2977		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2978				CTLFLAG_RD, &(rxr->rx_bytes),
2979				"Queue Bytes Received");
2980	}
2981
2982	/* MAC stats */
2983	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
2984}
2985
2986static void
2987ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
2988	struct sysctl_oid_list *child,
2989	struct i40e_eth_stats *eth_stats)
2990{
2991	struct ixl_sysctl_info ctls[] =
2992	{
2993		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2994		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
2995			"Unicast Packets Received"},
2996		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
2997			"Multicast Packets Received"},
2998		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
2999			"Broadcast Packets Received"},
3000		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3001		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3002		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3003		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3004			"Multicast Packets Transmitted"},
3005		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3006			"Broadcast Packets Transmitted"},
3007		{&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
3008		// end
3009		{0,0,0}
3010	};
3011
3012	struct ixl_sysctl_info *entry = ctls;
3013	while (entry->stat != 0)
3014	{
3015		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3016				CTLFLAG_RD, entry->stat,
3017				entry->description);
3018		entry++;
3019	}
3020}
3021
3022static void
3023ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3024	struct sysctl_oid_list *child,
3025	struct i40e_hw_port_stats *stats)
3026{
3027	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3028				    CTLFLAG_RD, NULL, "Mac Statistics");
3029	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3030
3031	struct i40e_eth_stats *eth_stats = &stats->eth;
3032	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3033
3034	struct ixl_sysctl_info ctls[] =
3035	{
3036		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3037		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3038		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3039		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3040		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3041		/* Packet Reception Stats */
3042		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3043		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3044		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3045		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3046		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3047		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3048		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3049		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3050		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3051		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3052		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3053		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3054		/* Packet Transmission Stats */
3055		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3056		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3057		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3058		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3059		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3060		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3061		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3062		/* Flow control */
3063		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3064		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3065		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3066		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3067		/* End */
3068		{0,0,0}
3069	};
3070
3071	struct ixl_sysctl_info *entry = ctls;
3072	while (entry->stat != 0)
3073	{
3074		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3075				CTLFLAG_RD, entry->stat,
3076				entry->description);
3077		entry++;
3078	}
3079}
3080
3081/*
3082** ixl_config_rss - setup RSS
3083**  - note this is done for the single vsi
3084*/
3085static void ixl_config_rss(struct ixl_vsi *vsi)
3086{
3087	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3088	struct i40e_hw	*hw = vsi->hw;
3089	u32		lut = 0;
3090	u64		set_hena, hena;
3091	int		i, j;
3092
3093	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
3094	    0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
3095	    0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
3096	    0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
3097
3098	/* Fill out hash function seed */
3099	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3100		wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
3101
3102	/* Enable PCTYPES for RSS: */
3103	set_hena =
3104		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3105		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3106		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3107		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3108		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3109		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3110		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3111		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3112		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3113		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3114		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3115
3116	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3117	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3118	hena |= set_hena;
3119	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3120	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3121
3122	/* Populate the LUT with max no. of queues in round robin fashion */
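	/*
	** e.g. with 4 queues the first write below is HLUT(0) = 0x00010203,
	** packing the entries for queues 0 through 3 one byte each.
	*/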
3123	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3124		if (j == vsi->num_queues)
3125			j = 0;
3126		/* lut = 4-byte sliding window of 4 lut entries */
3127		lut = (lut << 8) | (j &
3128		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3129		/* On i = 3, we have 4 entries in lut; write to the register */
3130		if ((i & 3) == 3)
3131			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3132	}
3133	ixl_flush(hw);
3134}
3135
3136
3137/*
3138** This routine is run via a vlan config EVENT;
3139** it enables us to use the HW Filter table since
3140** we can get the vlan id. This just creates the
3141** entry in the soft version of the VFTA; init will
3142** repopulate the real table.
3143*/
3144static void
3145ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3146{
3147	struct ixl_vsi	*vsi = ifp->if_softc;
3148	struct i40e_hw	*hw = vsi->hw;
3149	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3150
3151	if (ifp->if_softc !=  arg)   /* Not our event */
3152		return;
3153
3154	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3155		return;
3156
3157	IXL_PF_LOCK(pf);
3158	++vsi->num_vlans;
3159	ixl_add_filter(vsi, hw->mac.addr, vtag);
3160	IXL_PF_UNLOCK(pf);
3161}
3162
3163/*
3164** This routine is run via a vlan
3165** unconfig EVENT; it removes our entry
3166** from the soft vfta.
3167*/
3168static void
3169ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3170{
3171	struct ixl_vsi	*vsi = ifp->if_softc;
3172	struct i40e_hw	*hw = vsi->hw;
3173	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3174
3175	if (ifp->if_softc !=  arg)
3176		return;
3177
3178	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3179		return;
3180
3181	IXL_PF_LOCK(pf);
3182	--vsi->num_vlans;
3183	ixl_del_filter(vsi, hw->mac.addr, vtag);
3184	IXL_PF_UNLOCK(pf);
3185}
3186
3187/*
3188** This routine updates vlan filters; called by init,
3189** it scans the filter table and then updates the hw
3190** after a soft reset.
3191*/
3192static void
3193ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3194{
3195	struct ixl_mac_filter	*f;
3196	int			cnt = 0, flags;
3197
3198	if (vsi->num_vlans == 0)
3199		return;
3200	/*
3201	** Scan the filter list for vlan entries,
3202	** mark them for addition and then call
3203	** for the AQ update.
3204	*/
3205	SLIST_FOREACH(f, &vsi->ftl, next) {
3206		if (f->flags & IXL_FILTER_VLAN) {
3207			f->flags |=
3208			    (IXL_FILTER_ADD |
3209			    IXL_FILTER_USED);
3210			cnt++;
3211		}
3212	}
3213	if (cnt == 0) {
3214		printf("setup vlan: no filters found!\n");
3215		return;
3216	}
3217	flags = IXL_FILTER_VLAN;
3218	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3219	ixl_add_hw_filters(vsi, flags, cnt);
3220	return;
3221}
3222
3223/*
3224** Initialize filter list and add filters that the hardware
3225** needs to know about.
3226*/
3227static void
3228ixl_init_filters(struct ixl_vsi *vsi)
3229{
3230	/* Add broadcast address */
3231	u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3232	ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3233}
3234
3235/*
3236** This routine adds multicast filters
3237*/
3238static void
3239ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3240{
3241	struct ixl_mac_filter *f;
3242
3243	/* Does one already exist */
3244	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3245	if (f != NULL)
3246		return;
3247
3248	f = ixl_get_filter(vsi);
3249	if (f == NULL) {
3250		printf("WARNING: no filter available!!\n");
3251		return;
3252	}
3253	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3254	f->vlan = IXL_VLAN_ANY;
3255	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3256	    | IXL_FILTER_MC);
3257
3258	return;
3259}
3260
3261/*
3262** This routine adds macvlan filters
3263*/
3264static void
3265ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3266{
3267	struct ixl_mac_filter	*f, *tmp;
3268	device_t		dev = vsi->dev;
3269
3270	DEBUGOUT("ixl_add_filter: begin");
3271
3272	/* Does one already exist */
3273	f = ixl_find_filter(vsi, macaddr, vlan);
3274	if (f != NULL)
3275		return;
3276	/*
3277	** Is this the first vlan being registered? If so, we
3278	** need to remove the ANY filter that indicates we are
3279	** not in a vlan, and replace that with a 0 filter.
3280	*/
3281	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3282		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3283		if (tmp != NULL) {
3284			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3285			ixl_add_filter(vsi, macaddr, 0);
3286		}
3287	}
3288
3289	f = ixl_get_filter(vsi);
3290	if (f == NULL) {
3291		device_printf(dev, "WARNING: no filter available!!\n");
3292		return;
3293	}
3294	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3295	f->vlan = vlan;
3296	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3297	if (f->vlan != IXL_VLAN_ANY)
3298		f->flags |= IXL_FILTER_VLAN;
3299
3300	ixl_add_hw_filters(vsi, f->flags, 1);
3301	return;
3302}
3303
3304static void
3305ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3306{
3307	struct ixl_mac_filter *f;
3308
3309	f = ixl_find_filter(vsi, macaddr, vlan);
3310	if (f == NULL)
3311		return;
3312
3313	f->flags |= IXL_FILTER_DEL;
3314	ixl_del_hw_filters(vsi, 1);
3315
3316	/* Check if this is the last vlan removal */
3317	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3318		/* Switch back to a non-vlan filter */
3319		ixl_del_filter(vsi, macaddr, 0);
3320		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3321	}
3322	return;
3323}
3324
3325/*
3326** Find the filter with both matching mac addr and vlan id
3327*/
3328static struct ixl_mac_filter *
3329ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3330{
3331	struct ixl_mac_filter	*f;
3332	bool			match = FALSE;
3333
3334	SLIST_FOREACH(f, &vsi->ftl, next) {
3335		if (!cmp_etheraddr(f->macaddr, macaddr))
3336			continue;
3337		if (f->vlan == vlan) {
3338			match = TRUE;
3339			break;
3340		}
3341	}
3342
3343	if (!match)
3344		f = NULL;
3345	return (f);
3346}
3347
3348/*
3349** This routine takes additions to the vsi filter
3350** table and creates an Admin Queue call to create
3351** the filters in the hardware.
3352*/
3353static void
3354ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3355{
3356	struct i40e_aqc_add_macvlan_element_data *a, *b;
3357	struct ixl_mac_filter	*f;
3358	struct i40e_hw	*hw = vsi->hw;
3359	device_t	dev = vsi->dev;
3360	int		err, j = 0;
3361
3362	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3363	    M_DEVBUF, M_NOWAIT | M_ZERO);
3364	if (a == NULL) {
3365		device_printf(dev, "add hw filter failed to get memory\n");
3366		return;
3367	}
3368
3369	/*
3370	** Scan the filter list, each time we find one
3371	** we add it to the admin queue array and turn off
3372	** the add bit.
3373	*/
3374	SLIST_FOREACH(f, &vsi->ftl, next) {
3375		if (f->flags == flags) {
3376			b = &a[j]; // a pox on fvl long names :)
3377			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3378			b->vlan_tag =
3379			    (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3380			b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3381			f->flags &= ~IXL_FILTER_ADD;
3382			j++;
3383		}
3384		if (j == cnt)
3385			break;
3386	}
3387	if (j > 0) {
3388		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3389		if (err)
3390			device_printf(dev, "aq_add_macvlan failure %d\n",
3391			    hw->aq.asq_last_status);
3392		else
3393			vsi->hw_filters_add += j;
3394	}
3395	free(a, M_DEVBUF);
3396	return;
3397}
3398
3399/*
3400** This routine takes removals in the vsi filter
3401** table and creates an Admin Queue call to delete
3402** the filters in the hardware.
3403*/
3404static void
3405ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3406{
3407	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3408	struct i40e_hw		*hw = vsi->hw;
3409	device_t		dev = vsi->dev;
3410	struct ixl_mac_filter	*f, *f_temp;
3411	int			err, j = 0;
3412
3413	DEBUGOUT("ixl_del_hw_filters: begin\n");
3414
3415	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3416	    M_DEVBUF, M_NOWAIT | M_ZERO);
3417	if (d == NULL) {
3418		printf("del hw filter failed to get memory\n");
3419		return;
3420	}
3421
3422	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3423		if (f->flags & IXL_FILTER_DEL) {
3424			e = &d[j]; // a pox on fvl long names :)
3425			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3426			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3427			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3428			/* delete entry from vsi list */
3429			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3430			free(f, M_DEVBUF);
3431			j++;
3432		}
3433		if (j == cnt)
3434			break;
3435	}
3436	if (j > 0) {
3437		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3438		/* NOTE: returns ENOENT every time but seems to work fine,
3439		   so we'll ignore that specific error. */
3440		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3441			int sc = 0;
3442			for (int i = 0; i < j; i++)
3443				sc += (!d[i].error_code);
3444			vsi->hw_filters_del += sc;
3445			device_printf(dev,
3446			    "Failed to remove %d/%d filters, aq error %d\n",
3447			    j - sc, j, hw->aq.asq_last_status);
3448		} else
3449			vsi->hw_filters_del += j;
3450	}
3451	free(d, M_DEVBUF);
3452
3453	DEBUGOUT("ixl_del_hw_filters: end\n");
3454	return;
3455}
3456
3457
3458static void
3459ixl_enable_rings(struct ixl_vsi *vsi)
3460{
3461	struct i40e_hw	*hw = vsi->hw;
3462	u32		reg;
3463
3464	for (int i = 0; i < vsi->num_queues; i++) {
3465		i40e_pre_tx_queue_cfg(hw, i, TRUE);
3466
3467		reg = rd32(hw, I40E_QTX_ENA(i));
3468		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3469		    I40E_QTX_ENA_QENA_STAT_MASK;
3470		wr32(hw, I40E_QTX_ENA(i), reg);
3471		/* Verify the enable took */
3472		for (int j = 0; j < 10; j++) {
3473			reg = rd32(hw, I40E_QTX_ENA(i));
3474			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3475				break;
3476			i40e_msec_delay(10);
3477		}
3478		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3479			printf("TX queue %d disabled!\n", i);
3480
3481		reg = rd32(hw, I40E_QRX_ENA(i));
3482		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3483		    I40E_QRX_ENA_QENA_STAT_MASK;
3484		wr32(hw, I40E_QRX_ENA(i), reg);
3485		/* Verify the enable took */
3486		for (int j = 0; j < 10; j++) {
3487			reg = rd32(hw, I40E_QRX_ENA(i));
3488			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3489				break;
3490			i40e_msec_delay(10);
3491		}
3492		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3493			printf("RX queue %d disabled!\n", i);
3494	}
3495}
3496
3497static void
3498ixl_disable_rings(struct ixl_vsi *vsi)
3499{
3500	struct i40e_hw	*hw = vsi->hw;
3501	u32		reg;
3502
3503	for (int i = 0; i < vsi->num_queues; i++) {
3504		i40e_pre_tx_queue_cfg(hw, i, FALSE);
3505		i40e_usec_delay(500);
3506
3507		reg = rd32(hw, I40E_QTX_ENA(i));
3508		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3509		wr32(hw, I40E_QTX_ENA(i), reg);
3510		/* Verify the disable took */
3511		for (int j = 0; j < 10; j++) {
3512			reg = rd32(hw, I40E_QTX_ENA(i));
3513			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3514				break;
3515			i40e_msec_delay(10);
3516		}
3517		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3518			printf("TX queue %d still enabled!\n", i);
3519
3520		reg = rd32(hw, I40E_QRX_ENA(i));
3521		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3522		wr32(hw, I40E_QRX_ENA(i), reg);
3523		/* Verify the disable took */
3524		for (int j = 0; j < 10; j++) {
3525			reg = rd32(hw, I40E_QRX_ENA(i));
3526			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3527				break;
3528			i40e_msec_delay(10);
3529		}
3530		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3531			printf("RX queue %d still enabled!\n", i);
3532	}
3533}
3534
3535/**
3536 * ixl_handle_mdd_event
3537 *
3538 * Called from the interrupt handler to identify possibly malicious VFs
3539 * (but it also detects events from the PF)
3540 **/
3541static void ixl_handle_mdd_event(struct ixl_pf *pf)
3542{
3543	struct i40e_hw *hw = &pf->hw;
3544	device_t dev = pf->dev;
3545	bool mdd_detected = false;
3546	bool pf_mdd_detected = false;
3547	u32 reg;
3548
3549	/* find what triggered the MDD event */
3550	reg = rd32(hw, I40E_GL_MDET_TX);
3551	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3552		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3553				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3554		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3555				I40E_GL_MDET_TX_EVENT_SHIFT;
3556		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3557				I40E_GL_MDET_TX_QUEUE_SHIFT;
3558		device_printf(dev,
3559			 "Malicious Driver Detection event 0x%02x"
3560			 " on TX queue %d pf number 0x%02x\n",
3561			 event, queue, pf_num);
3562		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3563		mdd_detected = true;
3564	}
3565	reg = rd32(hw, I40E_GL_MDET_RX);
3566	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3567		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3568				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3569		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3570				I40E_GL_MDET_RX_EVENT_SHIFT;
3571		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3572				I40E_GL_MDET_RX_QUEUE_SHIFT;
3573		device_printf(dev,
3574			 "Malicious Driver Detection event 0x%02x"
3575			 " on RX queue %d of function 0x%02x\n",
3576			 event, queue, func);
3577		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3578		mdd_detected = true;
3579	}
3580
3581	if (mdd_detected) {
3582		reg = rd32(hw, I40E_PF_MDET_TX);
3583		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3584			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3585			device_printf(dev,
3586				 "MDD TX event is for this function 0x%08x",
3587				 reg);
3588			pf_mdd_detected = true;
3589		}
3590		reg = rd32(hw, I40E_PF_MDET_RX);
3591		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3592			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3593			device_printf(dev,
3594				 "MDD RX event is for this function 0x%08x",
3595				 reg);
3596			pf_mdd_detected = true;
3597		}
3598	}
3599
3600	/* re-enable mdd interrupt cause */
3601	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3602	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3603	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3604	ixl_flush(hw);
3605}

static void
ixl_enable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	if (ixl_enable_msix) {
		ixl_enable_adminq(hw);
		for (int i = 0; i < vsi->num_queues; i++, que++)
			ixl_enable_queue(hw, que->me);
	} else
		ixl_enable_legacy(hw);
}

static void
ixl_disable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	if (ixl_enable_msix) {
		ixl_disable_adminq(hw);
		for (int i = 0; i < vsi->num_queues; i++, que++)
			ixl_disable_queue(hw, que->me);
	} else
		ixl_disable_legacy(hw);
}

static void
ixl_enable_adminq(struct i40e_hw *hw)
{
	u32		reg;

	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
	ixl_flush(hw);
	return;
}

static void
ixl_disable_adminq(struct i40e_hw *hw)
{
	u32		reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	return;
}

static void
ixl_enable_queue(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}

static void
ixl_disable_queue(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);

	return;
}

static void
ixl_enable_legacy(struct i40e_hw *hw)
{
	u32		reg;

	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
}

static void
ixl_disable_legacy(struct i40e_hw *hw)
{
	u32		reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	return;
}
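
/*
 * Note on the helpers above: they differ only in which dynamic interrupt
 * control register they touch.  PFINT_DYN_CTL0 controls MSI-X vector 0
 * (admin queue and "other" causes, also used in legacy/MSI mode), while
 * PFINT_DYN_CTLN(id) controls the per-queue vectors.  Setting INTENA
 * re-arms the vector and CLEARPBA clears its bit in the pending bit
 * array; the disable paths simply write the register without INTENA set.
 */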

static void
ixl_update_stats_counters(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;

	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;

	/* Update hw stats */
	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);
	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	ixl_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_discards,
			   &nsd->eth.tx_discards);
	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);
	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);
	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* Flow control (LFC) stats */
	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/* Priority flow control stats */
#if 0
	for (int i = 0; i < 8; i++) {
		ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		ixl_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}
#endif

	/* Packet size stats rx */
	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* Packet size stats tx */
	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);
	pf->stat_offsets_loaded = true;
	/* End hw stats */

	/* Update vsi stats */
	ixl_update_eth_stats(vsi);

	/* OS statistics */
	// ERJ - these are per-port, update all vsis?
	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
}

/*
** Tasklet handler for MSIX Adminq interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixl_do_adminq(void *context, int pending)
{
	struct ixl_pf			*pf = context;
	struct i40e_hw			*hw = &pf->hw;
	struct ixl_vsi			*vsi = &pf->vsi;
	struct i40e_arq_event_info	event;
	i40e_status			ret;
	u32				reg, loop = 0;
	u16				opcode, result;

	event.msg_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.msg_len,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		printf("Unable to allocate adminq memory\n");
		return;
	}

	/* clean and process any events */
	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			vsi->link_up = ixl_config_link(hw);
			ixl_update_link_status(pf);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			/* process pf/vf communication here */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			break;
		default:
#ifdef IXL_DEBUG
			printf("AdminQ unknown event %x\n", opcode);
#endif
			break;
		}

	} while (result && (loop++ < IXL_ADM_LIMIT));

	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	free(event.msg_buf, M_DEVBUF);

	if (pf->msix > 1)
		ixl_enable_adminq(&pf->hw);
	else
		ixl_enable_intr(vsi);
}

static int
ixl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf	*pf;
	int		error, input = 0;

	error = sysctl_handle_int(oidp, &input, 0, req);

	if (error || !req->newptr)
		return (error);

	if (input == 1) {
		pf = (struct ixl_pf *)arg1;
		ixl_print_debug_info(pf);
	}

	return (error);
}

static void
ixl_print_debug_info(struct ixl_pf *pf)
{
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct rx_ring		*rxr = &que->rxr;
	struct tx_ring		*txr = &que->txr;
	u32			reg;

	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
	printf("RX next check = %x\n", rxr->next_check);
	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
	printf("TX desc avail = %x\n", txr->avail);

	reg = rd32(hw, I40E_GLV_GORCL(0xc));
	printf("RX Bytes = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
	printf("Port RX Bytes = %x\n", reg);
	reg = rd32(hw, I40E_GLV_RDPC(0xc));
	printf("RX discard = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
	printf("Port RX discard = %x\n", reg);

	reg = rd32(hw, I40E_GLV_TEPC(0xc));
	printf("TX errors = %x\n", reg);
	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
	printf("TX Bytes = %x\n", reg);

	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
	printf("RX undersize = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
	printf("RX fragments = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
	printf("RX oversize = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
	printf("RX length error = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
	printf("mac remote fault = %x\n", reg);
	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
	printf("mac local fault = %x\n", reg);
}

/**
 * Update VSI-specific ethernet statistics counters.
 **/
void
ixl_update_eth_stats(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *es;
	struct i40e_eth_stats *oes;
	int i;
	uint64_t tx_discards;
	struct i40e_hw_port_stats *nsd;
	u16 stat_idx = vsi->info.stat_counter_idx;

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;
	nsd = &pf->stats;

	/* Gather up the stats that the hw collects */
	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;

	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
	for (i = 0; i < vsi->num_queues; i++)
		tx_discards += vsi->queues[i].txr.br->br_drops;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
			   es->rx_multicast +
			   es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
			   es->tx_multicast +
			   es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);
}

/**
 * Reset all of the stats for the given pf
 **/
void
ixl_pf_reset_stats(struct ixl_pf *pf)
{
	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
	pf->stat_offsets_loaded = false;
}

/**
 * Resets all stats of the given vsi
 **/
void
ixl_vsi_reset_stats(struct ixl_vsi *vsi)
{
	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
	vsi->stat_offsets_loaded = false;
}

/**
 * Read and update a 48 bit stat from the hw
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 **/
static void
ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
	bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
	new_data = rd64(hw, loreg);
#else
	/*
	 * Use two rd32's instead of one rd64; FreeBSD versions before
	 * 10 don't support 8 byte bus reads/writes.
	 */
	new_data = rd32(hw, loreg);
	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
#endif

	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
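
/*
 * Worked example of the 48-bit rollover case above (illustrative values):
 * with *offset = 0xFFFFFFFFFFF0 and a raw read of new_data = 0x10,
 * new_data < *offset, so the reported count is
 * (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since the offset
 * was captured, the counter having wrapped once in between.
 */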

/**
 * Read and update a 32 bit stat from the hw
 **/
static void
ixl_stat_update32(struct i40e_hw *hw, u32 reg,
	bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}
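
/*
 * Same offset scheme as the 48-bit version; the ((u64)1 << 32) term does
 * the wrap correction in 64-bit arithmetic so it cannot itself overflow
 * (e.g. *offset = 0xFFFFFFF0, new_data = 0x10 gives
 * (0x10 + 2^32) - 0xFFFFFFF0 = 0x20), and the final cast truncates the
 * result back to the 32-bit counter width.
 */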

/*
** Set flow control using sysctl:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
static int
ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	/*
	 * TODO: ensure flow control is disabled if
	 * priority flow control is enabled
	 *
	 * TODO: ensure tx CRC by hardware is enabled
	 * if tx flow control is enabled.
	 */
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_fc = 0, error = 0;
	enum i40e_status_code aq_error = 0;
	u8 fc_aq_err = 0;

	aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error retrieving link info from aq, %d\n",
		    __func__, aq_error);
		return (EAGAIN);
	}

	/* Read in new mode */
	requested_fc = hw->fc.current_mode;
	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_fc < 0 || requested_fc > 3) {
		device_printf(dev,
		    "Invalid fc mode; valid modes are 0 through 3\n");
		return (EINVAL);
	}

	/*
	** Changing flow control mode currently does not work on
	** 40GBASE-CR4 PHYs
	*/
	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
		device_printf(dev, "Changing flow control mode unsupported"
		    " on 40GBase-CR4 media.\n");
		return (ENODEV);
	}

	/* Set fc ability for port */
	hw->fc.requested_mode = requested_fc;
	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new fc mode %d; fc_err %#x\n",
		    __func__, aq_error, fc_aq_err);
		return (EAGAIN);
	}

	if (hw->fc.current_mode != hw->fc.requested_mode) {
		device_printf(dev, "%s: FC set failure:\n", __func__);
		device_printf(dev, "%s: Current: %s / Requested: %s\n",
		    __func__,
		    ixl_fc_string[hw->fc.current_mode],
		    ixl_fc_string[hw->fc.requested_mode]);
	}

	return (0);
}
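
/*
 * Usage sketch (hedged): assuming this handler is attached to the
 * device's sysctl tree under the name "fc" (see the sysctl registration
 * elsewhere in this driver for the actual OID), full flow control could
 * be requested from the shell with:
 *
 *	# sysctl dev.ixl.0.fc=3
 */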

static int
ixl_current_speed(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int error = 0, index = 0;

	char *speeds[] = {
		"Unknown",
		"100M",
		"1G",
		"10G",
		"40G",
		"20G"
	};

	ixl_update_link_status(pf);

	switch (hw->phy.link_info.link_speed) {
	case I40E_LINK_SPEED_100MB:
		index = 1;
		break;
	case I40E_LINK_SPEED_1GB:
		index = 2;
		break;
	case I40E_LINK_SPEED_10GB:
		index = 3;
		break;
	case I40E_LINK_SPEED_40GB:
		index = 4;
		break;
	case I40E_LINK_SPEED_20GB:
		index = 5;
		break;
	case I40E_LINK_SPEED_UNKNOWN:
	default:
		index = 0;
		break;
	}

	error = sysctl_handle_string(oidp, speeds[index],
	    strlen(speeds[index]), req);
	return (error);
}

/*
** Control link advertise speed:
**	Flags:
**	0x1 - advertise 100 Mb
**	0x2 - advertise 1G
**	0x4 - advertise 10G
**
** Does not work on 40G devices.
*/
static int
ixl_set_advertise(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	int requested_ls = 0;
	enum i40e_status_code aq_error = 0;
	int error = 0;

	/*
	** FW doesn't support changing advertised speed
	** for 40G devices; speed is always 40G.
	*/
	if (i40e_is_40G_device(hw->device_id))
		return (ENODEV);

	/* Read in new mode */
	requested_ls = pf->advertised_speed;
	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_ls < 1 || requested_ls > 7) {
		device_printf(dev,
		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
		return (EINVAL);
	}

	/* Exit if no change */
	if (pf->advertised_speed == requested_ls)
		return (0);

	/* Get current capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev, "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EAGAIN);
	}

	/* Prepare new config */
	bzero(&config, sizeof(config));
	config.phy_type = abilities.phy_type;
	config.abilities = abilities.abilities
	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	/* Translate into aq cmd link_speed */
	if (requested_ls & 0x4)
		config.link_speed |= I40E_LINK_SPEED_10GB;
	if (requested_ls & 0x2)
		config.link_speed |= I40E_LINK_SPEED_1GB;
	if (requested_ls & 0x1)
		config.link_speed |= I40E_LINK_SPEED_100MB;

	/* Do aq command & restart link */
	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev, "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EAGAIN);
	}

	pf->advertised_speed = requested_ls;
	ixl_update_link_status(pf);
	return (0);
}
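
/*
 * Usage sketch (hedged): assuming the handler is registered under the
 * name "advertise_speed" (the actual OID name comes from the sysctl
 * setup elsewhere in this driver), advertising 100Mb + 1G + 10G would
 * be requested with:
 *
 *	# sysctl dev.ixl.0.advertise_speed=7
 */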

/*
** Get the width and transaction speed of
** the bus this adapter is plugged into.
*/
static u16
ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
{
	u16	link;
	u32	offset;

	/* Get the PCI Express Capabilities offset */
	pci_find_cap(dev, PCIY_EXPRESS, &offset);

	/* ...and read the Link Status Register */
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);

	switch (link & I40E_PCI_LINK_WIDTH) {
	case I40E_PCI_LINK_WIDTH_1:
		hw->bus.width = i40e_bus_width_pcie_x1;
		break;
	case I40E_PCI_LINK_WIDTH_2:
		hw->bus.width = i40e_bus_width_pcie_x2;
		break;
	case I40E_PCI_LINK_WIDTH_4:
		hw->bus.width = i40e_bus_width_pcie_x4;
		break;
	case I40E_PCI_LINK_WIDTH_8:
		hw->bus.width = i40e_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = i40e_bus_width_unknown;
		break;
	}

	switch (link & I40E_PCI_LINK_SPEED) {
	case I40E_PCI_LINK_SPEED_2500:
		hw->bus.speed = i40e_bus_speed_2500;
		break;
	case I40E_PCI_LINK_SPEED_5000:
		hw->bus.speed = i40e_bus_speed_5000;
		break;
	case I40E_PCI_LINK_SPEED_8000:
		hw->bus.speed = i40e_bus_speed_8000;
		break;
	default:
		hw->bus.speed = i40e_bus_speed_unknown;
		break;
	}

	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s" :
	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s" :
	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s" : "Unknown"),
	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));

	if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
	    (hw->bus.speed < i40e_bus_speed_8000)) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this device is not sufficient for"
		    " normal operation.\n");
		device_printf(dev, "For expected performance a x8 "
		    "PCIE Gen3 slot is required.\n");
	}

	return (link);
}

#ifdef IXL_DEBUG
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status link_status;
	char buf[512];

	enum i40e_status_code aq_error = 0;

	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
	if (aq_error) {
		printf("i40e_aq_get_link_info() error %d\n", aq_error);
		return (EPERM);
	}

	sprintf(buf, "\n"
	    "PHY Type : %#04x\n"
	    "Speed    : %#04x\n"
	    "Link info: %#04x\n"
	    "AN info  : %#04x\n"
	    "Ext info : %#04x",
	    link_status.phy_type, link_status.link_speed,
	    link_status.link_info, link_status.an_info,
	    link_status.ext_info);

	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}

static int
ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aq_get_phy_abilities_resp abilities_resp;
	char buf[512];

	enum i40e_status_code aq_error = 0;

	// TODO: Print out list of qualified modules as well?
	aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
	if (aq_error) {
		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
		return (EPERM);
	}

	sprintf(buf, "\n"
	    "PHY Type : %#010x\n"
	    "Speed    : %#04x\n"
	    "Abilities: %#04x\n"
	    "EEE cap  : %#06x\n"
	    "EEER reg : %#010x\n"
	    "D3 Lpan  : %#04x",
	    abilities_resp.phy_type, abilities_resp.link_speed,
	    abilities_resp.abilities, abilities_resp.eee_capability,
	    abilities_resp.eeer_val, abilities_resp.d3_lpan);

	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}

static int
ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_mac_filter *f;
	char *buf, *buf_i;

	int error = 0;
	int ftl_len = 0;
	int ftl_counter = 0;
	int buf_len = 0;
	int entry_len = 42;

	SLIST_FOREACH(f, &vsi->ftl, next) {
		ftl_len++;
	}

	if (ftl_len < 1) {
		sysctl_handle_string(oidp, "(none)", 6, req);
		return (0);
	}

	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	sprintf(buf_i++, "\n");
	SLIST_FOREACH(f, &vsi->ftl, next) {
		sprintf(buf_i,
		    MAC_FORMAT ", vlan %4d, flags %#06x",
		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
		buf_i += entry_len;
		/* don't print '\n' for last entry */
		if (++ftl_counter != ftl_len) {
			sprintf(buf_i, "\n");
			buf_i++;
		}
	}

	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
	if (error)
		printf("sysctl error: %d\n", error);
	free(buf, M_DEVBUF);
	return (error);
}

#define IXL_SW_RES_SIZE 0x14
static int
ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	u8 num_entries;
	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];

	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
				resp,
				IXL_SW_RES_SIZE,
				NULL);
	if (error) {
		device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
		    __func__, error, hw->aq.asq_last_status);
		sbuf_delete(buf);
		return (error);
	}
	device_printf(dev, "Num_entries: %d\n", num_entries);

	sbuf_cat(buf, "\n");
	sbuf_printf(buf,
	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
	    "     | (this)     | (all) | (this) | (all)       \n");
	for (int i = 0; i < num_entries; i++) {
		sbuf_printf(buf,
		    "%#4x | %10d   %5d   %6d   %12d",
		    resp[i].resource_type,
		    resp[i].guaranteed,
		    resp[i].total,
		    resp[i].used,
		    resp[i].total_unalloced);
		if (i < num_entries - 1)
			sbuf_cat(buf, "\n");
	}

	error = sbuf_finish(buf);
	if (error) {
		device_printf(dev, "Error finishing sbuf: %d\n", error);
		sbuf_delete(buf);
		return (error);
	}

	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
	if (error)
		device_printf(dev, "sysctl error: %d\n", error);
	sbuf_delete(buf);
	return (error);
}

/*
** Dump TX desc given index.
** Doesn't work; don't use.
** TODO: Also needs a queue index input!
**/
static int
ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	int desc_idx = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	/* Read in index */
	error = sysctl_handle_int(oidp, &desc_idx, 0, req);
	if (error) {
		sbuf_delete(buf);
		return (error);
	}
	if (req->newptr == NULL) {
		sbuf_delete(buf);
		return (EIO); // fix
	}
	if (desc_idx >= 1024) { // fix
		device_printf(dev,
		    "Invalid descriptor index, needs to be < 1024\n"); // fix
		sbuf_delete(buf);
		return (EINVAL);
	}

	// Don't use this sysctl yet
	if (TRUE) {
		sbuf_delete(buf);
		return (ENODEV);
	}

	sbuf_cat(buf, "\n");

	// set to queue 1?
	struct ixl_queue *que = pf->vsi.queues;
	struct tx_ring *txr = &(que[1].txr);
	struct i40e_tx_desc *txd = &txr->base[desc_idx];

	sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
	sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
	sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);

	error = sbuf_finish(buf);
	if (error) {
		device_printf(dev, "Error finishing sbuf: %d\n", error);
		sbuf_delete(buf);
		return (error);
	}

	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
	if (error)
		device_printf(dev, "sysctl error: %d\n", error);
	sbuf_delete(buf);
	return (error);
}
#endif
