/******************************************************************************

  Copyright (c) 2013-2014, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/10/sys/dev/ixl/if_ixl.c 270631 2014-08-25 22:04:29Z jfv $*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "ixl.h"
#include "ixl_pf.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixl_driver_version[] = "1.2.2";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixl_strings[] = {
	"Intel(R) Ethernet Connection XL710 Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixl_probe(device_t);
static int      ixl_attach(device_t);
static int      ixl_detach(device_t);
static int      ixl_shutdown(device_t);
static int	ixl_get_hw_capabilities(struct ixl_pf *);
static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_init(void *);
static void	ixl_init_locked(struct ixl_pf *);
static void     ixl_stop(struct ixl_pf *);
static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
static int      ixl_media_change(struct ifnet *);
static void     ixl_update_link_status(struct ixl_pf *);
static int      ixl_allocate_pci_resources(struct ixl_pf *);
static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
static int	ixl_setup_stations(struct ixl_pf *);
static int	ixl_setup_vsi(struct ixl_vsi *);
static int	ixl_initialize_vsi(struct ixl_vsi *);
static int	ixl_assign_vsi_msix(struct ixl_pf *);
static int	ixl_assign_vsi_legacy(struct ixl_pf *);
static int	ixl_init_msix(struct ixl_pf *);
static void	ixl_configure_msix(struct ixl_pf *);
static void	ixl_configure_itr(struct ixl_pf *);
static void	ixl_configure_legacy(struct ixl_pf *);
static void	ixl_free_pci_resources(struct ixl_pf *);
static void	ixl_local_timer(void *);
static int	ixl_setup_interface(device_t, struct ixl_vsi *);
static bool	ixl_config_link(struct i40e_hw *);
static void	ixl_config_rss(struct ixl_vsi *);
static void	ixl_set_queue_rx_itr(struct ixl_queue *);
static void	ixl_set_queue_tx_itr(struct ixl_queue *);

static void	ixl_enable_rings(struct ixl_vsi *);
static void	ixl_disable_rings(struct ixl_vsi *);
static void     ixl_enable_intr(struct ixl_vsi *);
static void     ixl_disable_intr(struct ixl_vsi *);

static void     ixl_enable_adminq(struct i40e_hw *);
static void     ixl_disable_adminq(struct i40e_hw *);
static void     ixl_enable_queue(struct i40e_hw *, int);
static void     ixl_disable_queue(struct i40e_hw *, int);
static void     ixl_enable_legacy(struct i40e_hw *);
static void     ixl_disable_legacy(struct i40e_hw *);

static void     ixl_set_promisc(struct ixl_vsi *);
static void     ixl_add_multi(struct ixl_vsi *);
static void     ixl_del_multi(struct ixl_vsi *);
static void	ixl_register_vlan(void *, struct ifnet *, u16);
static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
static void	ixl_setup_vlan_filters(struct ixl_vsi *);

static void	ixl_init_filters(struct ixl_vsi *);
static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
static void	ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);

/* Sysctl debug interface */
static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void	ixl_print_debug_info(struct ixl_pf *);

/* The MSI/X Interrupt handlers */
static void	ixl_intr(void *);
static void	ixl_msix_que(void *);
static void	ixl_msix_adminq(void *);
static void	ixl_handle_mdd_event(struct ixl_pf *);

/* Deferred interrupt tasklets */
static void	ixl_do_adminq(void *, int);

/* Sysctl handlers */
static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);

/* Statistics */
static void     ixl_add_hw_stats(struct ixl_pf *);
static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *,
		    struct i40e_eth_stats *);
static void	ixl_update_stats_counters(struct ixl_pf *);
static void	ixl_update_eth_stats(struct ixl_vsi *);
static void	ixl_pf_reset_stats(struct ixl_pf *);
static void	ixl_vsi_reset_stats(struct ixl_vsi *);
static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
		    u64 *, u64 *);
static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
		    u64 *, u64 *);

#ifdef IXL_DEBUG
static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);

/*
** Global reset mutex
*/
static struct mtx ixl_reset_mtx;

/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
                   "IXL driver parameters");
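/*
** Note: these are boot-time tunables (CTLFLAG_RDTUN), so they must be
** set from the loader, e.g. in /boot/loader.conf (example values only):
**	hw.ixl.enable_msix=0
**	hw.ixl.ringsz=1024
*/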

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixl_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixl_ringsz, 0, "Descriptor Ring Size");

/*
** This can be set manually, if left as 0 the
** number of queues will be calculated based
** on cpus and msix vectors available.
*/
int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_FDIR
static int ixl_enable_fdir = 1;
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
/* Rate at which we sample */
int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif

#ifdef DEV_NETMAP
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

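/*
** Flow control mode names, indexed by enum i40e_fc_mode
** (reported via ixl_update_link_status() when the link comes up).
*/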
static char *ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};


/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];
	static bool lock_init = FALSE;

	INIT_DEBUGOUT("ixl_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			/* One shot mutex init */
			if (lock_init == FALSE) {
				lock_init = TRUE;
				mtx_init(&ixl_reset_mtx,
				    "ixl_reset",
				    "IXL RESET Lock", MTX_DEF);
			}
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi *vsi;
	u16		bus;
	int             error = 0;

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI;
	** this could be enhanced later to allocate multiple.
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;

	/* Core Lock Init*/
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Set up sysctls */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_rx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dynamic_tx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");

#ifdef IXL_DEBUG
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_hw_res_info, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
	    pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Create for initial debugging use */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
	    ixl_debug_info, "I", "Debug Information");


	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev,"PF reset failure %x\n", error);
		error = EIO;
		goto err_out;
	}

	/* For now always do an initial CORE reset on first device */
	{
		static int	ixl_dev_count;
		static int	ixl_dev_track[32];
		u32		my_dev;
		int		i, found = FALSE;
		u16		bus = pci_get_bus(dev);

		mtx_lock(&ixl_reset_mtx);
		my_dev = (bus << 8) | hw->bus.device;

		for (i = 0; i < ixl_dev_count; i++) {
			if (ixl_dev_track[i] == my_dev)
				found = TRUE;
		}

		if (!found) {
			u32 reg;

			ixl_dev_track[ixl_dev_count] = my_dev;
			ixl_dev_count++;

			INIT_DEBUGOUT("Initial CORE RESET\n");
			wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
			ixl_flush(hw);
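			/*
			 * Poll for reset completion: up to 50 iterations
			 * of a 50 ms delay (~2.5 seconds total) waiting
			 * for the DEVSTATE bits in GLGEN_RSTAT to clear.
			 */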
			i = 50;
			do {
				i40e_msec_delay(50);
				reg = rd32(hw, I40E_GLGEN_RSTAT);
				if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
					break;
			} while (i--);

			/* paranoia */
			wr32(hw, I40E_PF_ATQLEN, 0);
			wr32(hw, I40E_PF_ATQBAL, 0);
			wr32(hw, I40E_PF_ATQBAH, 0);
			i40e_clear_pxe_mode(hw);
		}
		mtx_unlock(&ixl_reset_mtx);
	}

	/* Set admin queue parameters */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	/* Initialize the shared code */
	error = i40e_init_shared_code(hw);
	if (error) {
		device_printf(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n"
		    "You must install the most recent version of "
		    "the network driver.\n");
		goto err_out;
	}
	device_printf(dev, "%s\n", ixl_fw_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n"
		    "Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n"
		    "Please update the NVM image.\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto err_get_cap;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware */
	i40e_aq_stop_lldp(hw, TRUE, NULL);

	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	if (ixl_setup_stations(pf) != 0) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up interrupt routing here */
	if (pf->msix > 1)
		error = ixl_assign_vsi_msix(pf);
	else
		error = ixl_assign_vsi_legacy(pf);
	if (error)
		goto err_late;

	i40e_msec_delay(75);
	error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
	if (error) {
		device_printf(dev, "link restart failed, aq_err=%d\n",
		    pf->hw.aq.asq_last_status);
	}

	/* Determine link state */
	vsi->link_up = ixl_config_link(hw);

	/* Report if Unqualified modules are found */
	if ((vsi->link_up == FALSE) &&
	    (pf->hw.phy.link_info.link_info &
	    I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(pf->hw.phy.link_info.an_info &
	    I40E_AQ_QUALIFIED_MODULE)))
		device_printf(dev, "Link failed because "
		    "an unqualified module was detected\n");

	/* Setup OS specific network interface */
	if (ixl_setup_interface(dev, vsi) != 0)
		goto err_late;

	/* Get the bus configuration and set the shared code */
	bus = ixl_get_bus_info(hw, dev);
	i40e_set_pci_config_data(hw, bus);

	/* Initialize statistics */
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef DEV_NETMAP
	ixl_netmap_attach(pf);
#endif /* DEV_NETMAP */

	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	ixl_free_vsi(vsi);
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	if (vsi->ifp != NULL)
		if_free(vsi->ifp);
	ixl_free_pci_resources(pf);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	i40e_status		status;

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "VLAN in use, detach first\n");
		return (EBUSY);
	}

	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->task);
			taskqueue_drain(que->tq, &que->tx_task);
			taskqueue_free(que->tq);
		}
	}

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Shutdown admin queue */
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	ether_ifdetach(vsi->ifp);
	callout_drain(&pf->timer);

#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */

	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);
	IXL_PF_LOCK(pf);
	ixl_stop(pf);
	IXL_PF_UNLOCK(pf);
	return (0);
}


/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

static int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw	*hw = &pf->hw;
	device_t 	dev = pf->dev;
	int             error, len;
	u16		needed;
	bool		again = TRUE;

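	/*
	** Start with room for 40 capability records; if that is too
	** small the admin queue reports I40E_AQ_RC_ENOMEM along with
	** the size it needs, and we retry once with a larger buffer.
	*/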
	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	error = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
		device_printf(dev, "capability discovery failed: %d\n",
		    pf->hw.aq.asq_last_status);
		return (ENODEV);
	}

	/* Capture this PF's starting queue pair */
	pf->qbase = hw->func_caps.base_queue;

#ifdef IXL_DEBUG
	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
	    hw->pf_id, hw->func_caps.num_vfs,
	    hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf,
	    hw->func_caps.fd_filters_guaranteed,
	    hw->func_caps.fd_filters_best_effort,
	    hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp,
	    hw->func_caps.base_queue);
#endif
	return (error);
}

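/*
** TSO depends on TX checksum offload in this hardware, so the two
** capabilities are kept consistent: enabling TSO also enables TXCSUM,
** and disabling TXCSUM also disables TSO. The IXL_FLAGS_KEEP_TSO flags
** remember a TSO setting that was forced off, so it can be restored
** when checksum offload is turned back on.
*/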
static void
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	device_t 	dev = vsi->dev;

	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			device_printf(dev,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			device_printf(dev,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			device_printf(dev,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			device_printf(dev,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixl_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixl_init(pf);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
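		/*
		** The requested MTU plus Ethernet header, CRC and VLAN
		** tag overhead must fit within IXL_MAX_FRAME.
		*/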
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
		} else {
			IXL_PF_LOCK(pf);
			ifp->if_mtu = ifr->ifr_mtu;
			vsi->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXL_PF_LOCK(pf);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ pf->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixl_set_promisc(vsi);
				}
			} else
				ixl_init_locked(pf);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixl_stop(pf);
		pf->if_flags = ifp->if_flags;
		IXL_PF_UNLOCK(pf);
		break;
	case SIOCADDMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_add_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_disable_intr(vsi);
			ixl_del_multi(vsi);
			ixl_enable_intr(vsi);
			IXL_PF_UNLOCK(pf);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");

		ixl_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXL_PF_LOCK(pf);
			ixl_init_locked(pf);
			IXL_PF_UNLOCK(pf);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}


/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 **********************************************************************/

static void
ixl_init_locked(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t 	dev = pf->dev;
	struct i40e_filter_control_settings	filter;
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	mtx_assert(&pf->pf_mtx, MA_OWNED);
	INIT_DEBUGOUT("ixl_init: begin");
	ixl_stop(pf);

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
	      I40E_ETH_LENGTH_OF_ADDRESS);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    i40e_validate_mac_addr(tmpaddr)) {
		bcopy(tmpaddr, hw->mac.addr,
		    I40E_ETH_LENGTH_OF_ADDRESS);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address "
			    "change failed!!\n");
			return;
		}
	}

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
#ifdef IXL_FDIR
	filter.enable_fdir = TRUE;
#endif
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "set_filter_control() failed\n");

	/* Set up RSS */
	ixl_config_rss(vsi);

	/* Setup the VSI */
	ixl_setup_vsi(vsi);

	/*
	** Prepare the rings, hmc contexts, etc...
	*/
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Setup vlan's if needed */
	ixl_setup_vlan_filters(vsi);

	/* Start the local timer */
	callout_reset(&pf->timer, hz, ixl_local_timer, pf);

	/* Set up MSI/X routing and the ITR settings */
	if (ixl_enable_msix) {
		ixl_configure_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Set MTU in hardware */
	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
	    TRUE, 0, NULL);
	if (aq_error)
		device_printf(vsi->dev,
			"aq_set_mac_config in init error, code %d\n",
		    aq_error);

	/* And now turn on interrupts */
	ixl_enable_intr(vsi);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixl_init(void *arg)
{
	struct ixl_pf *pf = arg;

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
static void
ixl_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		IXL_TX_LOCK(txr);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Re-enable this interrupt */
	ixl_enable_queue(hw, que->me);
	return;
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
void
ixl_intr(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	struct tx_ring		*txr = &que->txr;
	u32			reg, icr0, mask;
	bool			more_tx, more_rx;

	++que->irqs;

	/* Protect against spurious interrupts */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	icr0 = rd32(hw, I40E_PFINT_ICR0);

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

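	/*
	** Admin queue events are deferred to the ixl_do_adminq
	** tasklet; queue processing is skipped on this pass.
	*/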
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		taskqueue_enqueue(pf->tq, &pf->adminq);
		return;
	}

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	/* re-enable other interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, mask);

	/* And now the queues */
	reg = rd32(hw, I40E_QINT_RQCTL(0));
	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
	wr32(hw, I40E_QINT_RQCTL(0), reg);

	reg = rd32(hw, I40E_QINT_TQCTL(0));
	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
	wr32(hw, I40E_QINT_TQCTL(0), reg);

	ixl_enable_legacy(hw);

	return;
}


/*********************************************************************
 *
 *  MSIX VSI Interrupt Service routine
 *
 **********************************************************************/
void
ixl_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Protect against spurious interrupts */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	IXL_TX_LOCK(txr);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	IXL_TX_UNLOCK(txr);

	ixl_set_queue_rx_itr(que);
	ixl_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixl_enable_queue(hw, que->me);

	return;
}


/*********************************************************************
 *
 *  MSIX Admin Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixl_msix_adminq(void *arg)
{
	struct ixl_pf	*pf = arg;
	struct i40e_hw	*hw = &pf->hw;
	u32		reg, mask;

	++pf->admin_irq;

	reg = rd32(hw, I40E_PFINT_ICR0);
	mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* Check on the cause */
	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ixl_handle_mdd_event(pf);
		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	}

	if (reg & I40E_PFINT_ICR0_VFLR_MASK)
		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;

	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);

	taskqueue_enqueue(pf->tq, &pf->adminq);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi	*vsi = ifp->if_softc;
	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");
	IXL_PF_LOCK(pf);

	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!vsi->link_up) {
		IXL_PF_UNLOCK(pf);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;

	IXL_PF_UNLOCK(pf);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  media/mediaopt options with ifconfig.
 *
 **********************************************************************/
static int
ixl_media_change(struct ifnet * ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(ifp, "Media change is currently not supported.\n");

	return (ENODEV);
}


#ifdef IXL_FDIR
/*
** ATR: Application Targeted Receive - creates a filter
**	based on TX flow info that will keep the receive
**	portion of the flow on the same queue. Based on the
**	implementation this is only available for TCP connections
*/
void
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
{
	struct ixl_vsi			*vsi = que->vsi;
	struct tx_ring			*txr = &que->txr;
	struct i40e_filter_program_desc	*FDIR;
	u32				ptype, dtype;
	int				idx;

	/* check if ATR is enabled and sample rate */
	if ((!ixl_enable_fdir) || (!txr->atr_rate))
		return;
	/*
	** We sample all TCP SYN/FIN packets,
	** or at the selected sample rate
	*/
	txr->atr_count++;
	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
	    (txr->atr_count < txr->atr_rate))
		return;
	txr->atr_count = 0;

	/* Get a descriptor to use */
	idx = txr->next_avail;
	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
	if (++idx == que->num_desc)
		idx = 0;
	txr->avail--;
	txr->next_avail = idx;

	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	    I40E_TXD_FLTR_QW0_QINDEX_MASK;

	ptype |= (etype == ETHERTYPE_IP) ?
	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;

	/*
	** We use the TCP TH_FIN as a trigger to remove
	** the filter, otherwise it's an update.
	*/
	dtype |= (th->th_flags & TH_FIN) ?
	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
	    I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
	FDIR->dtype_cmd_cntindex = htole32(dtype);
	return;
}
#endif


static void
ixl_set_promisc(struct ixl_vsi *vsi)
{
	struct ifnet	*ifp = vsi->ifp;
	struct i40e_hw	*hw = vsi->hw;
	int		err, mcnt = 0;
	bool		uni = FALSE, multi = FALSE;

	if (ifp->if_flags & IFF_ALLMULTI)
		multi = TRUE;
	else { /* Need to count the multicast addresses */
		struct  ifmultiaddr *ifma;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_MULTICAST_ADDR)
				break;
			mcnt++;
		}
		if_maddr_runlock(ifp);
	}

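	/*
	** The count loop above stops at MAX_MULTICAST_ADDR; if the
	** filter table would overflow, fall back to multicast
	** promiscuous mode instead of programming individual filters.
	*/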
	if (mcnt >= MAX_MULTICAST_ADDR)
		multi = TRUE;
	if (ifp->if_flags & IFF_PROMISC)
		uni = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return;
}

/*********************************************************************
 * 	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
static void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct	ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct i40e_hw		*hw = vsi->hw;
	int			mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	if_maddr_rlock(ifp);
	/*
	** First just get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		ixl_add_mc_filter(vsi,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
		mcnt++;
	}
	if_maddr_runlock(ifp);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
	return;
}

static void
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet		*ifp = vsi->ifp;
	struct ifmultiaddr	*ifma;
	struct ixl_mac_filter	*f;
	int			mcnt = 0;
	bool		match = FALSE;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			if (match == FALSE) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
			}
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixl_local_timer(void *arg)
{
	struct ixl_pf		*pf = arg;
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = pf->dev;
	int			hung = 0;
	u32			mask;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	/* Fire off the adminq task */
	taskqueue_enqueue(pf->tq, &pf->adminq);

	/* Update stats */
	ixl_update_stats_counters(pf);

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors, it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reinit if all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;

	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
	return;

hung:
	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
	ixl_init_locked(pf);
}

/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi		*vsi = &pf->vsi;
	struct i40e_hw		*hw = &pf->hw;
	struct ifnet		*ifp = vsi->ifp;
	device_t		dev = pf->dev;
	enum i40e_fc_mode 	fc;


	if (vsi->link_up) {
		if (vsi->link_active == FALSE) {
			i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
			if (bootverbose) {
				fc = hw->fc.current_mode;
				device_printf(dev,"Link is up %d Gbps %s,"
				    " Flow Control: %s\n",
				    ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
				    "Full Duplex", ixl_fc_string[fc]);
			}
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev,"Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}

	return;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixl_stop(struct ixl_pf *pf)
{
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ifnet	*ifp = vsi->ifp;

	mtx_assert(&pf->pf_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixl_stop: begin\n");
	ixl_disable_intr(vsi);
	ixl_disable_rings(vsi);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Stop the local timer */
	callout_stop(&pf->timer);

	return;
}


/*********************************************************************
 *
1797 *  Setup the Legacy or MSI interrupt handler for the VSI
1798 *
1799 **********************************************************************/
1800static int
1801ixl_assign_vsi_legacy(struct ixl_pf *pf)
1802{
1803	device_t        dev = pf->dev;
1804	struct 		ixl_vsi *vsi = &pf->vsi;
1805	struct		ixl_queue *que = vsi->queues;
1806	int 		error, rid = 0;
1807
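	/* IRQ resource id 0 is the legacy INTx line; MSI messages start at rid 1 */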
1808	if (pf->msix == 1)
1809		rid = 1;
1810	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1811	    &rid, RF_SHAREABLE | RF_ACTIVE);
1812	if (pf->res == NULL) {
1813		device_printf(dev,"Unable to allocate"
1814		    " bus resource: vsi legacy/msi interrupt\n");
1815		return (ENXIO);
1816	}
1817
1818	/* Set the handler function */
1819	error = bus_setup_intr(dev, pf->res,
1820	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1821	    ixl_intr, pf, &pf->tag);
1822	if (error) {
1823		pf->res = NULL;
1824		device_printf(dev, "Failed to register legacy/msi handler");
1825		return (error);
1826	}
1827	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
1828	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1829	TASK_INIT(&que->task, 0, ixl_handle_que, que);
1830	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1831	    taskqueue_thread_enqueue, &que->tq);
1832	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1833	    device_get_nameunit(dev));
1834	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1835	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1836	    taskqueue_thread_enqueue, &pf->tq);
1837	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1838	    device_get_nameunit(dev));
1839
1840	return (0);
1841}
1842
1843
1844/*********************************************************************
1845 *
1846 *  Setup MSIX Interrupt resources and handlers for the VSI
1847 *
1848 **********************************************************************/
1849static int
1850ixl_assign_vsi_msix(struct ixl_pf *pf)
1851{
1852	device_t	dev = pf->dev;
1853	struct 		ixl_vsi *vsi = &pf->vsi;
1854	struct 		ixl_queue *que = vsi->queues;
1855	struct		tx_ring	 *txr;
1856	int 		error, rid, vector = 0;
1857
1858	/* Admin Queue is vector 0 */
1859	rid = vector + 1;
1860	pf->res = bus_alloc_resource_any(dev,
1861	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1862	if (!pf->res) {
1863		device_printf(dev,"Unable to allocate"
1864		    " bus resource: Adminq interrupt [%d]\n", rid);
1865		return (ENXIO);
1866	}
1867	/* Set the adminq vector and handler */
1868	error = bus_setup_intr(dev, pf->res,
1869	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1870	    ixl_msix_adminq, pf, &pf->tag);
1871	if (error) {
1872		pf->res = NULL;
1873		device_printf(dev, "Failed to register Admin que handler");
1874		return (error);
1875	}
1876	bus_describe_intr(dev, pf->res, pf->tag, "aq");
1877	pf->admvec = vector;
1878	/* Tasklet for Admin Queue */
1879	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
1880	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1881	    taskqueue_thread_enqueue, &pf->tq);
1882	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
1883	    device_get_nameunit(pf->dev));
1884	++vector;
1885
1886	/* Now set up the stations */
1887	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1888		rid = vector + 1;
1889		txr = &que->txr;
1890		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1891		    RF_SHAREABLE | RF_ACTIVE);
1892		if (que->res == NULL) {
1893			device_printf(dev,"Unable to allocate"
1894			    " bus resource: que interrupt [%d]\n", vector);
1895			return (ENXIO);
1896		}
1897		/* Set the handler function */
1898		error = bus_setup_intr(dev, que->res,
1899		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1900		    ixl_msix_que, que, &que->tag);
1901		if (error) {
1902			que->res = NULL;
1903			device_printf(dev, "Failed to register que handler");
1904			return (error);
1905		}
1906		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
1907		/* Bind the vector to a CPU */
1908		bus_bind_intr(dev, que->res, i);
1909		que->msix = vector;
1910		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1911		TASK_INIT(&que->task, 0, ixl_handle_que, que);
1912		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
1913		    taskqueue_thread_enqueue, &que->tq);
1914		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1915		    device_get_nameunit(pf->dev));
1916	}
1917
1918	return (0);
1919}
1920
1921
1922/*
1923 * Allocate MSI/X vectors
1924 */
1925static int
1926ixl_init_msix(struct ixl_pf *pf)
1927{
1928	device_t dev = pf->dev;
1929	int rid, want, vectors, queues, available;
1930
1931	/* Override by tuneable */
1932	if (ixl_enable_msix == 0)
1933		goto msi;
1934
1935	/*
1936	** When used in a virtualized environment the
1937	** PCI BUSMASTER capability may not be set,
1938	** so explicitly set it here and rewrite
1939	** the ENABLE bit in the MSIX control register
1940	** at this point to let the host
1941	** successfully initialize us.
1942	*/
1943	{
1944		u16 pci_cmd_word;
1945		int msix_ctrl;
1946		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1947		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1948		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1949		pci_find_cap(dev, PCIY_MSIX, &rid);
1950		rid += PCIR_MSIX_CTRL;
1951		msix_ctrl = pci_read_config(dev, rid, 2);
1952		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1953		pci_write_config(dev, rid, msix_ctrl, 2);
1954	}
1955
1956	/* First try MSI/X */
1957	rid = PCIR_BAR(IXL_BAR);
1958	pf->msix_mem = bus_alloc_resource_any(dev,
1959	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
1960	if (!pf->msix_mem) {
1961		/* May not be enabled */
1962		device_printf(pf->dev,
1963		    "Unable to map MSIX table \n");
1964		goto msi;
1965	}
1966
1967	available = pci_msix_count(dev);
1968	if (available == 0) { /* system has msix disabled */
1969		bus_release_resource(dev, SYS_RES_MEMORY,
1970		    rid, pf->msix_mem);
1971		pf->msix_mem = NULL;
1972		goto msi;
1973	}
1974
1975	/* Figure out a reasonable auto config value */
1976	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1977
1978	/* Override with hardcoded value if sane */
1979	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
1980		queues = ixl_max_queues;
1981
1982	/*
1983	** Want one vector (RX/TX pair) per queue
1984	** plus an additional for the admin queue.
1985	*/
1986	want = queues + 1;
1987	if (want <= available)	/* Have enough */
1988		vectors = want;
1989	else {
1990		device_printf(pf->dev,
1991		    "MSIX Configuration Problem, "
1992		    "%d vectors available but %d wanted!\n",
1993		    available, want);
1994		return (0); /* Will go to Legacy setup */
1995	}
1996
1997	if (pci_alloc_msix(dev, &vectors) == 0) {
1998		device_printf(pf->dev,
1999		    "Using MSIX interrupts with %d vectors\n", vectors);
2000		pf->msix = vectors;
2001		pf->vsi.num_queues = queues;
2002		return (vectors);
2003	}
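	/* Fall back to a single queue served by one MSI vector, or legacy INTx */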
2004msi:
2005	vectors = pci_msi_count(dev);
2006	pf->vsi.num_queues = 1;
2007	pf->msix = 1;
2008	ixl_max_queues = 1;
2009	ixl_enable_msix = 0;
2010	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
2011		device_printf(pf->dev,"Using an MSI interrupt\n");
2012	else {
2013		pf->msix = 0;
2014		device_printf(pf->dev,"Using a Legacy interrupt\n");
2015	}
2016	return (vectors);
2017}
2018
2019
2020/*
2021 * Plumb MSI/X vectors
2022 */
2023static void
2024ixl_configure_msix(struct ixl_pf *pf)
2025{
2026	struct i40e_hw	*hw = &pf->hw;
2027	struct ixl_vsi *vsi = &pf->vsi;
2028	u32		reg;
2029	u16		vector = 1;
2030
2031	/* First set up the adminq - vector 0 */
2032	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2033	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2034
2035	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2036	    I40E_PFINT_ICR0_ENA_GRST_MASK |
2037	    I40E_PFINT_ICR0_HMC_ERR_MASK |
2038	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
2039	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2040	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
2041	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
2042	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2043
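	/* 0x7FF is the end-of-list marker (cf. IXL_QUEUE_EOL below):
	   no queue causes are chained to the adminq vector */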
2044	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
2045	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
2046
2047	wr32(hw, I40E_PFINT_DYN_CTL0,
2048	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2049	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2050
2051	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2052
2053	/* Next configure the queues */
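	/*
	** Each vector's cause list starts at RX queue i, which
	** chains to TX queue i; TX chains on to RX queue i+1,
	** and the final TX queue is marked end-of-list.
	*/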
2054	for (int i = 0; i < vsi->num_queues; i++, vector++) {
2055		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
2056		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
2057
2058		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2059		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2060		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2061		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2062		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2063		wr32(hw, I40E_QINT_RQCTL(i), reg);
2064
2065		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2066		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2067		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2068		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2069		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2070		if (i == (vsi->num_queues - 1))
2071			reg |= (IXL_QUEUE_EOL
2072			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2073		wr32(hw, I40E_QINT_TQCTL(i), reg);
2074	}
2075}
2076
2077/*
2078 * Configure for MSI single vector operation
2079 */
2080static void
2081ixl_configure_legacy(struct ixl_pf *pf)
2082{
2083	struct i40e_hw	*hw = &pf->hw;
2084	u32		reg;
2085
2086
2087	wr32(hw, I40E_PFINT_ITR0(0), 0);
2088	wr32(hw, I40E_PFINT_ITR0(1), 0);
2089
2090
2091	/* Setup "other" causes */
2092	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
2093	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
2094	    | I40E_PFINT_ICR0_ENA_GRST_MASK
2095	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
2096	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
2097	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
2098	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
2099	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
2100	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
2101	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
2102	    ;
2103	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2104
2105	/* SW_ITR_IDX = 0, but don't change INTENA */
2106	wr32(hw, I40E_PFINT_DYN_CTL0,
2107	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2108	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2109	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
2110	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2111
2112	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2113	wr32(hw, I40E_PFINT_LNKLST0, 0);
2114
2115	/* Associate the queue pair to the vector and enable the q int */
2116	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
2117	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
2118	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2119	wr32(hw, I40E_QINT_RQCTL(0), reg);
2120
2121	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
2122	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
2123	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2124	wr32(hw, I40E_QINT_TQCTL(0), reg);
2125
2126	/* Next enable the queue pair */
2127	reg = rd32(hw, I40E_QTX_ENA(0));
2128	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2129	wr32(hw, I40E_QTX_ENA(0), reg);
2130
2131	reg = rd32(hw, I40E_QRX_ENA(0));
2132	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2133	wr32(hw, I40E_QRX_ENA(0), reg);
2134}
2135
2136
2137/*
2138 * Set the Initial ITR state
2139 */
2140static void
2141ixl_configure_itr(struct ixl_pf *pf)
2142{
2143	struct i40e_hw		*hw = &pf->hw;
2144	struct ixl_vsi		*vsi = &pf->vsi;
2145	struct ixl_queue	*que = vsi->queues;
2146
2147	vsi->rx_itr_setting = ixl_rx_itr;
2148	if (ixl_dynamic_rx_itr)
2149		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
2150	vsi->tx_itr_setting = ixl_tx_itr;
2151	if (ixl_dynamic_tx_itr)
2152		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
2153
2154	for (int i = 0; i < vsi->num_queues; i++, que++) {
2155		struct tx_ring	*txr = &que->txr;
2156		struct rx_ring 	*rxr = &que->rxr;
2157
2158		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
2159		    vsi->rx_itr_setting);
2160		rxr->itr = vsi->rx_itr_setting;
2161		rxr->latency = IXL_AVE_LATENCY;
2162		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
2163		    vsi->tx_itr_setting);
2164		txr->itr = vsi->tx_itr_setting;
2165		txr->latency = IXL_AVE_LATENCY;
2166	}
2167}
2168
2169
2170static int
2171ixl_allocate_pci_resources(struct ixl_pf *pf)
2172{
2173	int             rid;
2174	device_t        dev = pf->dev;
2175
2176	rid = PCIR_BAR(0);
2177	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2178	    &rid, RF_ACTIVE);
2179
2180	if (!(pf->pci_mem)) {
2181		device_printf(dev,"Unable to allocate bus resource: memory\n");
2182		return (ENXIO);
2183	}
2184
2185	pf->osdep.mem_bus_space_tag =
2186		rman_get_bustag(pf->pci_mem);
2187	pf->osdep.mem_bus_space_handle =
2188		rman_get_bushandle(pf->pci_mem);
2189	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
2190	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
2191
2192	pf->hw.back = &pf->osdep;
2193
2194	/*
2195	** Now set up MSI or MSI/X; this should
2196	** return the number of supported
2197	** vectors. (Will be 1 for MSI)
2198	*/
2199	pf->msix = ixl_init_msix(pf);
2200	return (0);
2201}
2202
2203static void
2204ixl_free_pci_resources(struct ixl_pf * pf)
2205{
2206	struct ixl_vsi		*vsi = &pf->vsi;
2207	struct ixl_queue	*que = vsi->queues;
2208	device_t		dev = pf->dev;
2209	int			rid, memrid;
2210
2211	memrid = PCIR_BAR(IXL_BAR);
2212
2213	/* We may get here before stations are setup */
2214	/* We may get here before the stations are set up */
2215		goto early;
2216
2217	/*
2218	**  Release all msix VSI resources:
2219	*/
2220	for (int i = 0; i < vsi->num_queues; i++, que++) {
2221		rid = que->msix + 1;
2222		if (que->tag != NULL) {
2223			bus_teardown_intr(dev, que->res, que->tag);
2224			que->tag = NULL;
2225		}
2226		if (que->res != NULL)
2227			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2228	}
2229
2230early:
2231	/* Clean the AdminQ interrupt last */
2232	if (pf->admvec) /* we are doing MSIX */
2233		rid = pf->admvec + 1;
2234	else
2235		rid = (pf->msix != 0) ? 1 : 0;
2236
2237	if (pf->tag != NULL) {
2238		bus_teardown_intr(dev, pf->res, pf->tag);
2239		pf->tag = NULL;
2240	}
2241	if (pf->res != NULL)
2242		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
2243
2244	if (pf->msix)
2245		pci_release_msi(dev);
2246
2247	if (pf->msix_mem != NULL)
2248		bus_release_resource(dev, SYS_RES_MEMORY,
2249		    memrid, pf->msix_mem);
2250
2251	if (pf->pci_mem != NULL)
2252		bus_release_resource(dev, SYS_RES_MEMORY,
2253		    PCIR_BAR(0), pf->pci_mem);
2254
2255	return;
2256}
2257
2258
2259/*********************************************************************
2260 *
2261 *  Setup networking device structure and register an interface.
2262 *
2263 **********************************************************************/
2264static int
2265ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
2266{
2267	struct ifnet		*ifp;
2268	struct i40e_hw		*hw = vsi->hw;
2269	struct ixl_queue	*que = vsi->queues;
2270	struct i40e_aq_get_phy_abilities_resp abilities_resp;
2271	enum i40e_status_code aq_error = 0;
2272
2273	INIT_DEBUGOUT("ixl_setup_interface: begin");
2274
2275	ifp = vsi->ifp = if_alloc(IFT_ETHER);
2276	if (ifp == NULL) {
2277		device_printf(dev, "can not allocate ifnet structure\n");
2278		return (-1);
2279	}
2280	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2281	ifp->if_mtu = ETHERMTU;
2282	ifp->if_baudrate = 4000000000;  // XXX placeholder, not the real link speed
2283	ifp->if_init = ixl_init;
2284	ifp->if_softc = vsi;
2285	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2286	ifp->if_ioctl = ixl_ioctl;
2287
2288	ifp->if_transmit = ixl_mq_start;
2289
2290	ifp->if_qflush = ixl_qflush;
2291
2292	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
2293
2294	ether_ifattach(ifp, hw->mac.addr);
2295
2296	vsi->max_frame_size =
2297	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
2298	    + ETHER_VLAN_ENCAP_LEN;
2299
2300	/*
2301	 * Tell the upper layer(s) we support long frames.
2302	 */
2303	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2304
2305	ifp->if_capabilities |= IFCAP_HWCSUM;
2306	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
2307	ifp->if_capabilities |= IFCAP_TSO;
2308	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2309	ifp->if_capabilities |= IFCAP_LRO;
2310
2311	/* VLAN capabilities */
2312	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2313			     |  IFCAP_VLAN_HWTSO
2314			     |  IFCAP_VLAN_MTU
2315			     |  IFCAP_VLAN_HWCSUM;
2316	ifp->if_capenable = ifp->if_capabilities;
2317
2318	/*
2319	** Don't turn this on by default: if vlans are
2320	** created on another pseudo device (e.g. lagg),
2321	** vlan events are not passed through, breaking
2322	** operation, but with HW FILTER off it works. If
2323	** using vlans directly on the ixl driver you can
2324	** enable this and get full hardware tag filtering.
2325	*/
2326	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2327
2328	/*
2329	 * Specify the media types supported by this adapter and register
2330	 * callbacks to update media and link information
2331	 */
2332	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
2333		     ixl_media_status);
2334
2335	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
2336	if (aq_error) {
2337		printf("Error getting supported media types, AQ error %d\n", aq_error);
2338		return (EPERM);
2339	}
2340
2341	/* Display supported media types */
2342	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
2343		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2344
2345	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
2346		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2347
2348	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
2349	    abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
2350		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2351	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
2352		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2353	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
2354		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2355	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
2356		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2357
2358	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
2359	    abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
2360		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
2361	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
2362		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
2363	if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
2364		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
2365
2366	/* Use autoselect media by default */
2367	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2368	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
2369
2370	return (0);
2371}
2372
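/*
** Refresh link info from the firmware and report
** whether the link is up.
*/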
2373static bool
2374ixl_config_link(struct i40e_hw *hw)
2375{
2376	bool check;
2377
2378	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2379	check = i40e_get_link_status(hw);
2380#ifdef IXL_DEBUG
2381	printf("Link is %s\n", check ? "up":"down");
2382#endif
2383	return (check);
2384}
2385
2386/*********************************************************************
2387 *
2388 *  Initialize this VSI
2389 *
2390 **********************************************************************/
2391static int
2392ixl_setup_vsi(struct ixl_vsi *vsi)
2393{
2394	struct i40e_hw	*hw = vsi->hw;
2395	device_t 	dev = vsi->dev;
2396	struct i40e_aqc_get_switch_config_resp *sw_config;
2397	struct i40e_vsi_context	ctxt;
2398	u8	aq_buf[I40E_AQ_LARGE_BUF];
2399	int	ret = I40E_SUCCESS;
2400	u16	next = 0;
2401
2402	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
2403	ret = i40e_aq_get_switch_config(hw, sw_config,
2404	    sizeof(aq_buf), &next, NULL);
2405	if (ret) {
2406		device_printf(dev,"aq_get_switch_config failed!!\n");
2407		return (ret);
2408	}
2409#ifdef IXL_DEBUG
2410	printf("Switch config: header reported: %d in structure, %d total\n",
2411    	    sw_config->header.num_reported, sw_config->header.num_total);
2412	printf("type=%d seid=%d uplink=%d downlink=%d\n",
2413	    sw_config->element[0].element_type,
2414	    sw_config->element[0].seid,
2415	    sw_config->element[0].uplink_seid,
2416	    sw_config->element[0].downlink_seid);
2417#endif
2418	/* Save off this important value */
2419	vsi->seid = sw_config->element[0].seid;
2420
2421	memset(&ctxt, 0, sizeof(ctxt));
2422	ctxt.seid = vsi->seid;
2423	ctxt.pf_num = hw->pf_id;
2424	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2425	if (ret) {
2426		device_printf(dev,"get vsi params failed %x!!\n", ret);
2427		return (ret);
2428	}
2429#ifdef IXL_DEBUG
2430	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
2431	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
2432	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
2433	    ctxt.uplink_seid, ctxt.vsi_number,
2434	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
2435	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
2436	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
2437#endif
2438	/*
2439	** Set the queue and traffic class bits
2440	**  - when multiple traffic classes are supported
2441	**    this will need to be more robust.
2442	*/
2443	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2444	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
2445	ctxt.info.queue_mapping[0] = 0;
2446	ctxt.info.tc_mapping[0] = 0x0800;
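	/*
	** 0x0800: TC0 starts at queue offset 0 and the count field
	** holds a power-of-two exponent (2^4 = 16 queues), assuming
	** the standard i40e TC mapping layout.
	*/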
2447
2448	/* Set VLAN receive stripping mode */
2449	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
2450	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
2451	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2452	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2453	else
2454	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2455
2456	/* Keep copy of VSI info in VSI for statistic counters */
2457	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2458
2459	/* Reset VSI statistics */
2460	ixl_vsi_reset_stats(vsi);
2461	vsi->hw_filters_add = 0;
2462	vsi->hw_filters_del = 0;
2463
2464	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2465	if (ret)
2466		device_printf(dev,"update vsi params failed %x!!\n",
2467		   hw->aq.asq_last_status);
2468	return (ret);
2469}
2470
2471
2472/*********************************************************************
2473 *
2474 *  Initialize the VSI:  this handles contexts, which means things
2475 *  			 like the number of descriptors, buffer size,
2476 *			 plus we init the rings thru this function.
2477 *
2478 **********************************************************************/
2479static int
2480ixl_initialize_vsi(struct ixl_vsi *vsi)
2481{
2482	struct ixl_queue	*que = vsi->queues;
2483	device_t		dev = vsi->dev;
2484	struct i40e_hw		*hw = vsi->hw;
2485	int			err = 0;
2486
2487
2488	for (int i = 0; i < vsi->num_queues; i++, que++) {
2489		struct tx_ring		*txr = &que->txr;
2490		struct rx_ring 		*rxr = &que->rxr;
2491		struct i40e_hmc_obj_txq tctx;
2492		struct i40e_hmc_obj_rxq rctx;
2493		u32			txctl;
2494		u16			size;
2495
2496
2497		/* Setup the HMC TX Context  */
2498		size = que->num_desc * sizeof(struct i40e_tx_desc);
2499		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
2500		tctx.new_context = 1;
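		/* HMC ring base addresses are programmed in 128-byte units */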
2501		tctx.base = (txr->dma.pa/128);
2502		tctx.qlen = que->num_desc;
2503		tctx.fc_ena = 0;
2504		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
2505		/* Enable HEAD writeback */
2506		tctx.head_wb_ena = 1;
2507		tctx.head_wb_addr = txr->dma.pa +
2508		    (que->num_desc * sizeof(struct i40e_tx_desc));
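		/*
		** The HEAD value is written just past the last descriptor;
		** the ring allocation reserved an extra u32 for it.
		*/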
2509		tctx.rdylist_act = 0;
2510		err = i40e_clear_lan_tx_queue_context(hw, i);
2511		if (err) {
2512			device_printf(dev, "Unable to clear TX context\n");
2513			break;
2514		}
2515		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
2516		if (err) {
2517			device_printf(dev, "Unable to set TX context\n");
2518			break;
2519		}
2520		/* Associate the ring with this PF */
2521		txctl = I40E_QTX_CTL_PF_QUEUE;
2522		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2523		    I40E_QTX_CTL_PF_INDX_MASK);
2524		wr32(hw, I40E_QTX_CTL(i), txctl);
2525		ixl_flush(hw);
2526
2527		/* Do ring (re)init */
2528		ixl_init_tx_ring(que);
2529
2530		/* Next setup the HMC RX Context  */
2531		if (vsi->max_frame_size <= 2048)
2532			rxr->mbuf_sz = MCLBYTES;
2533		else
2534			rxr->mbuf_sz = MJUMPAGESIZE;
2535
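		/* Largest frame the HW can receive by chaining RX buffers */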
2536		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
2537
2538		/* Set up an RX context for the HMC */
2539		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
2540		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
2541		/* ignore header split for now */
2542		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2543		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
2544		    vsi->max_frame_size : max_rxmax;
2545		rctx.dtype = 0;
2546		rctx.dsize = 1;	/* do 32byte descriptors */
2547		rctx.hsplit_0 = 0;  /* no HDR split initially */
2548		rctx.base = (rxr->dma.pa/128);
2549		rctx.qlen = que->num_desc;
2550		rctx.tphrdesc_ena = 1;
2551		rctx.tphwdesc_ena = 1;
2552		rctx.tphdata_ena = 0;
2553		rctx.tphhead_ena = 0;
2554		rctx.lrxqthresh = 2;
2555#ifdef DEV_NETMAP
2556		/* CRC stripping under netmap depends on the ixl_crcstrip tunable */
2557		if (vsi->ifp->if_capenable & IFCAP_NETMAP && !ixl_crcstrip)
2558			rctx.crcstrip = 0;
2559		else
2560#endif /* DEV_NETMAP */
2561		rctx.crcstrip = 1;
2562		rctx.l2tsel = 1;
2563		rctx.showiv = 1;
2564		rctx.fc_ena = 0;
2565		rctx.prefena = 1;
2566
2567		err = i40e_clear_lan_rx_queue_context(hw, i);
2568		if (err) {
2569			device_printf(dev,
2570			    "Unable to clear RX context %d\n", i);
2571			break;
2572		}
2573		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
2574		if (err) {
2575			device_printf(dev, "Unable to set RX context %d\n", i);
2576			break;
2577		}
2578		err = ixl_init_rx_ring(que);
2579		if (err) {
2580			device_printf(dev, "Fail in init_rx_ring %d\n", i);
2581			break;
2582		}
2583		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
2584#ifdef DEV_NETMAP
2585		/* Based on the netmap code in ixgbe_init_locked():
2586		 * adjust the RX descriptor tail written to the
2587		 * hardware so that ring slots still owned by
2588		 * netmap are not handed to the NIC.
2589		 */
2590		if (vsi->ifp->if_capenable & IFCAP_NETMAP)
2591		{
2592			struct netmap_adapter *na = NA(vsi->ifp);
2593			struct netmap_kring *kring = &na->rx_rings[que->me];
2594			int t = na->num_rx_desc - 1 - kring->nr_hwavail;
2595
2596			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
2597		} else
2598#endif /* DEV_NETMAP */
2599		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
2600	}
2601	return (err);
2602}
2603
2604
2605/*********************************************************************
2606 *
2607 *  Free all VSI structs.
2608 *
2609 **********************************************************************/
2610void
2611ixl_free_vsi(struct ixl_vsi *vsi)
2612{
2613	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
2614	struct ixl_queue	*que = vsi->queues;
2615	struct ixl_mac_filter *f;
2616
2617	/* Free station queues */
2618	for (int i = 0; i < vsi->num_queues; i++, que++) {
2619		struct tx_ring *txr = &que->txr;
2620		struct rx_ring *rxr = &que->rxr;
2621
2622		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2623			continue;
2624		IXL_TX_LOCK(txr);
2625		ixl_free_que_tx(que);
2626		if (txr->base)
2627			i40e_free_dma(&pf->hw, &txr->dma);
2628		IXL_TX_UNLOCK(txr);
2629		IXL_TX_LOCK_DESTROY(txr);
2630
2631		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2632			continue;
2633		IXL_RX_LOCK(rxr);
2634		ixl_free_que_rx(que);
2635		if (rxr->base)
2636			i40e_free_dma(&pf->hw, &rxr->dma);
2637		IXL_RX_UNLOCK(rxr);
2638		IXL_RX_LOCK_DESTROY(rxr);
2639
2640	}
2641	free(vsi->queues, M_DEVBUF);
2642
2643	/* Free VSI filter list */
2644	while (!SLIST_EMPTY(&vsi->ftl)) {
2645		f = SLIST_FIRST(&vsi->ftl);
2646		SLIST_REMOVE_HEAD(&vsi->ftl, next);
2647		free(f, M_DEVBUF);
2648	}
2649}
2650
2651
2652/*********************************************************************
2653 *
2654 *  Allocate memory for the VSI (virtual station interface) and its
2655 *  associated queues, rings and the descriptors for each;
2656 *  called only once at attach.
2657 *
2658 **********************************************************************/
2659static int
2660ixl_setup_stations(struct ixl_pf *pf)
2661{
2662	device_t		dev = pf->dev;
2663	struct ixl_vsi		*vsi;
2664	struct ixl_queue	*que;
2665	struct tx_ring		*txr;
2666	struct rx_ring		*rxr;
2667	int 			rsize, tsize;
2668	int			error = I40E_SUCCESS;
2669
2670	vsi = &pf->vsi;
2671	vsi->back = (void *)pf;
2672	vsi->hw = &pf->hw;
2673	vsi->id = 0;
2674	vsi->num_vlans = 0;
2675
2676	/* Get memory for the station queues */
2677	if (!(vsi->queues =
2678	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
2679	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2680		device_printf(dev, "Unable to allocate queue memory\n");
2681		error = ENOMEM;
2682		goto early;
2683	}
2684
2685	for (int i = 0; i < vsi->num_queues; i++) {
2686		que = &vsi->queues[i];
2687		que->num_desc = ixl_ringsz;
2688		que->me = i;
2689		que->vsi = vsi;
2690		/* mark the queue as active */
2691		vsi->active_queues |= (u64)1 << que->me;
2692		txr = &que->txr;
2693		txr->que = que;
2694		txr->tail = I40E_QTX_TAIL(que->me);
2695
2696		/* Initialize the TX lock */
2697		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2698		    device_get_nameunit(dev), que->me);
2699		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
2700		/* Create the TX descriptor ring */
2701		tsize = roundup2((que->num_desc *
2702		    sizeof(struct i40e_tx_desc)) +
2703		    sizeof(u32), DBA_ALIGN);
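		/* the extra u32 is the TX HEAD writeback slot (see ixl_initialize_vsi) */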
2704		if (i40e_allocate_dma(&pf->hw,
2705		    &txr->dma, tsize, DBA_ALIGN)) {
2706			device_printf(dev,
2707			    "Unable to allocate TX Descriptor memory\n");
2708			error = ENOMEM;
2709			goto fail;
2710		}
2711		txr->base = (struct i40e_tx_desc *)txr->dma.va;
2712		bzero((void *)txr->base, tsize);
2713		/* Now allocate transmit soft structs for the ring */
2714		if (ixl_allocate_tx_data(que)) {
2715			device_printf(dev,
2716			    "Critical Failure setting up TX structures\n");
2717			error = ENOMEM;
2718			goto fail;
2719		}
2720		/* Allocate a buf ring */
2721		txr->br = buf_ring_alloc(4096, M_DEVBUF,
2722		    M_WAITOK, &txr->mtx);
2723		if (txr->br == NULL) {
2724			device_printf(dev,
2725			    "Critical Failure setting up TX buf ring\n");
2726			error = ENOMEM;
2727			goto fail;
2728		}
2729
2730		/*
2731		 * Next the RX queues...
2732		 */
2733		rsize = roundup2(que->num_desc *
2734		    sizeof(union i40e_rx_desc), DBA_ALIGN);
2735		rxr = &que->rxr;
2736		rxr->que = que;
2737		rxr->tail = I40E_QRX_TAIL(que->me);
2738
2739		/* Initialize the RX side lock */
2740		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2741		    device_get_nameunit(dev), que->me);
2742		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
2743
2744		if (i40e_allocate_dma(&pf->hw,
2745		    &rxr->dma, rsize, 4096)) {
2746			device_printf(dev,
2747			    "Unable to allocate RX Descriptor memory\n");
2748			error = ENOMEM;
2749			goto fail;
2750		}
2751		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
2752		bzero((void *)rxr->base, rsize);
2753
2754		/* Allocate receive soft structs for the ring */
2755		if (ixl_allocate_rx_data(que)) {
2756			device_printf(dev,
2757			    "Critical Failure setting up receive structs\n");
2758			error = ENOMEM;
2759			goto fail;
2760		}
2761	}
2762
2763	return (0);
2764
2765fail:
2766	for (int i = 0; i < vsi->num_queues; i++) {
2767		que = &vsi->queues[i];
2768		rxr = &que->rxr;
2769		txr = &que->txr;
2770		if (rxr->base)
2771			i40e_free_dma(&pf->hw, &rxr->dma);
2772		if (txr->base)
2773			i40e_free_dma(&pf->hw, &txr->dma);
2774	}
2775
2776early:
2777	return (error);
2778}
2779
2780/*
2781** Provide an update to the queue RX
2782** interrupt moderation value.
2783*/
2784static void
2785ixl_set_queue_rx_itr(struct ixl_queue *que)
2786{
2787	struct ixl_vsi	*vsi = que->vsi;
2788	struct i40e_hw	*hw = vsi->hw;
2789	struct rx_ring	*rxr = &que->rxr;
2790	u16		rx_itr;
2791	u16		rx_latency = 0;
2792	int		rx_bytes;
2793
2794
2795	/* Idle, do nothing */
2796	if (rxr->bytes == 0)
2797		return;
2798
2799	if (ixl_dynamic_rx_itr) {
2800		rx_bytes = rxr->bytes/rxr->itr;
2801		rx_itr = rxr->itr;
2802
2803		/* Adjust latency range */
2804		switch (rxr->latency) {
2805		case IXL_LOW_LATENCY:
2806			if (rx_bytes > 10) {
2807				rx_latency = IXL_AVE_LATENCY;
2808				rx_itr = IXL_ITR_20K;
2809			}
2810			break;
2811		case IXL_AVE_LATENCY:
2812			if (rx_bytes > 20) {
2813				rx_latency = IXL_BULK_LATENCY;
2814				rx_itr = IXL_ITR_8K;
2815			} else if (rx_bytes <= 10) {
2816				rx_latency = IXL_LOW_LATENCY;
2817				rx_itr = IXL_ITR_100K;
2818			}
2819			break;
2820		case IXL_BULK_LATENCY:
2821			if (rx_bytes <= 20) {
2822				rx_latency = IXL_AVE_LATENCY;
2823				rx_itr = IXL_ITR_20K;
2824			}
2825			break;
2826		}
2827
2828		rxr->latency = rx_latency;
2829
2830		if (rx_itr != rxr->itr) {
2831			/* do an exponential smoothing */
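			/*
			** (a harmonic mean weighted 9:1 toward the current
			** value, so the rate ramps toward the new target
			** rather than jumping)
			*/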
2832			rx_itr = (10 * rx_itr * rxr->itr) /
2833			    ((9 * rx_itr) + rxr->itr);
2834			rxr->itr = rx_itr & IXL_MAX_ITR;
2835			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2836			    que->me), rxr->itr);
2837		}
2838	} else { /* We may have toggled to non-dynamic */
2839		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2840			vsi->rx_itr_setting = ixl_rx_itr;
2841		/* Update the hardware if needed */
2842		if (rxr->itr != vsi->rx_itr_setting) {
2843			rxr->itr = vsi->rx_itr_setting;
2844			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
2845			    que->me), rxr->itr);
2846		}
2847	}
2848	rxr->bytes = 0;
2849	rxr->packets = 0;
2850	return;
2851}
2852
2853
2854/*
2855** Provide an update to the queue TX
2856** interrupt moderation value.
2857*/
2858static void
2859ixl_set_queue_tx_itr(struct ixl_queue *que)
2860{
2861	struct ixl_vsi	*vsi = que->vsi;
2862	struct i40e_hw	*hw = vsi->hw;
2863	struct tx_ring	*txr = &que->txr;
2864	u16		tx_itr;
2865	u16		tx_latency = 0;
2866	int		tx_bytes;
2867
2868
2869	/* Idle, do nothing */
2870	if (txr->bytes == 0)
2871		return;
2872
2873	if (ixl_dynamic_tx_itr) {
2874		tx_bytes = txr->bytes/txr->itr;
2875		tx_itr = txr->itr;
2876
2877		switch (txr->latency) {
2878		case IXL_LOW_LATENCY:
2879			if (tx_bytes > 10) {
2880				tx_latency = IXL_AVE_LATENCY;
2881				tx_itr = IXL_ITR_20K;
2882			}
2883			break;
2884		case IXL_AVE_LATENCY:
2885			if (tx_bytes > 20) {
2886				tx_latency = IXL_BULK_LATENCY;
2887				tx_itr = IXL_ITR_8K;
2888			} else if (tx_bytes <= 10) {
2889				tx_latency = IXL_LOW_LATENCY;
2890				tx_itr = IXL_ITR_100K;
2891			}
2892			break;
2893		case IXL_BULK_LATENCY:
2894			if (tx_bytes <= 20) {
2895				tx_latency = IXL_AVE_LATENCY;
2896				tx_itr = IXL_ITR_20K;
2897			}
2898			break;
2899		}
2900
2901		txr->latency = tx_latency;
2902
2903		if (tx_itr != txr->itr) {
2904			/* do an exponential smoothing */
2905			tx_itr = (10 * tx_itr * txr->itr) /
2906			    ((9 * tx_itr) + txr->itr);
2907			txr->itr = tx_itr & IXL_MAX_ITR;
2908			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2909			    que->me), txr->itr);
2910		}
2911
2912	} else { /* We may have toggled to non-dynamic */
2913		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2914			vsi->tx_itr_setting = ixl_tx_itr;
2915		/* Update the hardware if needed */
2916		if (txr->itr != vsi->tx_itr_setting) {
2917			txr->itr = vsi->tx_itr_setting;
2918			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
2919			    que->me), txr->itr);
2920		}
2921	}
2922	txr->bytes = 0;
2923	txr->packets = 0;
2924	return;
2925}
2926
2927
2928static void
2929ixl_add_hw_stats(struct ixl_pf *pf)
2930{
2931	device_t dev = pf->dev;
2932	struct ixl_vsi *vsi = &pf->vsi;
2933	struct ixl_queue *queues = vsi->queues;
2934	struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
2935	struct i40e_hw_port_stats *pf_stats = &pf->stats;
2936
2937	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2938	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2939	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2940
2941	struct sysctl_oid *vsi_node, *queue_node;
2942	struct sysctl_oid_list *vsi_list, *queue_list;
2943
2944	struct tx_ring *txr;
2945	struct rx_ring *rxr;
2946
2947	/* Driver statistics */
2948	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2949			CTLFLAG_RD, &pf->watchdog_events,
2950			"Watchdog timeouts");
2951	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2952			CTLFLAG_RD, &pf->admin_irq,
2953			"Admin Queue IRQ Handled");
2954
2955	/* VSI statistics */
2956#define QUEUE_NAME_LEN 32
2957	char queue_namebuf[QUEUE_NAME_LEN];
2958
2959	// ERJ: Only one vsi now, re-do when >1 VSI enabled
2960	// snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
2961	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2962				   CTLFLAG_RD, NULL, "VSI-specific stats");
2963	vsi_list = SYSCTL_CHILDREN(vsi_node);
2964
2965	ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
2966
2967	/* Queue statistics */
2968	for (int q = 0; q < vsi->num_queues; q++) {
2969		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2970		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2971					     CTLFLAG_RD, NULL, "Queue #");
2972		queue_list = SYSCTL_CHILDREN(queue_node);
2973
2974		txr = &(queues[q].txr);
2975		rxr = &(queues[q].rxr);
2976
2977		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2978				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2979				"m_defrag() failed");
2980		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2981				CTLFLAG_RD, &(queues[q].dropped_pkts),
2982				"Driver dropped packets");
2983		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2984				CTLFLAG_RD, &(queues[q].irqs),
2985				"irqs on this queue");
2986		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2987				CTLFLAG_RD, &(queues[q].tso),
2988				"TSO");
2989		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2990				CTLFLAG_RD, &(queues[q].tx_dma_setup),
2991				"Driver tx dma failure in xmit");
2992		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2993				CTLFLAG_RD, &(txr->no_desc),
2994				"Queue No Descriptor Available");
2995		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2996				CTLFLAG_RD, &(txr->total_packets),
2997				"Queue Packets Transmitted");
2998		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2999				CTLFLAG_RD, &(txr->tx_bytes),
3000				"Queue Bytes Transmitted");
3001		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3002				CTLFLAG_RD, &(rxr->rx_packets),
3003				"Queue Packets Received");
3004		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3005				CTLFLAG_RD, &(rxr->rx_bytes),
3006				"Queue Bytes Received");
3007	}
3008
3009	/* MAC stats */
3010	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
3011}
3012
3013static void
3014ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
3015	struct sysctl_oid_list *child,
3016	struct i40e_eth_stats *eth_stats)
3017{
3018	struct ixl_sysctl_info ctls[] =
3019	{
3020		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3021		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
3022			"Unicast Packets Received"},
3023		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
3024			"Multicast Packets Received"},
3025		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
3026			"Broadcast Packets Received"},
3027		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
3028		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3029		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3030		{&eth_stats->tx_multicast, "mcast_pkts_txd",
3031			"Multicast Packets Transmitted"},
3032		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
3033			"Broadcast Packets Transmitted"},
3034		{&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
3035		// end
3036		{0,0,0}
3037	};
3038
3039	struct ixl_sysctl_info *entry = ctls;
3040	while (entry->stat != 0)
3041	{
3042		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
3043				CTLFLAG_RD, entry->stat,
3044				entry->description);
3045		entry++;
3046	}
3047}
3048
3049static void
3050ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
3051	struct sysctl_oid_list *child,
3052	struct i40e_hw_port_stats *stats)
3053{
3054	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
3055				    CTLFLAG_RD, NULL, "MAC Statistics");
3056	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
3057
3058	struct i40e_eth_stats *eth_stats = &stats->eth;
3059	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
3060
3061	struct ixl_sysctl_info ctls[] =
3062	{
3063		{&stats->crc_errors, "crc_errors", "CRC Errors"},
3064		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
3065		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
3066		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
3067		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
3068		/* Packet Reception Stats */
3069		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
3070		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
3071		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
3072		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
3073		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
3074		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
3075		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
3076		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
3077		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
3078		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
3079		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
3080		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
3081		/* Packet Transmission Stats */
3082		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
3083		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
3084		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
3085		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
3086		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
3087		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
3088		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
3089		/* Flow control */
3090		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
3091		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
3092		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
3093		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
3094		/* End */
3095		{0,0,0}
3096	};
3097
3098	struct ixl_sysctl_info *entry = ctls;
3099	while (entry->stat != 0)
3100	{
3101		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
3102				CTLFLAG_RD, entry->stat,
3103				entry->description);
3104		entry++;
3105	}
3106}
3107
3108/*
3109** ixl_config_rss - setup RSS
3110**  - note this is done for the single vsi
3111*/
3112static void ixl_config_rss(struct ixl_vsi *vsi)
3113{
3114	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3115	struct i40e_hw	*hw = vsi->hw;
3116	u32		lut = 0;
3117	u64		set_hena, hena;
3118	int		i, j;
3119
3120	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
3121	    0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
3122	    0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
3123	    0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
3124
3125	/* Fill out hash function seed */
3126	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3127                wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
3128		wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
3129	/* Enable PCTYPES for RSS: */
3130	set_hena =
3131		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
3132		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
3133		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
3134		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
3135		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
3136		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
3137		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
3138		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
3139		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
3140		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
3141		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
3142
3143	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
3144	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
3145	hena |= set_hena;
3146	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
3147	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
3148
3149	/* Populate the LUT with max no. of queues in round robin fashion */
3150	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
3151		if (j == vsi->num_queues)
3152			j = 0;
3153		/* lut = 4-byte sliding window of 4 lut entries */
3154		lut = (lut << 8) | (j &
3155		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
3156		/* On i = 3, we have 4 entries in lut; write to the register */
3157		if ((i & 3) == 3)
3158			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
3159	}
3160	ixl_flush(hw);
3161}
3162
3163
3164/*
3165** This routine is run via a vlan config EVENT; it
3166** enables us to use the HW Filter table since we
3167** can get the vlan id. This just creates the entry
3168** in the soft version of the VFTA; init will
3169** repopulate the real table.
3170*/
3171static void
3172ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3173{
3174	struct ixl_vsi	*vsi = ifp->if_softc;
3175	struct i40e_hw	*hw = vsi->hw;
3176	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3177
3178	if (ifp->if_softc !=  arg)   /* Not our event */
3179		return;
3180
3181	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3182		return;
3183
3184	IXL_PF_LOCK(pf);
3185	++vsi->num_vlans;
3186	ixl_add_filter(vsi, hw->mac.addr, vtag);
3187	IXL_PF_UNLOCK(pf);
3188}
3189
3190/*
3191** This routine is run via a vlan
3192** unconfig EVENT; it removes our
3193** entry from the soft VFTA.
3194*/
3195static void
3196ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3197{
3198	struct ixl_vsi	*vsi = ifp->if_softc;
3199	struct i40e_hw	*hw = vsi->hw;
3200	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
3201
3202	if (ifp->if_softc !=  arg)
3203		return;
3204
3205	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3206		return;
3207
3208	IXL_PF_LOCK(pf);
3209	--vsi->num_vlans;
3210	ixl_del_filter(vsi, hw->mac.addr, vtag);
3211	IXL_PF_UNLOCK(pf);
3212}
3213
3214/*
3215** This routine updates the vlan filters; called by
3216** init, it scans the filter table and then updates
3217** the hw after a soft reset.
3218*/
3219static void
3220ixl_setup_vlan_filters(struct ixl_vsi *vsi)
3221{
3222	struct ixl_mac_filter	*f;
3223	int			cnt = 0, flags;
3224
3225	if (vsi->num_vlans == 0)
3226		return;
3227	/*
3228	** Scan the filter list for vlan entries,
3229	** mark them for addition and then call
3230	** for the AQ update.
3231	*/
3232	SLIST_FOREACH(f, &vsi->ftl, next) {
3233		if (f->flags & IXL_FILTER_VLAN) {
3234			f->flags |=
3235			    (IXL_FILTER_ADD |
3236			    IXL_FILTER_USED);
3237			cnt++;
3238		}
3239	}
3240	if (cnt == 0) {
3241		printf("setup vlan: no filters found!\n");
3242		return;
3243	}
3244	flags = IXL_FILTER_VLAN;
3245	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3246	ixl_add_hw_filters(vsi, flags, cnt);
3247	return;
3248}
3249
3250/*
3251** Initialize filter list and add filters that the hardware
3252** needs to know about.
3253*/
3254static void
3255ixl_init_filters(struct ixl_vsi *vsi)
3256{
3257	/* Add broadcast address */
3258	u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
3259	ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
3260}
3261
3262/*
3263** This routine adds multicast filters
3264*/
3265static void
3266ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
3267{
3268	struct ixl_mac_filter *f;
3269
3270	/* Does one already exist */
3271	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3272	if (f != NULL)
3273		return;
3274
3275	f = ixl_get_filter(vsi);
3276	if (f == NULL) {
3277		printf("WARNING: no filter available!!\n");
3278		return;
3279	}
3280	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3281	f->vlan = IXL_VLAN_ANY;
3282	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
3283	    | IXL_FILTER_MC);
3284
3285	return;
3286}
3287
3288/*
3289** This routine adds macvlan filters
3290*/
3291static void
3292ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3293{
3294	struct ixl_mac_filter	*f, *tmp;
3295	device_t		dev = vsi->dev;
3296
3297	DEBUGOUT("ixl_add_filter: begin");
3298
3299	/* Does one already exist */
3300	f = ixl_find_filter(vsi, macaddr, vlan);
3301	if (f != NULL)
3302		return;
3303	/*
3304	** If this is the first vlan being registered, we
3305	** need to remove the ANY filter that indicates we
3306	** are not in a vlan, and replace it with a 0 filter.
3307	*/
3308	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
3309		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
3310		if (tmp != NULL) {
3311			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
3312			ixl_add_filter(vsi, macaddr, 0);
3313		}
3314	}
3315
3316	f = ixl_get_filter(vsi);
3317	if (f == NULL) {
3318		device_printf(dev, "WARNING: no filter available!!\n");
3319		return;
3320	}
3321	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
3322	f->vlan = vlan;
3323	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
3324	if (f->vlan != IXL_VLAN_ANY)
3325		f->flags |= IXL_FILTER_VLAN;
3326
3327	ixl_add_hw_filters(vsi, f->flags, 1);
3328	return;
3329}
3330
3331static void
3332ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3333{
3334	struct ixl_mac_filter *f;
3335
3336	f = ixl_find_filter(vsi, macaddr, vlan);
3337	if (f == NULL)
3338		return;
3339
3340	f->flags |= IXL_FILTER_DEL;
3341	ixl_del_hw_filters(vsi, 1);
3342
3343	/* Check if this is the last vlan removal */
3344	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
3345		/* Switch back to a non-vlan filter */
3346		ixl_del_filter(vsi, macaddr, 0);
3347		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
3348	}
3349	return;
3350}
3351
3352/*
3353** Find the filter with both matching mac addr and vlan id
3354*/
3355static struct ixl_mac_filter *
3356ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
3357{
3358	struct ixl_mac_filter	*f;
3359	bool			match = FALSE;
3360
3361	SLIST_FOREACH(f, &vsi->ftl, next) {
3362		if (!cmp_etheraddr(f->macaddr, macaddr))
3363			continue;
3364		if (f->vlan == vlan) {
3365			match = TRUE;
3366			break;
3367		}
3368	}
3369
3370	if (!match)
3371		f = NULL;
3372	return (f);
3373}
3374
3375/*
3376** This routine takes additions to the vsi filter
3377** table and creates an Admin Queue call to create
3378** the filters in the hardware.
3379*/
3380static void
3381ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
3382{
3383	struct i40e_aqc_add_macvlan_element_data *a, *b;
3384	struct ixl_mac_filter	*f;
3385	struct i40e_hw	*hw = vsi->hw;
3386	device_t	dev = vsi->dev;
3387	int		err, j = 0;
3388
3389	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
3390	    M_DEVBUF, M_NOWAIT | M_ZERO);
3391	if (a == NULL) {
3392		device_printf(dev, "add hw filter failed to get memory\n");
3393		return;
3394	}
3395
3396	/*
3397	** Scan the filter list, each time we find one
3398	** we add it to the admin queue array and turn off
3399	** the add bit.
3400	*/
3401	SLIST_FOREACH(f, &vsi->ftl, next) {
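		/* note the exact match: flags must be equal, not merely overlap */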
3402		if (f->flags == flags) {
3403			b = &a[j]; // a pox on fvl long names :)
3404			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
3405			b->vlan_tag =
3406			    (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3407			b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3408			f->flags &= ~IXL_FILTER_ADD;
3409			j++;
3410		}
3411		if (j == cnt)
3412			break;
3413	}
3414	if (j > 0) {
3415		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
3416		if (err)
3417			device_printf(dev, "aq_add_macvlan failure %d\n",
3418			    hw->aq.asq_last_status);
3419		else
3420			vsi->hw_filters_add += j;
3421	}
3422	free(a, M_DEVBUF);
3423	return;
3424}
3425
3426/*
3427** This routine takes removals in the vsi filter
3428** table and creates an Admin Queue call to delete
3429** the filters in the hardware.
3430*/
3431static void
3432ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
3433{
3434	struct i40e_aqc_remove_macvlan_element_data *d, *e;
3435	struct i40e_hw		*hw = vsi->hw;
3436	device_t		dev = vsi->dev;
3437	struct ixl_mac_filter	*f, *f_temp;
3438	int			err, j = 0;
3439
3440	DEBUGOUT("ixl_del_hw_filters: begin\n");
3441
3442	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
3443	    M_DEVBUF, M_NOWAIT | M_ZERO);
3444	if (d == NULL) {
3445		printf("del hw filter failed to get memory\n");
3446		return;
3447	}
3448
3449	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
3450		if (f->flags & IXL_FILTER_DEL) {
3451			e = &d[j]; // a pox on fvl long names :)
3452			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
3453			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
3454			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3455			/* delete entry from vsi list */
3456			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
3457			free(f, M_DEVBUF);
3458			j++;
3459		}
3460		if (j == cnt)
3461			break;
3462	}
3463	if (j > 0) {
3464		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
3465		/* NOTE: returns ENOENT every time but seems to work fine,
3466		   so we'll ignore that specific error. */
3467		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
3468			int sc = 0;
3469			for (int i = 0; i < j; i++)
3470				sc += (!d[i].error_code);
3471			vsi->hw_filters_del += sc;
3472			device_printf(dev,
3473			    "Failed to remove %d/%d filters, aq error %d\n",
3474			    j - sc, j, hw->aq.asq_last_status);
3475		} else
3476			vsi->hw_filters_del += j;
3477	}
3478	free(d, M_DEVBUF);
3479
3480	DEBUGOUT("ixl_del_hw_filters: end\n");
3481	return;
3482}
3483
3484
3485static void
3486ixl_enable_rings(struct ixl_vsi *vsi)
3487{
3488	struct i40e_hw	*hw = vsi->hw;
3489	u32		reg;
3490
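	/* Set each queue's ENA_REQ bit, then poll until the STAT bit confirms it */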
3491	for (int i = 0; i < vsi->num_queues; i++) {
3492		i40e_pre_tx_queue_cfg(hw, i, TRUE);
3493
3494		reg = rd32(hw, I40E_QTX_ENA(i));
3495		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3496		    I40E_QTX_ENA_QENA_STAT_MASK;
3497		wr32(hw, I40E_QTX_ENA(i), reg);
3498		/* Verify the enable took */
3499		for (int j = 0; j < 10; j++) {
3500			reg = rd32(hw, I40E_QTX_ENA(i));
3501			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3502				break;
3503			i40e_msec_delay(10);
3504		}
3505		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
3506			printf("TX queue %d disabled!\n", i);
3507
3508		reg = rd32(hw, I40E_QRX_ENA(i));
3509		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3510		    I40E_QRX_ENA_QENA_STAT_MASK;
3511		wr32(hw, I40E_QRX_ENA(i), reg);
3512		/* Verify the enable took */
3513		for (int j = 0; j < 10; j++) {
3514			reg = rd32(hw, I40E_QRX_ENA(i));
3515			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3516				break;
3517			i40e_msec_delay(10);
3518		}
3519		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3520			printf("RX queue %d disabled!\n", i);
3521	}
3522}
3523
3524static void
3525ixl_disable_rings(struct ixl_vsi *vsi)
3526{
3527	struct i40e_hw	*hw = vsi->hw;
3528	u32		reg;
3529
3530	for (int i = 0; i < vsi->num_queues; i++) {
3531		i40e_pre_tx_queue_cfg(hw, i, FALSE);
3532		i40e_usec_delay(500);
3533
3534		reg = rd32(hw, I40E_QTX_ENA(i));
3535		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3536		wr32(hw, I40E_QTX_ENA(i), reg);
3537		/* Verify the disable took */
3538		for (int j = 0; j < 10; j++) {
3539			reg = rd32(hw, I40E_QTX_ENA(i));
3540			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3541				break;
3542			i40e_msec_delay(10);
3543		}
3544		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3545			printf("TX queue %d still enabled!\n", i);
3546
3547		reg = rd32(hw, I40E_QRX_ENA(i));
3548		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3549		wr32(hw, I40E_QRX_ENA(i), reg);
3550		/* Verify the disable took */
3551		for (int j = 0; j < 10; j++) {
3552			reg = rd32(hw, I40E_QRX_ENA(i));
3553			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3554				break;
3555			i40e_msec_delay(10);
3556		}
3557		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3558			printf("RX queue %d still enabled!\n", i);
3559	}
3560}
3561
3562/**
3563 * ixl_handle_mdd_event
3564 *
3565 * Called from the interrupt handler to identify possibly malicious VFs
3566 * (but it also detects events from the PF)
3567 **/
3568static void ixl_handle_mdd_event(struct ixl_pf *pf)
3569{
3570	struct i40e_hw *hw = &pf->hw;
3571	device_t dev = pf->dev;
3572	bool mdd_detected = false;
3573	bool pf_mdd_detected = false;
3574	u32 reg;
3575
3576	/* find what triggered the MDD event */
3577	reg = rd32(hw, I40E_GL_MDET_TX);
3578	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
3579		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
3580				I40E_GL_MDET_TX_PF_NUM_SHIFT;
3581		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
3582				I40E_GL_MDET_TX_EVENT_SHIFT;
3583		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
3584				I40E_GL_MDET_TX_QUEUE_SHIFT;
3585		device_printf(dev,
3586			 "Malicious Driver Detection event 0x%02x"
3587			 " on TX queue %d pf number 0x%02x\n",
3588			 event, queue, pf_num);
3589		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
3590		mdd_detected = true;
3591	}
3592	reg = rd32(hw, I40E_GL_MDET_RX);
3593	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
3594		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
3595				I40E_GL_MDET_RX_FUNCTION_SHIFT;
3596		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
3597				I40E_GL_MDET_RX_EVENT_SHIFT;
3598		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
3599				I40E_GL_MDET_RX_QUEUE_SHIFT;
3600		device_printf(dev,
3601			 "Malicious Driver Detection event 0x%02x"
3602			 " on RX queue %d of function 0x%02x\n",
3603			 event, queue, func);
3604		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
3605		mdd_detected = true;
3606	}
3607
3608	if (mdd_detected) {
3609		reg = rd32(hw, I40E_PF_MDET_TX);
3610		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
3611			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
3612			device_printf(dev,
3613				 "MDD TX event is for this function 0x%08x",
3614				 reg);
3615			pf_mdd_detected = true;
3616		}
3617		reg = rd32(hw, I40E_PF_MDET_RX);
3618		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
3619			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
3620			device_printf(dev,
3621				 "MDD RX event is for this function 0x%08x",
3622				 reg);
3623			pf_mdd_detected = true;
3624		}
3625	}
3626
3627	/* re-enable mdd interrupt cause */
3628	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3629	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3630	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3631	ixl_flush(hw);
3632}
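
/*
** Note: the GL_MDET_* and PF_MDET_* registers read above are cleared
** by writing ones back (as done above), which re-arms detection for
** the next event.
*/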
3633
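/*
** Interrupt cause enable/disable helpers: PFINT_DYN_CTL0 controls the
** admin queue / "other" cause (also used for legacy/MSI mode), while
** PFINT_DYN_CTLN(n) controls the per-queue MSIX vectors.
*/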
3634static void
3635ixl_enable_intr(struct ixl_vsi *vsi)
3636{
3637	struct i40e_hw		*hw = vsi->hw;
3638	struct ixl_queue	*que = vsi->queues;
3639
3640	if (ixl_enable_msix) {
3641		ixl_enable_adminq(hw);
3642		for (int i = 0; i < vsi->num_queues; i++, que++)
3643			ixl_enable_queue(hw, que->me);
3644	} else
3645		ixl_enable_legacy(hw);
3646}
3647
3648static void
3649ixl_disable_intr(struct ixl_vsi *vsi)
3650{
3651	struct i40e_hw		*hw = vsi->hw;
3652	struct ixl_queue	*que = vsi->queues;
3653
3654	if (ixl_enable_msix) {
3655		ixl_disable_adminq(hw);
3656		for (int i = 0; i < vsi->num_queues; i++, que++)
3657			ixl_disable_queue(hw, que->me);
3658	} else
3659		ixl_disable_legacy(hw);
3660}
3661
3662static void
3663ixl_enable_adminq(struct i40e_hw *hw)
3664{
3665	u32		reg;
3666
3667	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3668	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3669	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3670	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3671	ixl_flush(hw);
3673}
3674
3675static void
3676ixl_disable_adminq(struct i40e_hw *hw)
3677{
3678	u32		reg;
3679
3680	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3681	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3684}
3685
3686static void
3687ixl_enable_queue(struct i40e_hw *hw, int id)
3688{
3689	u32		reg;
3690
3691	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3692	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3693	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3694	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3695}
3696
3697static void
3698ixl_disable_queue(struct i40e_hw *hw, int id)
3699{
3700	u32		reg;
3701
3702	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3703	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
3706}
3707
3708static void
3709ixl_enable_legacy(struct i40e_hw *hw)
3710{
3711	u32		reg;
3712	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3713	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3714	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3715	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3716}
3717
3718static void
3719ixl_disable_legacy(struct i40e_hw *hw)
3720{
3721	u32		reg;
3722
3723	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
3724	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
3727}
3728
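/*
** Gather port-level hardware statistics into the PF softc; see
** ixl_stat_update48() below for how first-read offsets make the
** reported counters start from zero.
*/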
3729static void
3730ixl_update_stats_counters(struct ixl_pf *pf)
3731{
3732	struct i40e_hw	*hw = &pf->hw;
3733	struct ixl_vsi *vsi = &pf->vsi;
3734	struct ifnet	*ifp = vsi->ifp;
3735
3736	struct i40e_hw_port_stats *nsd = &pf->stats;
3737	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
3738
3739	/* Update hw stats */
3740	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
3741			   pf->stat_offsets_loaded,
3742			   &osd->crc_errors, &nsd->crc_errors);
3743	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
3744			   pf->stat_offsets_loaded,
3745			   &osd->illegal_bytes, &nsd->illegal_bytes);
3746	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
3747			   I40E_GLPRT_GORCL(hw->port),
3748			   pf->stat_offsets_loaded,
3749			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
3750	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
3751			   I40E_GLPRT_GOTCL(hw->port),
3752			   pf->stat_offsets_loaded,
3753			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
3754	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
3755			   pf->stat_offsets_loaded,
3756			   &osd->eth.rx_discards,
3757			   &nsd->eth.rx_discards);
3758	ixl_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
3759			   pf->stat_offsets_loaded,
3760			   &osd->eth.tx_discards,
3761			   &nsd->eth.tx_discards);
3762	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
3763			   I40E_GLPRT_UPRCL(hw->port),
3764			   pf->stat_offsets_loaded,
3765			   &osd->eth.rx_unicast,
3766			   &nsd->eth.rx_unicast);
3767	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
3768			   I40E_GLPRT_UPTCL(hw->port),
3769			   pf->stat_offsets_loaded,
3770			   &osd->eth.tx_unicast,
3771			   &nsd->eth.tx_unicast);
3772	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
3773			   I40E_GLPRT_MPRCL(hw->port),
3774			   pf->stat_offsets_loaded,
3775			   &osd->eth.rx_multicast,
3776			   &nsd->eth.rx_multicast);
3777	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
3778			   I40E_GLPRT_MPTCL(hw->port),
3779			   pf->stat_offsets_loaded,
3780			   &osd->eth.tx_multicast,
3781			   &nsd->eth.tx_multicast);
3782	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
3783			   I40E_GLPRT_BPRCL(hw->port),
3784			   pf->stat_offsets_loaded,
3785			   &osd->eth.rx_broadcast,
3786			   &nsd->eth.rx_broadcast);
3787	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
3788			   I40E_GLPRT_BPTCL(hw->port),
3789			   pf->stat_offsets_loaded,
3790			   &osd->eth.tx_broadcast,
3791			   &nsd->eth.tx_broadcast);
3792
3793	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
3794			   pf->stat_offsets_loaded,
3795			   &osd->tx_dropped_link_down,
3796			   &nsd->tx_dropped_link_down);
3797	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
3798			   pf->stat_offsets_loaded,
3799			   &osd->mac_local_faults,
3800			   &nsd->mac_local_faults);
3801	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
3802			   pf->stat_offsets_loaded,
3803			   &osd->mac_remote_faults,
3804			   &nsd->mac_remote_faults);
3805	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
3806			   pf->stat_offsets_loaded,
3807			   &osd->rx_length_errors,
3808			   &nsd->rx_length_errors);
3809
3810	/* Flow control (LFC) stats */
3811	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
3812			   pf->stat_offsets_loaded,
3813			   &osd->link_xon_rx, &nsd->link_xon_rx);
3814	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
3815			   pf->stat_offsets_loaded,
3816			   &osd->link_xon_tx, &nsd->link_xon_tx);
3817	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3818			   pf->stat_offsets_loaded,
3819			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
3820	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3821			   pf->stat_offsets_loaded,
3822			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
3823
3824	/* Priority flow control (PFC) stats: currently compiled out */
3825#if 0
3826	for (int i = 0; i < 8; i++) {
3827		ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3828				   pf->stat_offsets_loaded,
3829				   &osd->priority_xon_rx[i],
3830				   &nsd->priority_xon_rx[i]);
3831		ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3832				   pf->stat_offsets_loaded,
3833				   &osd->priority_xon_tx[i],
3834				   &nsd->priority_xon_tx[i]);
3835		ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3836				   pf->stat_offsets_loaded,
3837				   &osd->priority_xoff_tx[i],
3838				   &nsd->priority_xoff_tx[i]);
3839		ixl_stat_update32(hw,
3840				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3841				   pf->stat_offsets_loaded,
3842				   &osd->priority_xon_2_xoff[i],
3843				   &nsd->priority_xon_2_xoff[i]);
3844	}
3845#endif
3846
3847	/* Packet size stats rx */
3848	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
3849			   I40E_GLPRT_PRC64L(hw->port),
3850			   pf->stat_offsets_loaded,
3851			   &osd->rx_size_64, &nsd->rx_size_64);
3852	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
3853			   I40E_GLPRT_PRC127L(hw->port),
3854			   pf->stat_offsets_loaded,
3855			   &osd->rx_size_127, &nsd->rx_size_127);
3856	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
3857			   I40E_GLPRT_PRC255L(hw->port),
3858			   pf->stat_offsets_loaded,
3859			   &osd->rx_size_255, &nsd->rx_size_255);
3860	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
3861			   I40E_GLPRT_PRC511L(hw->port),
3862			   pf->stat_offsets_loaded,
3863			   &osd->rx_size_511, &nsd->rx_size_511);
3864	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
3865			   I40E_GLPRT_PRC1023L(hw->port),
3866			   pf->stat_offsets_loaded,
3867			   &osd->rx_size_1023, &nsd->rx_size_1023);
3868	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
3869			   I40E_GLPRT_PRC1522L(hw->port),
3870			   pf->stat_offsets_loaded,
3871			   &osd->rx_size_1522, &nsd->rx_size_1522);
3872	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
3873			   I40E_GLPRT_PRC9522L(hw->port),
3874			   pf->stat_offsets_loaded,
3875			   &osd->rx_size_big, &nsd->rx_size_big);
3876
3877	/* Packet size stats tx */
3878	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
3879			   I40E_GLPRT_PTC64L(hw->port),
3880			   pf->stat_offsets_loaded,
3881			   &osd->tx_size_64, &nsd->tx_size_64);
3882	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
3883			   I40E_GLPRT_PTC127L(hw->port),
3884			   pf->stat_offsets_loaded,
3885			   &osd->tx_size_127, &nsd->tx_size_127);
3886	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
3887			   I40E_GLPRT_PTC255L(hw->port),
3888			   pf->stat_offsets_loaded,
3889			   &osd->tx_size_255, &nsd->tx_size_255);
3890	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
3891			   I40E_GLPRT_PTC511L(hw->port),
3892			   pf->stat_offsets_loaded,
3893			   &osd->tx_size_511, &nsd->tx_size_511);
3894	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
3895			   I40E_GLPRT_PTC1023L(hw->port),
3896			   pf->stat_offsets_loaded,
3897			   &osd->tx_size_1023, &nsd->tx_size_1023);
3898	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
3899			   I40E_GLPRT_PTC1522L(hw->port),
3900			   pf->stat_offsets_loaded,
3901			   &osd->tx_size_1522, &nsd->tx_size_1522);
3902	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
3903			   I40E_GLPRT_PTC9522L(hw->port),
3904			   pf->stat_offsets_loaded,
3905			   &osd->tx_size_big, &nsd->tx_size_big);
3906
3907	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
3908			   pf->stat_offsets_loaded,
3909			   &osd->rx_undersize, &nsd->rx_undersize);
3910	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
3911			   pf->stat_offsets_loaded,
3912			   &osd->rx_fragments, &nsd->rx_fragments);
3913	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
3914			   pf->stat_offsets_loaded,
3915			   &osd->rx_oversize, &nsd->rx_oversize);
3916	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
3917			   pf->stat_offsets_loaded,
3918			   &osd->rx_jabber, &nsd->rx_jabber);
3919	pf->stat_offsets_loaded = true;
3920	/* End hw stats */
3921
3922	/* Update vsi stats */
3923	ixl_update_eth_stats(vsi);
3924
3925	/* OS statistics */
3926	/* ERJ: these counters are per-port; should all VSIs be updated? */
3927	ifp->if_ierrors = nsd->crc_errors + nsd->illegal_bytes;
3928}
3929
3930/*
3931** Tasklet handler for MSIX Adminq interrupts
3932**  - do outside interrupt since it might sleep
3933*/
3934static void
3935ixl_do_adminq(void *context, int pending)
3936{
3937	struct ixl_pf			*pf = context;
3938	struct i40e_hw			*hw = &pf->hw;
3939	struct ixl_vsi			*vsi = &pf->vsi;
3940	struct i40e_arq_event_info	event;
3941	i40e_status			ret;
3942	u32				reg, loop = 0;
3943	u16				opcode, result;
3944
3945	event.msg_len = IXL_AQ_BUF_SZ;
3946	event.msg_buf = malloc(event.msg_len,
3947	    M_DEVBUF, M_NOWAIT | M_ZERO);
3948	if (!event.msg_buf) {
3949		printf("Unable to allocate adminq memory\n");
3950		return;
3951	}
3952
3953	/* clean and process any events */
3954	do {
3955		ret = i40e_clean_arq_element(hw, &event, &result);
3956		if (ret)
3957			break;
3958		opcode = LE16_TO_CPU(event.desc.opcode);
3959		switch (opcode) {
3960		case i40e_aqc_opc_get_link_status:
3961			vsi->link_up = ixl_config_link(hw);
3962			ixl_update_link_status(pf);
3963			break;
3964		case i40e_aqc_opc_send_msg_to_pf:
3965			/* process pf/vf communication here */
3966			break;
3967		case i40e_aqc_opc_event_lan_overflow:
3968			break;
3969		default:
3970#ifdef IXL_DEBUG
3971			printf("AdminQ unknown event %x\n", opcode);
3972#endif
3973			break;
3974		}
3975
3976	} while (result && (loop++ < IXL_ADM_LIMIT));
3977
3978	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3979	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3980	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3981	free(event.msg_buf, M_DEVBUF);
3982
3983	if (pf->msix > 1)
3984		ixl_enable_adminq(&pf->hw);
3985	else
3986		ixl_enable_intr(vsi);
3987}
3988
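/*
** Sysctl handler: writing a 1 dumps driver debug state to the
** console via ixl_print_debug_info() below.
*/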
3989static int
3990ixl_debug_info(SYSCTL_HANDLER_ARGS)
3991{
3992	struct ixl_pf	*pf;
3993	int		error, input = 0;
3994
3995	error = sysctl_handle_int(oidp, &input, 0, req);
3996
3997	if (error || !req->newptr)
3998		return (error);
3999
4000	if (input == 1) {
4001		pf = (struct ixl_pf *)arg1;
4002		ixl_print_debug_info(pf);
4003	}
4004
4005	return (error);
4006}
4007
4008static void
4009ixl_print_debug_info(struct ixl_pf *pf)
4010{
4011	struct i40e_hw		*hw = &pf->hw;
4012	struct ixl_vsi		*vsi = &pf->vsi;
4013	struct ixl_queue	*que = vsi->queues;
4014	struct rx_ring		*rxr = &que->rxr;
4015	struct tx_ring		*txr = &que->txr;
4016	u32			reg;
4018
4019	printf("Queue irqs = %lx\n", que->irqs);
4020	printf("AdminQ irqs = %lx\n", pf->admin_irq);
4021	printf("RX next check = %x\n", rxr->next_check);
4022	printf("RX not ready = %lx\n", rxr->not_done);
4023	printf("RX packets = %lx\n", rxr->rx_packets);
4024	printf("TX desc avail = %x\n", txr->avail);
4025
4026	reg = rd32(hw, I40E_GLV_GORCL(0xc));
4027	printf("RX Bytes = %x\n", reg);
4028	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
4029	printf("Port RX Bytes = %x\n", reg);
4030	reg = rd32(hw, I40E_GLV_RDPC(0xc));
4031	printf("RX discard = %x\n", reg);
4032	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
4033	printf("Port RX discard = %x\n", reg);
4034
4035	reg = rd32(hw, I40E_GLV_TEPC(0xc));
4036	printf("TX errors = %x\n", reg);
4037	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
4038	printf("TX Bytes = %x\n", reg);
4039
4040	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
4041	printf("RX undersize = %x\n", reg);
4042	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
4043	printf("RX fragments = %x\n", reg);
4044	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
4045	printf("RX oversize = %x\n", reg);
4046	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
4047	printf("RX length error = %x\n", reg);
4048	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
4049	printf("mac remote fault = %x\n", reg);
4050	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
4051	printf("mac local fault = %x\n", reg);
4052}
4053
4054/**
4055 * Update VSI-specific ethernet statistics counters.
4056 **/
4057void ixl_update_eth_stats(struct ixl_vsi *vsi)
4058{
4059	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
4060	struct i40e_hw *hw = &pf->hw;
4061	struct ifnet *ifp = vsi->ifp;
4062	struct i40e_eth_stats *es;
4063	struct i40e_eth_stats *oes;
4064	u16 stat_idx = vsi->info.stat_counter_idx;
4065
4066	es = &vsi->eth_stats;
4067	oes = &vsi->eth_stats_offsets;
4068
4069	/* Gather up the stats that the hw collects */
4070	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
4071			   vsi->stat_offsets_loaded,
4072			   &oes->tx_errors, &es->tx_errors);
4073	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
4074			   vsi->stat_offsets_loaded,
4075			   &oes->rx_discards, &es->rx_discards);
4076
4077	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
4078			   I40E_GLV_GORCL(stat_idx),
4079			   vsi->stat_offsets_loaded,
4080			   &oes->rx_bytes, &es->rx_bytes);
4081	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
4082			   I40E_GLV_UPRCL(stat_idx),
4083			   vsi->stat_offsets_loaded,
4084			   &oes->rx_unicast, &es->rx_unicast);
4085	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
4086			   I40E_GLV_MPRCL(stat_idx),
4087			   vsi->stat_offsets_loaded,
4088			   &oes->rx_multicast, &es->rx_multicast);
4089	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
4090			   I40E_GLV_BPRCL(stat_idx),
4091			   vsi->stat_offsets_loaded,
4092			   &oes->rx_broadcast, &es->rx_broadcast);
4093
4094	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
4095			   I40E_GLV_GOTCL(stat_idx),
4096			   vsi->stat_offsets_loaded,
4097			   &oes->tx_bytes, &es->tx_bytes);
4098	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
4099			   I40E_GLV_UPTCL(stat_idx),
4100			   vsi->stat_offsets_loaded,
4101			   &oes->tx_unicast, &es->tx_unicast);
4102	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
4103			   I40E_GLV_MPTCL(stat_idx),
4104			   vsi->stat_offsets_loaded,
4105			   &oes->tx_multicast, &es->tx_multicast);
4106	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
4107			   I40E_GLV_BPTCL(stat_idx),
4108			   vsi->stat_offsets_loaded,
4109			   &oes->tx_broadcast, &es->tx_broadcast);
4110	vsi->stat_offsets_loaded = true;
4111
4112	/* Update ifnet stats */
4113	ifp->if_ipackets = es->rx_unicast +
4114	                   es->rx_multicast +
4115			   es->rx_broadcast;
4116	ifp->if_opackets = es->tx_unicast +
4117	                   es->tx_multicast +
4118			   es->tx_broadcast;
4119	ifp->if_ibytes = es->rx_bytes;
4120	ifp->if_obytes = es->tx_bytes;
4121	ifp->if_imcasts = es->rx_multicast;
4122	ifp->if_omcasts = es->tx_multicast;
4123
4124	ifp->if_oerrors = es->tx_errors;
4125	ifp->if_iqdrops = es->rx_discards;
4126	ifp->if_noproto = es->rx_unknown_protocol;
4127	ifp->if_collisions = 0;
4128}
4129
4130/**
4131 * Reset all statistics for the given PF
4132 **/
4133void ixl_pf_reset_stats(struct ixl_pf *pf)
4134{
4135	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
4136	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
4137	pf->stat_offsets_loaded = false;
4138}
4139
4140/**
4141 * Reset all statistics for the given VSI
4142 **/
4143void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
4144{
4145	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
4146	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
4147	vsi->stat_offsets_loaded = false;
4148}
4149
4150/**
4151 * Read and update a 48 bit stat from the hw
4152 *
4153 * Since the device stats are not reset at PFReset, they likely will not
4154 * be zeroed when the driver starts.  We'll save the first values read
4155 * and use them as offsets to be subtracted from the raw values in order
4156 * to report stats that count from zero.
4157 **/
4158static void
4159ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
4160	bool offset_loaded, u64 *offset, u64 *stat)
4161{
4162	u64 new_data;
4163
4164#if __FreeBSD__ >= 10 && __amd64__
4165	new_data = rd64(hw, loreg);
4166#else
4167	/*
4168	 * Use two rd32's instead of one rd64; FreeBSD versions before
4169	 * 10 don't support 8 byte bus reads/writes.
4170	 */
4171	new_data = rd32(hw, loreg);
4172	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
4173#endif
4174
4175	if (!offset_loaded)
4176		*offset = new_data;
4177	if (new_data >= *offset)
4178		*stat = new_data - *offset;
4179	else
4180		*stat = (new_data + ((u64)1 << 48)) - *offset;
4181	*stat &= 0xFFFFFFFFFFFFULL;
4182}
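
/*
** Worked example of the rollover handling above, with a hypothetical
** register pair HIREG/LOREG: if the saved offset is 0xFFFFFFFFFFF0
** and the next raw read is 0x10, the 48-bit counter wrapped, so the
** reported value is (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20.
**
**	u64 offset = 0, stat = 0;
**	ixl_stat_update48(hw, HIREG, LOREG, FALSE, &offset, &stat);
**	// primes offset; stat reads back as 0
**	ixl_stat_update48(hw, HIREG, LOREG, TRUE, &offset, &stat);
**	// reports the delta accumulated since the offset was taken
*/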
4183
4184/**
4185 * Read and update a 32 bit stat from the hw
4186 **/
4187static void
4188ixl_stat_update32(struct i40e_hw *hw, u32 reg,
4189	bool offset_loaded, u64 *offset, u64 *stat)
4190{
4191	u32 new_data;
4192
4193	new_data = rd32(hw, reg);
4194	if (!offset_loaded)
4195		*offset = new_data;
4196	if (new_data >= *offset)
4197		*stat = (u32)(new_data - *offset);
4198	else
4199		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
4200}
4201
4202/*
4203** Set flow control using sysctl:
4204** 	0 - off
4205**	1 - rx pause
4206**	2 - tx pause
4207**	3 - full
4208*/
4209static int
4210ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
4211{
4212	/*
4213	 * TODO: ensure flow control is disabled if
4214	 * priority flow control is enabled
4215	 *
4216	 * TODO: ensure tx CRC by hardware should be enabled
4217	 * if tx flow control is enabled.
4218	 */
4219	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4220	struct i40e_hw *hw = &pf->hw;
4221	device_t dev = pf->dev;
4222	int requested_fc = 0, error = 0;
4223	enum i40e_status_code aq_error = 0;
4224	u8 fc_aq_err = 0;
4225
4226	aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
4227	if (aq_error) {
4228		device_printf(dev,
4229		    "%s: Error retrieving link info from aq, %d\n",
4230		    __func__, aq_error);
4231		return (EAGAIN);
4232	}
4233
4234	/* Read in new mode */
4235	requested_fc = hw->fc.current_mode;
4236	error = sysctl_handle_int(oidp, &requested_fc, 0, req);
4237	if ((error) || (req->newptr == NULL))
4238		return (error);
4239	if (requested_fc < 0 || requested_fc > 3) {
4240		device_printf(dev,
4241		    "Invalid fc mode; valid modes are 0 through 3\n");
4242		return (EINVAL);
4243	}
4244
4245	/*
4246	** Changing flow control mode currently does not work on
4247	** 40GBASE-CR4 PHYs
4248	*/
4249	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
4250	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
4251		device_printf(dev, "Changing flow control mode unsupported"
4252		    " on 40GBase-CR4 media.\n");
4253		return (ENODEV);
4254	}
4255
4256	/* Set fc ability for port */
4257	hw->fc.requested_mode = requested_fc;
4258	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
4259	if (aq_error) {
4260		device_printf(dev,
4261		    "%s: Error setting new fc mode %d; fc_err %#x\n",
4262		    __func__, aq_error, fc_aq_err);
4263		return (EAGAIN);
4264	}
4265
4266	if (hw->fc.current_mode != hw->fc.requested_mode) {
4267		device_printf(dev, "%s: FC set failure:\n", __func__);
4268		device_printf(dev, "%s: Current: %s / Requested: %s\n",
4269		    __func__,
4270		    ixl_fc_string[hw->fc.current_mode],
4271		    ixl_fc_string[hw->fc.requested_mode]);
4272	}
4273
4274	return (0);
4275}
4276
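/*
** Example use of the flow control sysctl above (hypothetical OID
** path; the actual name depends on where the handler is attached):
**
**	# sysctl dev.ixl.0.fc=3		(request full rx/tx pause)
*/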
4277static int
4278ixl_current_speed(SYSCTL_HANDLER_ARGS)
4279{
4280	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4281	struct i40e_hw *hw = &pf->hw;
4282	int error = 0, index = 0;
4283
4284	char *speeds[] = {
4285		"Unknown",
4286		"100M",
4287		"1G",
4288		"10G",
4289		"40G",
4290		"20G"
4291	};
4292
4293	ixl_update_link_status(pf);
4294
4295	switch (hw->phy.link_info.link_speed) {
4296	case I40E_LINK_SPEED_100MB:
4297		index = 1;
4298		break;
4299	case I40E_LINK_SPEED_1GB:
4300		index = 2;
4301		break;
4302	case I40E_LINK_SPEED_10GB:
4303		index = 3;
4304		break;
4305	case I40E_LINK_SPEED_40GB:
4306		index = 4;
4307		break;
4308	case I40E_LINK_SPEED_20GB:
4309		index = 5;
4310		break;
4311	case I40E_LINK_SPEED_UNKNOWN:
4312	default:
4313		index = 0;
4314		break;
4315	}
4316
4317	error = sysctl_handle_string(oidp, speeds[index],
4318	    strlen(speeds[index]), req);
4319	return (error);
4320}
4321
4322/*
4323** Control link advertise speed:
4324**	Flags:
4325**	0x1 - advertise 100 Mb
4326**	0x2 - advertise 1G
4327**	0x4 - advertise 10G
4328**
4329** Does not work on 40G devices.
4330*/
4331static int
4332ixl_set_advertise(SYSCTL_HANDLER_ARGS)
4333{
4334	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4335	struct i40e_hw *hw = &pf->hw;
4336	device_t dev = pf->dev;
4337	struct i40e_aq_get_phy_abilities_resp abilities;
4338	struct i40e_aq_set_phy_config config;
4339	int requested_ls = 0;
4340	enum i40e_status_code aq_error = 0;
4341	int error = 0;
4342
4343	/*
4344	** FW doesn't support changing advertised speed
4345	** for 40G devices; speed is always 40G.
4346	*/
4347	if (i40e_is_40G_device(hw->device_id))
4348		return (ENODEV);
4349
4350	/* Read in new mode */
4351	requested_ls = pf->advertised_speed;
4352	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
4353	if ((error) || (req->newptr == NULL))
4354		return (error);
4355	if (requested_ls < 1 || requested_ls > 7) {
4356		device_printf(dev,
4357		    "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4358		return (EINVAL);
4359	}
4360
4361	/* Exit if no change */
4362	if (pf->advertised_speed == requested_ls)
4363		return (0);
4364
4365	/* Get current capability information */
4366	aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
4367	if (aq_error) {
4368		device_printf(dev, "%s: Error getting phy capabilities %d,"
4369		    " aq error: %d\n", __func__, aq_error,
4370		    hw->aq.asq_last_status);
4371		return (EAGAIN);
4372	}
4373
4374	/* Prepare new config */
4375	bzero(&config, sizeof(config));
4376	config.phy_type = abilities.phy_type;
4377	config.abilities = abilities.abilities
4378	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4379	config.eee_capability = abilities.eee_capability;
4380	config.eeer = abilities.eeer_val;
4381	config.low_power_ctrl = abilities.d3_lpan;
4382	/* Translate into aq cmd link_speed */
4383	if (requested_ls & 0x4)
4384		config.link_speed |= I40E_LINK_SPEED_10GB;
4385	if (requested_ls & 0x2)
4386		config.link_speed |= I40E_LINK_SPEED_1GB;
4387	if (requested_ls & 0x1)
4388		config.link_speed |= I40E_LINK_SPEED_100MB;
4389
4390	/* Do aq command & restart link */
4391	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
4392	if (aq_error) {
4393		device_printf(dev, "%s: Error setting new phy config %d,"
4394		    " aq error: %d\n", __func__, aq_error,
4395		    hw->aq.asq_last_status);
4396		return (EAGAIN);
4397	}
4398
4399	pf->advertised_speed = requested_ls;
4400	ixl_update_link_status(pf);
4401	return (0);
4402}
4403
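/*
** Example use of the advertise sysctl above (hypothetical OID path):
**
**	# sysctl dev.ixl.0.advertise_speed=0x6	(advertise 1G and 10G)
*/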
4404/*
4405** Get the width and transaction speed of
4406** the bus this adapter is plugged into.
4407*/
4408static u16
4409ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
4410{
4411	u16		link;
4412	int		offset;
4413
4415	/* Get the PCI Express Capabilities offset */
4416	if (pci_find_cap(dev, PCIY_EXPRESS, &offset) != 0) {
		device_printf(dev, "PCI Express capability not found\n");
		return (0);
	}
4417
4418	/* ...and read the Link Status Register */
4419	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
4420
4421	switch (link & I40E_PCI_LINK_WIDTH) {
4422	case I40E_PCI_LINK_WIDTH_1:
4423		hw->bus.width = i40e_bus_width_pcie_x1;
4424		break;
4425	case I40E_PCI_LINK_WIDTH_2:
4426		hw->bus.width = i40e_bus_width_pcie_x2;
4427		break;
4428	case I40E_PCI_LINK_WIDTH_4:
4429		hw->bus.width = i40e_bus_width_pcie_x4;
4430		break;
4431	case I40E_PCI_LINK_WIDTH_8:
4432		hw->bus.width = i40e_bus_width_pcie_x8;
4433		break;
4434	default:
4435		hw->bus.width = i40e_bus_width_unknown;
4436		break;
4437	}
4438
4439	switch (link & I40E_PCI_LINK_SPEED) {
4440	case I40E_PCI_LINK_SPEED_2500:
4441		hw->bus.speed = i40e_bus_speed_2500;
4442		break;
4443	case I40E_PCI_LINK_SPEED_5000:
4444		hw->bus.speed = i40e_bus_speed_5000;
4445		break;
4446	case I40E_PCI_LINK_SPEED_8000:
4447		hw->bus.speed = i40e_bus_speed_8000;
4448		break;
4449	default:
4450		hw->bus.speed = i40e_bus_speed_unknown;
4451		break;
4452	}
4453
4455	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
4456	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
4457	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
4458	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
4459	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
4460	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
4461	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
4462	    ("Unknown"));
4463
4464	if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
4465	    (hw->bus.speed < i40e_bus_speed_8000)) {
4466		device_printf(dev, "PCI-Express bandwidth available"
4467		    " for this device is not sufficient for"
4468		    " normal operation.\n");
4469		device_printf(dev, "For expected performance a x8 "
4470		    "PCIE Gen3 slot is required.\n");
4471	}
4472
4473	return (link);
4474}
4475
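/*
** Note: per the warning above, a 40G adapter needs a PCIe Gen3 x8
** slot for expected performance; a narrower or slower link will still
** attach but cannot reach full line rate.
*/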
4476#ifdef IXL_DEBUG
4477static int
4478ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
4479{
4480	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4481	struct i40e_hw *hw = &pf->hw;
4482	struct i40e_link_status link_status;
4483	char buf[512];
4484
4485	enum i40e_status_code aq_error = 0;
4486
4487	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
4488	if (aq_error) {
4489		printf("i40e_aq_get_link_info() error %d\n", aq_error);
4490		return (EPERM);
4491	}
4492
4493	sprintf(buf, "\n"
4494	    "PHY Type : %#04x\n"
4495	    "Speed    : %#04x\n"
4496	    "Link info: %#04x\n"
4497	    "AN info  : %#04x\n"
4498	    "Ext info : %#04x",
4499	    link_status.phy_type, link_status.link_speed,
4500	    link_status.link_info, link_status.an_info,
4501	    link_status.ext_info);
4502
4503	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4504}
4505
4506static int
4507ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
4508{
4509	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4510	struct i40e_hw *hw = &pf->hw;
4511	struct i40e_aq_get_phy_abilities_resp abilities_resp;
4512	char buf[512];
4513
4514	enum i40e_status_code aq_error = 0;
4515
4516	// TODO: Print out list of qualified modules as well?
4517	aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
4518	if (aq_error) {
4519		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
4520		return (EPERM);
4521	}
4522
4523	sprintf(buf, "\n"
4524	    "PHY Type : %#010x\n"
4525	    "Speed    : %#04x\n"
4526	    "Abilities: %#04x\n"
4527	    "EEE cap  : %#06x\n"
4528	    "EEER reg : %#010x\n"
4529	    "D3 Lpan  : %#04x",
4530	    abilities_resp.phy_type, abilities_resp.link_speed,
4531	    abilities_resp.abilities, abilities_resp.eee_capability,
4532	    abilities_resp.eeer_val, abilities_resp.d3_lpan);
4533
4534	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
4535}
4536
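/*
** Sysctl handler: dumps the software MAC filter list, one
** "MAC, vlan, flags" entry per line.
*/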
4537static int
4538ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
4539{
4540	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4541	struct ixl_vsi *vsi = &pf->vsi;
4542	struct ixl_mac_filter *f;
4543	char *buf, *buf_i;
4544
4545	int error = 0;
4546	int ftl_len = 0;
4547	int ftl_counter = 0;
4548	int buf_len = 0;
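	/* "xx:xx:xx:xx:xx:xx, vlan %4d, flags %#06x" is 42 characters */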
4549	int entry_len = 42;
4550
4551	SLIST_FOREACH(f, &vsi->ftl, next) {
4552		ftl_len++;
4553	}
4554
4555	if (ftl_len < 1) {
4556		sysctl_handle_string(oidp, "(none)", 6, req);
4557		return (0);
4558	}
4559
4560	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
4561	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);
4562
4563	sprintf(buf_i++, "\n");
4564	SLIST_FOREACH(f, &vsi->ftl, next) {
4565		sprintf(buf_i,
4566		    MAC_FORMAT ", vlan %4d, flags %#06x",
4567		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
4568		buf_i += entry_len;
4569		/* don't print '\n' for last entry */
4570		if (++ftl_counter != ftl_len) {
4571			sprintf(buf_i, "\n");
4572			buf_i++;
4573		}
4574	}
4575
4576	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
4577	if (error)
4578		printf("sysctl error: %d\n", error);
4579	free(buf, M_DEVBUF);
4580	return (error);
4581}
4582
4583#define IXL_SW_RES_SIZE 0x14
4584static int
4585ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
4586{
4587	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4588	struct i40e_hw *hw = &pf->hw;
4589	device_t dev = pf->dev;
4590	struct sbuf *buf;
4591	int error = 0;
4592
4593	u8 num_entries;
4594	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
4595
4596	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4597	if (!buf) {
4598		device_printf(dev, "Could not allocate sbuf for output.\n");
4599		return (ENOMEM);
4600	}
4601
4602	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
4603				resp,
4604				IXL_SW_RES_SIZE,
4605				NULL);
4606	if (error) {
4607		device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
4608		    __func__, error, hw->aq.asq_last_status);
4609		sbuf_delete(buf);
4610		return (error);
4611	}
4612	device_printf(dev, "Num_entries: %d\n", num_entries);
4613
4614	sbuf_cat(buf, "\n");
4615	sbuf_printf(buf,
4616	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
4617	    "     | (this)     | (all) | (this) | (all)       \n");
4618	for (int i = 0; i < num_entries; i++) {
4619		sbuf_printf(buf,
4620		    "%#4x | %10d   %5d   %6d   %12d",
4621		    resp[i].resource_type,
4622		    resp[i].guaranteed,
4623		    resp[i].total,
4624		    resp[i].used,
4625		    resp[i].total_unalloced);
4626		if (i < num_entries - 1)
4627			sbuf_cat(buf, "\n");
4628	}
4629
4630	error = sbuf_finish(buf);
4631	if (error) {
4632		device_printf(dev, "Error finishing sbuf: %d\n", error);
4633		sbuf_delete(buf);
4634		return (error);
4635	}
4636
4637	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4638	if (error)
4639		device_printf(dev, "sysctl error: %d\n", error);
4640	sbuf_delete(buf);
4641	return (error);
4643}
4644
4645/*
4646** Dump the TX descriptor at a given index.
4647** Doesn't work yet; don't use.
4648** TODO: also needs a queue index input.
4649*/
4650static int
4651ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
4652{
4653	struct ixl_pf *pf = (struct ixl_pf *)arg1;
4654	device_t dev = pf->dev;
4655	struct sbuf *buf;
4656	int error = 0;
4657
4658	int desc_idx = 0;
4659
	// Don't use this sysctl yet
	if (TRUE)
		return (ENODEV);

4660	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
4661	if (!buf) {
4662		device_printf(dev, "Could not allocate sbuf for output.\n");
4663		return (ENOMEM);
4664	}
4665
4666	/* Read in index */
4667	error = sysctl_handle_int(oidp, &desc_idx, 0, req);
4668	if (error)
4669		return (error);
4670	if (req->newptr == NULL)
4671		return (EIO); // fix
4672	if (desc_idx > 1024) { // fix
4673		device_printf(dev,
4674		    "Invalid descriptor index, needs to be < 1024\n"); // fix
4675		return (EINVAL);
4676	}
4677
4682	sbuf_cat(buf, "\n");
4683
4684	// set to queue 1?
4685	struct ixl_queue *que = pf->vsi.queues;
4686	struct tx_ring *txr = &(que[1].txr);
4687	struct i40e_tx_desc *txd = &txr->base[desc_idx];
4688
4689	sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
4690	sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
4691	sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
4692
4693	error = sbuf_finish(buf);
4694	if (error) {
4695		device_printf(dev, "Error finishing sbuf: %d\n", error);
4696		sbuf_delete(buf);
4697		return (error);
4698	}
4699
4700	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
4701	if (error)
4702		device_printf(dev, "sysctl error: %d\n", error);
4703	sbuf_delete(buf);
4704	return (error);
4705}
4706#endif
4707
4708