/******************************************************************************

  Copyright (c) 2013-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: stable/11/sys/dev/ixl/if_ixlv.c 333343 2018-05-07 23:23:11Z erj $*/

#include "ixl.h"
#include "ixlv.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXLV_DRIVER_VERSION_MAJOR	1
#define IXLV_DRIVER_VERSION_MINOR	5
#define IXLV_DRIVER_VERSION_BUILD	4

char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "."
			     __XSTRING(IXLV_DRIVER_VERSION_MINOR) "."
			     __XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k";
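/* With the values above, this expands to the string "1.5.4-k". */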

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixlv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixlv_strings[] = {
	"Intel(R) Ethernet Connection 700 Series VF Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
static void	ixlv_init_locked(struct ixlv_sc *);
static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
static void	ixlv_free_pci_resources(struct ixlv_sc *);
static int	ixlv_assign_msix(struct ixlv_sc *);
static int	ixlv_init_msix(struct ixlv_sc *);
static int	ixlv_init_taskqueue(struct ixlv_sc *);
static int	ixlv_setup_queues(struct ixlv_sc *);
static void	ixlv_config_rss(struct ixlv_sc *);
static void	ixlv_stop(struct ixlv_sc *);
static void	ixlv_add_multi(struct ixl_vsi *);
static void	ixlv_del_multi(struct ixl_vsi *);
static void	ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que);
static void	ixlv_free_queues(struct ixl_vsi *);
static int	ixlv_setup_interface(device_t, struct ixlv_sc *);
static int	ixlv_teardown_adminq_msix(struct ixlv_sc *);

static int	ixlv_media_change(struct ifnet *);
static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void	ixlv_local_timer(void *);

static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int	ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void	ixlv_init_filters(struct ixlv_sc *);
static void	ixlv_free_filters(struct ixlv_sc *);

static void	ixlv_msix_que(void *);
static void	ixlv_msix_adminq(void *);
static void	ixlv_do_adminq(void *, int);
static void	ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void	ixlv_handle_que(void *, int);
static int	ixlv_reset(struct ixlv_sc *);
static int	ixlv_reset_complete(struct i40e_hw *);
static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
static void	ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
		    enum i40e_status_code);
static void	ixlv_configure_itr(struct ixlv_sc *);

static void	ixlv_enable_adminq_irq(struct i40e_hw *);
static void	ixlv_disable_adminq_irq(struct i40e_hw *);
static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
static void	ixlv_disable_queue_irq(struct i40e_hw *, int);

static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
static void	ixlv_register_vlan(void *, struct ifnet *, u16);
static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixlv_init_hw(struct ixlv_sc *);
static int	ixlv_setup_vc(struct ixlv_sc *);
static int	ixlv_vf_config(struct ixlv_sc *);

static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
		    struct ifnet *, int);

static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed);
static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS);

static void	ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int 	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int 	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixlv_probe),
	DEVMETHOD(device_attach, ixlv_attach),
	DEVMETHOD(device_detach, ixlv_detach),
	DEVMETHOD(device_shutdown, ixlv_shutdown),
	{0, 0}
};

static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
                   "IXLV driver parameters");

/*
** Number of descriptors per ring:
** - TX and RX sizes are independently configurable
*/
static int ixlv_tx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
    &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size");

static int ixlv_rx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
    &ixlv_rx_ring_size, 0, "RX Descriptor Ring Size");

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");
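
/*
 * The CTLFLAG_RDTUN tunables above are read once at module load, so they
 * are set as loader tunables rather than at runtime, e.g. in
 * /boot/loader.conf (hypothetical values):
 *
 *   hw.ixlv.tx_ring_size="2048"
 *   hw.ixlv.rx_ring_size="2048"
 *   hw.ixlv.max_queues="4"
 */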

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixlv_enable_head_writeback = 0;
TUNABLE_INT("hw.ixlv.enable_head_writeback",
    &ixlv_enable_head_writeback);
SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixlv_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
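
/*
 * See ixlv_setup_queue(): in head-writeback mode the TX descriptor ring
 * is allocated one u32 larger so the hardware has room to write back the
 * index of the last completed descriptor.
 */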

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");


/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];

#if 0
	INIT_DEBUGOUT("ixlv_probe: begin");
#endif

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixlv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixlv_strings[ent->index],
				ixlv_driver_version);
			device_set_desc_copy(dev, device_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
	struct ixlv_sc	*sc;
	struct i40e_hw	*hw;
	struct ixl_vsi 	*vsi;
	int            	error = 0;

	INIT_DBG_DEV(dev, "begin");

	/* Allocate, clear, and link in our primary soft structure */
	sc = device_get_softc(dev);
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;
	vsi = &sc->vsi;
	vsi->dev = dev;

	/* Initialize hw struct */
	ixlv_init_hw(sc);

	/* Allocate filter lists */
	ixlv_init_filters(sc);

	/* Save this tunable */
	vsi->enable_head_writeback = ixlv_enable_head_writeback;

	/* Core Lock Init */
	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    "IXL SC Lock", MTX_DEF);

	/* Set up the timer callout */
	callout_init_mtx(&sc->timer, &sc->mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixlv_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "VF Device is ready for configuration");

	error = ixlv_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "PF API version verified");

	/* Need API version before sending reset message */
	error = ixlv_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF reset complete");

	/* Ask for VF config from PF */
	error = ixlv_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->rss_key_size,
	    sc->vf_res->rss_lut_size);
#ifdef IXL_DEBUG
	device_printf(dev, "Offload flags: 0x%b\n",
	    sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
#endif

	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}

	INIT_DBG_DEV(dev, "Resource Acquisition complete");

	/* If no mac address was assigned just make a random one */
	if (!ixlv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
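		/*
		 * Clear the I/G (multicast) bit and set the U/L bit so
		 * the random address is a valid locally-administered
		 * unicast MAC.
		 */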
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Now that the number of queues for this VF is known, set up interrupts */
	sc->msix = ixlv_init_msix(sc);
	/* We fail without MSIX support */
	if (sc->msix == 0) {
		error = ENXIO;
		goto err_res_buf;
	}

	vsi->id = sc->vsi_res->vsi_id;
	vsi->back = (void *)sc;
	vsi->flags |= IXL_FLAGS_IS_VF | IXL_FLAGS_USES_MSIX;

	ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size);

	/* This allocates the memory and early settings */
	if (ixlv_setup_queues(sc) != 0) {
		device_printf(dev, "%s: setup queues failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	/* Do queue interrupt setup */
	if (ixlv_assign_msix(sc) != 0) {
		device_printf(dev, "%s: allocating queue interrupts failed!\n",
		    __func__);
		error = ENXIO;
		goto out;
	}

	INIT_DBG_DEV(dev, "Queue memory and interrupts setup");

	/* Setup the stack interface */
	if (ixlv_setup_interface(dev, sc) != 0) {
		device_printf(dev, "%s: setup interface failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	INIT_DBG_DEV(dev, "Interface setup complete");

	/* Start AdminQ taskqueue */
	ixlv_init_taskqueue(sc);

	/* We expect a link state message, so schedule the AdminQ task now */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);

	/* Initialize stats */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	ixlv_add_sysctls(sc);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	/* We want AQ enabled early */
	ixlv_enable_adminq_irq(hw);

	/* Set things up to run init */
	sc->init_state = IXLV_INIT_READY;

	ixl_vc_init_mgr(sc, &sc->vc_mgr);

	INIT_DBG_DEV(dev, "end");
	return (error);

out:
	ixlv_free_queues(vsi);
	ixlv_teardown_adminq_msix(sc);
err_res_buf:
	free(sc->vf_res, M_DEVBUF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	ixlv_free_pci_resources(sc);
err_early:
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);
	INIT_DBG_DEV(dev, "end: error %d", error);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);
	struct ixl_vsi 	*vsi = &sc->vsi;
	struct i40e_hw	*hw = &sc->hw;
	enum i40e_status_code	status;

	INIT_DBG_DEV(dev, "begin");

	/* Make sure VLANS are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		if_printf(vsi->ifp, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Remove all the media and link information */
	ifmedia_removeall(&sc->media);

	/* Stop driver */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sc->mtx);
		ixlv_stop(sc);
		mtx_unlock(&sc->mtx);
	}

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Drain VC mgr */
	callout_drain(&sc->vc_mgr.callout);

	ixlv_disable_adminq_irq(hw);
	ixlv_teardown_adminq_msix(sc);
	/* Drain admin queue taskqueue */
	taskqueue_free(sc->tq);
	status = i40e_shutdown_adminq(&sc->hw);
	if (status != I40E_SUCCESS) {
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));
	}

	if_free(vsi->ifp);
	free(sc->vf_res, M_DEVBUF);
	ixlv_free_queues(vsi);
	ixlv_free_pci_resources(sc);
	ixlv_free_filters(sc);

	bus_generic_detach(dev);
	mtx_destroy(&sc->mtx);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);

	INIT_DBG_DEV(dev, "begin");

	mtx_lock(&sc->mtx);
	ixlv_stop(sc);
	mtx_unlock(&sc->mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);
}

/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *	- the hardware handles these together so we
 *	  need to tweak them
 */
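/*
 * For example (hypothetical invocation): "ifconfig ixlv0 -txcsum" while
 * TSO4 is enabled clears both IFCAP_TXCSUM and IFCAP_TSO4, and a later
 * "ifconfig ixlv0 txcsum" restores TSO4 via the IXL_FLAGS_KEEP_TSO4 flag.
 */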
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			if_printf(ifp,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			if_printf(ifp,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			if_printf(ifp,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			if_printf(ifp,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;
	struct ifreq		*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr 		*ifa = (struct ifaddr *)data;
	bool			avoid_reset = FALSE;
#endif
	int             	error = 0;


	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixlv_init(vsi);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
		mtx_lock(&sc->mtx);
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
			IOCTL_DBG_IF(ifp, "mtu too large");
		} else {
			IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
			// ERJ: Interestingly enough, these types don't match
			ifp->if_mtu = (u_long)ifr->ifr_mtu;
			vsi->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_init_locked(sc);
		}
		mtx_unlock(&sc->mtx);
		break;
	case SIOCSIFFLAGS:
		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixlv_init_locked(sc);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_stop(sc);
		sc->if_flags = ifp->if_flags;
		mtx_unlock(&sc->mtx);
		break;
	case SIOCADDMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_add_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
		if (sc->init_state == IXLV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_del_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

		ixlv_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixlv_init(vsi);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
** Reinitializing the VF is unfortunately more complicated than
** for a physical device: the PF must more or less completely
** recreate our memory, so many things that were done only once
** at attach in traditional drivers must be redone at each
** reinitialization. This function does that 'prelude' so we
** can then call the normal locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_mac_filter  *mf, *mf_temp;
	struct ixlv_vlan_filter	*vf;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixlv_stop(sc);

	error = ixlv_reset(sc);

	INIT_DBG_IF(ifp, "VF was reset");

	/* set the state in case we went thru RESET */
	sc->init_state = IXLV_RUNNING;

	/*
	** Resetting the VF drops all filters from hardware;
	** we need to mark them to be re-added in init.
	*/
	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
		if (mf->flags & IXL_FILTER_DEL) {
			SLIST_REMOVE(sc->mac_filters, mf,
			    ixlv_mac_filter, next);
			free(mf, M_DEVBUF);
		} else
			mf->flags |= IXL_FILTER_ADD;
	}
	if (vsi->num_vlans != 0)
		SLIST_FOREACH(vf, sc->vlan_filters, next)
			vf->flags = IXL_FILTER_ADD;
	else { /* clean any stale filters */
		while (!SLIST_EMPTY(sc->vlan_filters)) {
			vf = SLIST_FIRST(sc->vlan_filters);
			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
			free(vf, M_DEVBUF);
		}
	}

	ixlv_enable_adminq_irq(hw);
	ixl_vc_flush(&sc->vc_mgr);

	INIT_DBG_IF(ifp, "end");
	return (error);
}

static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
	enum i40e_status_code code)
{
	struct ixlv_sc *sc;

	sc = arg;

	/*
	 * Ignore "Adapter Stopped" message as that happens if an ifconfig down
	 * happens while a command is in progress, so we don't print an error
	 * in that case.
	 */
	if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
		if_printf(sc->vsi.ifp,
		    "Error %s waiting for PF to complete operation %d\n",
		    i40e_stat_str(&sc->hw, code), cmd->request);
	}
}

static void
ixlv_init_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	int			 error = 0;

	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Do a reinit first if an init has already been done */
	if ((sc->init_state == IXLV_RUNNING) ||
	    (sc->init_state == IXLV_RESET_REQUIRED) ||
	    (sc->init_state == IXLV_RESET_PENDING))
		error = ixlv_reinit_locked(sc);
	/* Don't bother with init if we failed reinit */
	if (error)
		goto init_done;

	/* Remove existing MAC filter if new MAC addr is set */
	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
		error = ixlv_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Check for an LAA mac address... */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

	/* Add mac filter for this VF to PF */
	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
		if (!error || error == EEXIST)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Setup vlan's if needed */
	ixlv_setup_vlan_filters(sc);

	/* Prepare the queues for operation */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct  rx_ring	*rxr = &que->rxr;

		ixl_init_tx_ring(que);

		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;
		ixl_init_rx_ring(que);
	}

	/* Set initial ITR values */
	ixlv_configure_itr(sc);

	/* Configure queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

	/* Set up RSS */
	ixlv_config_rss(sc);

	/* Map vectors */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

	/* Enable queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

	/* Start the local timer */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

	sc->init_state = IXLV_RUNNING;

init_done:
	INIT_DBG_IF(ifp, "end");
	return;
}

/*
**  Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
	struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
	struct ixlv_sc *sc = vsi->back;
	int retries = 0;

	/* Prevent init from running again while waiting for AQ calls
	 * made in init_locked() to complete. */
	mtx_lock(&sc->mtx);
	if (sc->init_in_progress) {
		mtx_unlock(&sc->mtx);
		return;
	} else
		sc->init_in_progress = true;

	ixlv_init_locked(sc);
	mtx_unlock(&sc->mtx);

	/* Wait for init_locked to finish */
	while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
	    && ++retries < IXLV_MAX_INIT_WAIT) {
		i40e_msec_pause(25);
	}
	if (retries >= IXLV_MAX_INIT_WAIT) {
		if_printf(vsi->ifp,
		    "Init failed to complete in allotted time!\n");
	}

	mtx_lock(&sc->mtx);
	sc->init_in_progress = false;
	mtx_unlock(&sc->mtx);
}

/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);
}

/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int error = 0, ret_error = 0, asq_retries = 0;
	bool send_api_ver_retried = 0;

	/* Need to set these AQ parameters before initializing AQ */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
		/* Initialize admin queue */
		error = i40e_init_adminq(hw);
		if (error) {
			device_printf(dev, "%s: init_adminq failed: %d\n",
			    __func__, error);
			ret_error = 1;
			continue;
		}

		INIT_DBG_DEV(dev, "Initialized Admin Queue; starting"
		    " send_api_ver attempt %d", i+1);

retry_send:
		/* Send VF's API version */
		error = ixlv_send_api_ver(sc);
		if (error) {
			i40e_shutdown_adminq(hw);
			ret_error = 2;
			device_printf(dev, "%s: unable to send api"
			    " version to PF on attempt %d, error %d\n",
			    __func__, i+1, error);
		}

		asq_retries = 0;
		while (!i40e_asq_done(hw)) {
			if (++asq_retries > IXLV_AQ_MAX_ERR) {
				i40e_shutdown_adminq(hw);
				device_printf(dev, "Admin Queue timeout "
				    "(waiting for send_api_ver), %d more tries...\n",
				    IXLV_AQ_MAX_ERR - (i + 1));
				ret_error = 3;
				break;
			}
			i40e_msec_pause(10);
		}
		if (asq_retries > IXLV_AQ_MAX_ERR)
			continue;

		INIT_DBG_DEV(dev, "Sent API version message to PF");

		/* Verify that the VF accepts the PF's API version */
		error = ixlv_verify_api_ver(sc);
		if (error == ETIMEDOUT) {
			if (!send_api_ver_retried) {
				/* Resend message, one more time */
				send_api_ver_retried = true;
				device_printf(dev,
				    "%s: Timeout while verifying API version on first"
				    " try!\n", __func__);
				goto retry_send;
			} else {
				device_printf(dev,
				    "%s: Timeout while verifying API version on second"
				    " try!\n", __func__);
				ret_error = 4;
				break;
			}
		}
		if (error) {
			device_printf(dev,
			    "%s: Unable to verify API version,"
			    " error %s\n", __func__, i40e_stat_str(hw, error));
			ret_error = 5;
		}
		break;
	}

	if (ret_error >= 4)
		i40e_shutdown_adminq(hw);
	return (ret_error);
}

/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int bufsz, error = 0, ret_error = 0;
	int asq_retries, retried = 0;

retry_config:
	error = ixlv_send_vf_config_msg(sc);
	if (error) {
		device_printf(dev,
		    "%s: Unable to send VF config request, attempt %d,"
		    " error %d\n", __func__, retried + 1, error);
		ret_error = 2;
	}

	asq_retries = 0;
	while (!i40e_asq_done(hw)) {
		if (++asq_retries > IXLV_AQ_MAX_ERR) {
			device_printf(dev, "%s: Admin Queue timeout "
			    "(waiting for send_vf_config_msg), attempt %d\n",
			    __func__, retried + 1);
			ret_error = 3;
			goto fail;
		}
		i40e_msec_pause(10);
	}

	INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
	    retried + 1);

	if (!sc->vf_res) {
		bufsz = sizeof(struct virtchnl_vf_resource) +
		    (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
		sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
		if (!sc->vf_res) {
			device_printf(dev,
			    "%s: Unable to allocate memory for VF configuration"
			    " message from PF on attempt %d\n", __func__, retried + 1);
			ret_error = 1;
			goto fail;
		}
	}

	/* Check for VF config response */
	error = ixlv_get_vf_config(sc);
	if (error == ETIMEDOUT) {
		/* The 1st time we timeout, send the configuration message again */
		if (!retried) {
			retried++;
			goto retry_config;
		}
		device_printf(dev,
		    "%s: ixlv_get_vf_config() timed out waiting for a response\n",
		    __func__);
	}
	if (error) {
		device_printf(dev,
		    "%s: Unable to get VF configuration from PF after %d tries!\n",
		    __func__, retried + 1);
		ret_error = 4;
	}
	goto done;

fail:
	free(sc->vf_res, M_DEVBUF);
done:
	return (ret_error);
}

/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	int rid, want, vectors, queues, available;
	int auto_max_queues;

	rid = PCIR_BAR(IXL_MSIX_BAR);
	sc->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->msix_mem) {
		/* May not be enabled */
		device_printf(sc->dev,
		    "Unable to map MSIX table\n");
		goto fail;
	}

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, sc->msix_mem);
		sc->msix_mem = NULL;
		goto fail;
	}

	/* Clamp queues to number of CPUs and # of MSI-X vectors available */
	auto_max_queues = min(mp_ncpus, available - 1);
	/* Clamp queues to # assigned to VF by PF */
	auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);

	/* Override with tunable value if tunable is less than autoconfig count */
	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
		queues = ixlv_max_queues;
	/* Use autoconfig amount if that's lower */
	else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
		device_printf(dev, "ixlv_max_queues (%d) is too large, using "
		    "autoconfig amount (%d)...\n",
		    ixlv_max_queues, auto_max_queues);
		queues = auto_max_queues;
	}
	/* Limit maximum auto-configured queues to 8 if no user value is set */
	else
		queues = min(auto_max_queues, 8);
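	/*
	 * Worked example (hypothetical counts): with 8 CPUs, 5 MSI-X
	 * vectors available and 16 queue pairs granted by the PF,
	 * auto_max_queues is min(8, 5 - 1) = 4; with no tunable set this
	 * picks min(4, 8) = 4 queues, plus one vector below for the
	 * admin queue.
	 */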

#ifdef  RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(sc->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		goto fail;
	}

#ifdef RSS
	/*
	* If we're doing RSS, the number of queues needs to
	* match the number of RSS buckets that are configured.
	*
	* + If there's more queues than RSS buckets, we'll end
	*   up with queues that get no traffic.
	*
	* + If there's more RSS buckets than queues, we'll end
	*   up having multiple RSS buckets map to the same queue,
	*   so there'll be some contention.
	*/
	if (queues != rss_getnumbuckets()) {
		device_printf(dev,
		    "%s: queues (%d) != RSS buckets (%d)"
		    "; performance will be impacted.\n",
		     __func__, queues, rss_getnumbuckets());
	}
#endif

	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(sc->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		sc->msix = vectors;
		sc->vsi.num_queues = queues;
	}

	/* Next we need to setup the vector for the Admin Queue */
	rid = 1;	/* zero vector + 1 */
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: AQ interrupt\n");
		goto fail;
	}
	if (bus_setup_intr(dev, sc->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixlv_msix_adminq, sc, &sc->tag)) {
		sc->res = NULL;
		device_printf(dev, "Failed to register AQ handler");
		goto fail;
	}
	bus_describe_intr(dev, sc->res, sc->tag, "adminq");

	return (vectors);

fail:
	/* The VF driver MUST use MSIX */
	return (0);
}

static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
	int             rid;
	device_t        dev = sc->dev;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag =
		rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
		rman_get_bushandle(sc->pci_mem);
	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
	sc->hw.back = &sc->osdep;

	ixl_set_busmaster(dev);
	ixl_set_msix_enable(dev);

	/* Disable adminq interrupts (just in case) */
	ixlv_disable_adminq_irq(&sc->hw);

	return (0);
}

/*
 * Free MSI-X related resources for a single queue
 */
static void
ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que)
{
	device_t                dev = sc->dev;

	/*
	**  Release all msix queue resources:
	*/
	if (que->tag != NULL) {
		bus_teardown_intr(dev, que->res, que->tag);
		que->tag = NULL;
	}
	if (que->res != NULL) {
		int rid = que->msix + 1;
		bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
		que->res = NULL;
	}
	if (que->tq != NULL) {
		taskqueue_free(que->tq);
		que->tq = NULL;
	}
}

static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
	device_t                dev = sc->dev;

	pci_release_msi(dev);

	if (sc->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), sc->pci_mem);
}

/*
 * Create taskqueue and tasklet for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
	int error = 0;

	TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

	sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
	    device_get_nameunit(sc->dev));

	return (error);
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	struct 		ixl_vsi *vsi = &sc->vsi;
	struct 		ixl_queue *que = vsi->queues;
	struct		tx_ring	 *txr;
	int 		error, rid, vector = 1;
#ifdef	RSS
	cpuset_t	cpu_mask;
#endif

	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixlv_msix_que, que, &que->tag);
		if (error) {
			que->tag = NULL;
			device_printf(dev, "Failed to register que handler");
			return (error);
		}
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
		/* Bind the vector to a CPU */
#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		bus_bind_intr(dev, que->res, cpu_id);
		que->msix = vector;
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixlv_handle_que, que);
		que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
		    &cpu_mask, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s que", device_get_nameunit(dev));
#endif

	}

	return (0);
}

/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	int		error = 0;

	/* Ask the PF to reset us if we are initiating */
	if (sc->init_state != IXLV_RESET_PENDING)
		ixlv_request_reset(sc);

	i40e_msec_pause(100);
	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: VF reset failed\n",
		    __func__);
		return (error);
	}

	error = i40e_shutdown_adminq(hw);
	if (error) {
		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "%s: init_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	return (0);
}

static int
ixlv_reset_complete(struct i40e_hw *hw)
{
	u32 reg;

	/* Wait up to ~10 seconds */
	for (int i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

		if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg == VIRTCHNL_VFR_COMPLETED))
			return (0);
		i40e_msec_pause(100);
	}

	return (EBUSY);
}


/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
	struct ifnet		*ifp;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	INIT_DBG_DEV(dev, "begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: could not allocate ifnet"
		    " structure!\n", __func__);
		return (-1);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_mtu = ETHERMTU;
#if __FreeBSD_version >= 1100000
	ifp->if_baudrate = IF_Gbps(40);
#else
	if_initbaudrate(ifp, IF_Gbps(40));
#endif
	ifp->if_init = ixlv_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;
	ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;

	ether_ifattach(ifp, sc->hw.mac.addr);

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
	ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
	ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_VLAN_HWCSUM
			     |  IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default, if vlans are
	** created on another pseudo device (eg. lagg)
	** then vlan events are not passed thru, breaking
	** operation, but with HW FILTER off it works. If
	** using vlans directly on the ixl driver you can
	** enable this and get full hardware tag filtering.
	*/
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
		     ixlv_media_status);

	/* Media types based on reported link speed over AdminQ */
	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	INIT_DBG_DEV(dev, "end");
	return (0);
}

/*
** Allocate and setup a single queue
*/
static int
ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que)
{
	device_t		dev = sc->dev;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int 			rsize, tsize;
	int			error = I40E_SUCCESS;

	txr = &que->txr;
	txr->que = que;
	txr->tail = I40E_QTX_TAIL1(que->me);
	/* Initialize the TX lock */
	snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
	    device_get_nameunit(dev), que->me);
	mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
	/*
	 * Create the TX descriptor ring
	 *
	 * In Head Writeback mode, the descriptor ring is one bigger
	 * than the number of descriptors for space for the HW to
	 * write back index of last completed descriptor.
	 */
	if (sc->vsi.enable_head_writeback) {
		tsize = roundup2((que->num_tx_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
	} else {
		tsize = roundup2((que->num_tx_desc *
		    sizeof(struct i40e_tx_desc)), DBA_ALIGN);
	}
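	/*
	 * E.g. (hypothetical ring size): with 1024 TX descriptors of 16
	 * bytes each, tsize is 16384; head-writeback mode adds a u32 and
	 * rounds the total up to DBA_ALIGN.
	 */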
	if (i40e_allocate_dma_mem(&sc->hw,
	    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
		device_printf(dev,
		    "Unable to allocate TX Descriptor memory\n");
		error = ENOMEM;
		goto err_destroy_tx_mtx;
	}
	txr->base = (struct i40e_tx_desc *)txr->dma.va;
	bzero((void *)txr->base, tsize);
	/* Now allocate transmit soft structs for the ring */
	if (ixl_allocate_tx_data(que)) {
		device_printf(dev,
		    "Critical Failure setting up TX structures\n");
		error = ENOMEM;
		goto err_free_tx_dma;
	}
	/* Allocate a buf ring */
	txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
	    M_WAITOK, &txr->mtx);
	if (txr->br == NULL) {
		device_printf(dev,
		    "Critical Failure setting up TX buf ring\n");
		error = ENOMEM;
		goto err_free_tx_data;
	}

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(que->num_rx_desc *
	    sizeof(union i40e_rx_desc), DBA_ALIGN);
	rxr = &que->rxr;
	rxr->que = que;
	rxr->tail = I40E_QRX_TAIL1(que->me);

	/* Initialize the RX side lock */
	snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
	    device_get_nameunit(dev), que->me);
	mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

	if (i40e_allocate_dma_mem(&sc->hw,
	    &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
		device_printf(dev,
		    "Unable to allocate RX Descriptor memory\n");
		error = ENOMEM;
		goto err_destroy_rx_mtx;
	}
	rxr->base = (union i40e_rx_desc *)rxr->dma.va;
	bzero((void *)rxr->base, rsize);

	/* Allocate receive soft structs for the ring */
	if (ixl_allocate_rx_data(que)) {
		device_printf(dev,
		    "Critical Failure setting up receive structs\n");
		error = ENOMEM;
		goto err_free_rx_dma;
	}

	return (0);

err_free_rx_dma:
	i40e_free_dma_mem(&sc->hw, &rxr->dma);
err_destroy_rx_mtx:
	mtx_destroy(&rxr->mtx);
	/* err_free_tx_buf_ring */
	buf_ring_free(txr->br, M_DEVBUF);
err_free_tx_data:
	ixl_free_que_tx(que);
err_free_tx_dma:
	i40e_free_dma_mem(&sc->hw, &txr->dma);
err_destroy_tx_mtx:
	mtx_destroy(&txr->mtx);

	return (error);
}

/*
** Allocate and setup the interface queues
*/
static int
ixlv_setup_queues(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi;
	struct ixl_queue	*que;
	int			i;
	int			error = I40E_SUCCESS;

	vsi = &sc->vsi;
	vsi->back = (void *)sc;
	vsi->hw = &sc->hw;
	vsi->num_vlans = 0;

	/* Get memory for the station queues */
	if (!(vsi->queues =
		(struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
		vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate queue memory\n");
			return ENOMEM;
	}

	for (i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_tx_desc = vsi->num_tx_desc;
		que->num_rx_desc = vsi->num_rx_desc;
		que->me = i;
		que->vsi = vsi;

		if (ixlv_setup_queue(sc, que)) {
			error = ENOMEM;
			goto err_free_queues;
		}
	}

	return (0);

err_free_queues:
	while (i--)
		ixlv_free_queue(sc, &vsi->queues[i]);

	free(vsi->queues, M_DEVBUF);

	return (error);
}

/*
** This routine is run via a vlan config EVENT; it
** lets us use the HW Filter table since we can get
** the vlan id. This just creates the entry in the
** soft version of the VFTA; init will repopulate
** the real table.
*/
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = arg;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;


	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/* Sanity check - make sure it doesn't already exist */
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag)
			return;
	}

	mtx_lock(&sc->mtx);
	v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (v == NULL) {
		/* No memory for a soft VFTA entry; drop the request. */
		mtx_unlock(&sc->mtx);
		return;
	}
	++vsi->num_vlans;
	v->vlan = vtag;
	v->flags = IXL_FILTER_ADD;
	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1850	ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
1851	    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
1852	mtx_unlock(&sc->mtx);
1853	return;
1854}
1855
1856/*
1857** This routine is run via an vlan
1858** This routine is run via a vlan
1859** unconfig EVENT; it removes our
1860** entry from the soft VFTA.
1861static void
1862ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1863{
1864	struct ixl_vsi		*vsi = arg;
1865	struct ixlv_sc		*sc = vsi->back;
1866	struct ixlv_vlan_filter	*v;
1867	int			i = 0;
1868
1869	if (ifp->if_softc != arg)
1870		return;
1871
1872	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1873		return;
1874
1875	mtx_lock(&sc->mtx);
1876	SLIST_FOREACH(v, sc->vlan_filters, next) {
1877		if (v->vlan == vtag) {
1878			v->flags = IXL_FILTER_DEL;
1879			++i;
1880			--vsi->num_vlans;
1881		}
1882	}
1883	if (i)
1884		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
1885		    IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
1886	mtx_unlock(&sc->mtx);
1887	return;
1888}
1889
1890/*
1891** Get a new filter and add it to the mac filter list.
1892*/
1893static struct ixlv_mac_filter *
1894ixlv_get_mac_filter(struct ixlv_sc *sc)
1895{
1896	struct ixlv_mac_filter	*f;
1897
1898	f = malloc(sizeof(struct ixlv_mac_filter),
1899	    M_DEVBUF, M_NOWAIT | M_ZERO);
1900	if (f)
1901		SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1902
1903	return (f);
1904}
1905
1906/*
1907** Find the filter with matching MAC address
1908*/
1909static struct ixlv_mac_filter *
1910ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1911{
1912	struct ixlv_mac_filter	*f;
1913	bool				match = FALSE;
1914
1915	SLIST_FOREACH(f, sc->mac_filters, next) {
1916		if (cmp_etheraddr(f->macaddr, macaddr)) {
1917			match = TRUE;
1918			break;
1919		}
1920	}
1921
1922	if (!match)
1923		f = NULL;
1924	return (f);
1925}
1926
1927static int
1928ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
1929{
1930	device_t		dev = sc->dev;
1931	int			error = 0;
1932
1933	if (sc->tag != NULL) {
1934		error = bus_teardown_intr(dev, sc->res, sc->tag);
1935		if (error) {
1936			device_printf(dev, "bus_teardown_intr() for"
1937			    " interrupt 0 failed\n");
1938			/* XXX: return (ENXIO)? */
1939		}
1940		sc->tag = NULL;
1941	}
1942	if (sc->res != NULL) {
1943		error = bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1944		if (error) {
1945			device_printf(dev, "bus_release_resource() for"
1946			    " interrupt 0 failed\n");
1947			/* XXX: return (ENXIO)? */
1948		}
1949		sc->res = NULL;
1950	}
1951
1952	return (0);
1953
1954}
1955
1956/*
1957** Admin Queue interrupt handler
1958*/
1959static void
1960ixlv_msix_adminq(void *arg)
1961{
1962	struct ixlv_sc	*sc = arg;
1963	struct i40e_hw	*hw = &sc->hw;
1964	u32		reg, mask;
1965
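	/*
	 * ICR01 is read-to-clear; 'mask' (the enabled causes) is read
	 * but not otherwise used here.  Setting CLEARPBA clears the
	 * pending-bit array so this vector can assert again.
	 */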
1966	reg = rd32(hw, I40E_VFINT_ICR01);
1967	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1968
1969	reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1970	reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
1971	wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1972
1973	/* schedule task */
1974	taskqueue_enqueue(sc->tq, &sc->aq_irq);
1975	return;
1976}
1977
1978void
1979ixlv_enable_intr(struct ixl_vsi *vsi)
1980{
1981	struct i40e_hw		*hw = vsi->hw;
1982	struct ixl_queue	*que = vsi->queues;
1983
1984	ixlv_enable_adminq_irq(hw);
1985	for (int i = 0; i < vsi->num_queues; i++, que++)
1986		ixlv_enable_queue_irq(hw, que->me);
1987}
1988
1989void
1990ixlv_disable_intr(struct ixl_vsi *vsi)
1991{
1992	struct i40e_hw		*hw = vsi->hw;
1993	struct ixl_queue	*que = vsi->queues;
1994
1995	ixlv_disable_adminq_irq(hw);
1996	for (int i = 0; i < vsi->num_queues; i++, que++)
1997		ixlv_disable_queue_irq(hw, que->me);
1998}
1999
2000
2001static void
2002ixlv_disable_adminq_irq(struct i40e_hw *hw)
2003{
2004	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
2005	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
2006	/* flush */
2007	rd32(hw, I40E_VFGEN_RSTAT);
2008	return;
2009}
2010
2011static void
2012ixlv_enable_adminq_irq(struct i40e_hw *hw)
2013{
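	/*
	 * INTENA enables the vector; writing all ones to ITR_INDX
	 * (index 3) selects "no ITR", i.e. no throttling update.
	 */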
2014	wr32(hw, I40E_VFINT_DYN_CTL01,
2015	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
2016	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
2017	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
2018	/* flush */
2019	rd32(hw, I40E_VFGEN_RSTAT);
2020	return;
2021}
2022
2023static void
2024ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
2025{
2026	u32		reg;
2027
2028	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
2029	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
2030	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
2031	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
2032}
2033
2034static void
2035ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
2036{
2037	wr32(hw, I40E_VFINT_DYN_CTLN1(id),
2038	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
2039	rd32(hw, I40E_VFGEN_RSTAT);
2040	return;
2041}
2042
2043/*
2044 * Get initial ITR values from tunable values.
2045 */
2046static void
2047ixlv_configure_itr(struct ixlv_sc *sc)
2048{
2049	struct i40e_hw		*hw = &sc->hw;
2050	struct ixl_vsi		*vsi = &sc->vsi;
2051	struct ixl_queue	*que = vsi->queues;
2052
2053	vsi->rx_itr_setting = ixlv_rx_itr;
2054	vsi->tx_itr_setting = ixlv_tx_itr;
2055
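	/*
	 * The ITR registers hold the minimum gap between interrupts in
	 * 2 usec units; e.g. IXL_ITR_8K (0x3E) is 62 * 2 = 124 usec,
	 * or roughly 8000 interrupts per second.
	 */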
2056	for (int i = 0; i < vsi->num_queues; i++, que++) {
2057		struct tx_ring	*txr = &que->txr;
2058		struct rx_ring 	*rxr = &que->rxr;
2059
2060		wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
2061		    vsi->rx_itr_setting);
2062		rxr->itr = vsi->rx_itr_setting;
2063		rxr->latency = IXL_AVE_LATENCY;
2064
2065		wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
2066		    vsi->tx_itr_setting);
2067		txr->itr = vsi->tx_itr_setting;
2068		txr->latency = IXL_AVE_LATENCY;
2069	}
2070}
2071
2072/*
2073** Provide an update to the queue RX
2074** interrupt moderation value.
2075*/
2076static void
2077ixlv_set_queue_rx_itr(struct ixl_queue *que)
2078{
2079	struct ixl_vsi	*vsi = que->vsi;
2080	struct i40e_hw	*hw = vsi->hw;
2081	struct rx_ring	*rxr = &que->rxr;
2082	u16		rx_itr;
2083	u16		rx_latency = 0;
2084	int		rx_bytes;
2085
2086
2087	/* Idle, do nothing */
2088	if (rxr->bytes == 0)
2089		return;
2090
2091	if (ixlv_dynamic_rx_itr) {
2092		rx_bytes = rxr->bytes/rxr->itr;
2093		rx_itr = rxr->itr;
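		/*
		 * bytes/itr is a rough bytes-per-interrupt-interval load
		 * estimate; the thresholds below pick a latency bucket.
		 */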
2094
2095		/* Adjust latency range */
2096		switch (rxr->latency) {
2097		case IXL_LOW_LATENCY:
2098			if (rx_bytes > 10) {
2099				rx_latency = IXL_AVE_LATENCY;
2100				rx_itr = IXL_ITR_20K;
2101			}
2102			break;
2103		case IXL_AVE_LATENCY:
2104			if (rx_bytes > 20) {
2105				rx_latency = IXL_BULK_LATENCY;
2106				rx_itr = IXL_ITR_8K;
2107			} else if (rx_bytes <= 10) {
2108				rx_latency = IXL_LOW_LATENCY;
2109				rx_itr = IXL_ITR_100K;
2110			}
2111			break;
2112		case IXL_BULK_LATENCY:
2113			if (rx_bytes <= 20) {
2114				rx_latency = IXL_AVE_LATENCY;
2115				rx_itr = IXL_ITR_20K;
2116			}
2117			break;
2118		}
2119
2120		rxr->latency = rx_latency;
2121
2122		if (rx_itr != rxr->itr) {
2123			/* smooth: 9:1 weighted harmonic mean (old:new) */
2124			rx_itr = (10 * rx_itr * rxr->itr) /
2125			    ((9 * rx_itr) + rxr->itr);
2126			rxr->itr = min(rx_itr, IXL_MAX_ITR);
2127			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2128			    que->me), rxr->itr);
2129		}
2130	} else { /* We may have toggled to non-dynamic */
2131		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
2132			vsi->rx_itr_setting = ixlv_rx_itr;
2133		/* Update the hardware if needed */
2134		if (rxr->itr != vsi->rx_itr_setting) {
2135			rxr->itr = vsi->rx_itr_setting;
2136			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
2137			    que->me), rxr->itr);
2138		}
2139	}
2140	rxr->bytes = 0;
2141	rxr->packets = 0;
2142	return;
2143}
2144
2145
2146/*
2147** Provide an update to the queue TX
2148** interrupt moderation value.
2149*/
2150static void
2151ixlv_set_queue_tx_itr(struct ixl_queue *que)
2152{
2153	struct ixl_vsi	*vsi = que->vsi;
2154	struct i40e_hw	*hw = vsi->hw;
2155	struct tx_ring	*txr = &que->txr;
2156	u16		tx_itr;
2157	u16		tx_latency = 0;
2158	int		tx_bytes;
2159
2160
2161	/* Idle, do nothing */
2162	if (txr->bytes == 0)
2163		return;
2164
2165	if (ixlv_dynamic_tx_itr) {
2166		tx_bytes = txr->bytes/txr->itr;
2167		tx_itr = txr->itr;
2168
2169		switch (txr->latency) {
2170		case IXL_LOW_LATENCY:
2171			if (tx_bytes > 10) {
2172				tx_latency = IXL_AVE_LATENCY;
2173				tx_itr = IXL_ITR_20K;
2174			}
2175			break;
2176		case IXL_AVE_LATENCY:
2177			if (tx_bytes > 20) {
2178				tx_latency = IXL_BULK_LATENCY;
2179				tx_itr = IXL_ITR_8K;
2180			} else if (tx_bytes <= 10) {
2181				tx_latency = IXL_LOW_LATENCY;
2182				tx_itr = IXL_ITR_100K;
2183			}
2184			break;
2185		case IXL_BULK_LATENCY:
2186			if (tx_bytes <= 20) {
2187				tx_latency = IXL_AVE_LATENCY;
2188				tx_itr = IXL_ITR_20K;
2189			}
2190			break;
2191		}
2192
2193		txr->latency = tx_latency;
2194
2195		if (tx_itr != txr->itr) {
2196			/* smooth: 9:1 weighted harmonic mean (old:new) */
2197			tx_itr = (10 * tx_itr * txr->itr) /
2198			    ((9 * tx_itr) + txr->itr);
2199			txr->itr = min(tx_itr, IXL_MAX_ITR);
2200			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2201			    que->me), txr->itr);
2202		}
2203
2204	} else { /* We may have toggled to non-dynamic */
2205		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
2206			vsi->tx_itr_setting = ixlv_tx_itr;
2207		/* Update the hardware if needed */
2208		if (txr->itr != vsi->tx_itr_setting) {
2209			txr->itr = vsi->tx_itr_setting;
2210			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
2211			    que->me), txr->itr);
2212		}
2213	}
2214	txr->bytes = 0;
2215	txr->packets = 0;
2216	return;
2217}
2218
2219
2220/*
2221**
2222** MSIX Interrupt Handlers and Tasklets
2223**
2224*/
2225static void
2226ixlv_handle_que(void *context, int pending)
2227{
2228	struct ixl_queue *que = context;
2229	struct ixl_vsi *vsi = que->vsi;
2230	struct i40e_hw  *hw = vsi->hw;
2231	struct tx_ring  *txr = &que->txr;
2232	struct ifnet    *ifp = vsi->ifp;
2233	bool		more;
2234
2235	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2236		more = ixl_rxeof(que, IXL_RX_LIMIT);
2237		mtx_lock(&txr->mtx);
2238		ixl_txeof(que);
2239		if (!drbr_empty(ifp, txr->br))
2240			ixl_mq_start_locked(ifp, txr);
2241		mtx_unlock(&txr->mtx);
2242		if (more) {
2243			taskqueue_enqueue(que->tq, &que->task);
2244			return;
2245		}
2246	}
2247
2248	/* Re-enable this queue's interrupt */
2249	ixlv_enable_queue_irq(hw, que->me);
2250	return;
2251}
2252
2253
2254/*********************************************************************
2255 *
2256 *  MSIX Queue Interrupt Service routine
2257 *
2258 **********************************************************************/
2259static void
2260ixlv_msix_que(void *arg)
2261{
2262	struct ixl_queue	*que = arg;
2263	struct ixl_vsi	*vsi = que->vsi;
2264	struct i40e_hw	*hw = vsi->hw;
2265	struct tx_ring	*txr = &que->txr;
2266	bool		more_tx, more_rx;
2267
2268	/* Spurious interrupts are ignored */
2269	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
2270		return;
2271
2272	++que->irqs;
2273
2274	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
2275
2276	mtx_lock(&txr->mtx);
2277	more_tx = ixl_txeof(que);
2278	/*
2279	** Make certain that if the stack
2280	** has anything queued the task gets
2281	** scheduled to handle it.
2282	*/
2283	if (!drbr_empty(vsi->ifp, txr->br))
2284		more_tx = 1;
2285	mtx_unlock(&txr->mtx);
2286
2287	ixlv_set_queue_rx_itr(que);
2288	ixlv_set_queue_tx_itr(que);
2289
2290	if (more_tx || more_rx)
2291		taskqueue_enqueue(que->tq, &que->task);
2292	else
2293		ixlv_enable_queue_irq(hw, que->me);
2294
2295	return;
2296}
2297
2298
2299/*********************************************************************
2300 *
2301 *  Media Ioctl callback
2302 *
2303 *  This routine is called whenever the user queries the status of
2304 *  the interface using ifconfig.
2305 *
2306 **********************************************************************/
2307static void
2308ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
2309{
2310	struct ixl_vsi		*vsi = ifp->if_softc;
2311	struct ixlv_sc	*sc = vsi->back;
2312
2313	INIT_DBG_IF(ifp, "begin");
2314
2315	mtx_lock(&sc->mtx);
2316
2317	ixlv_update_link_status(sc);
2318
2319	ifmr->ifm_status = IFM_AVALID;
2320	ifmr->ifm_active = IFM_ETHER;
2321
2322	if (!sc->link_up) {
2323		mtx_unlock(&sc->mtx);
2324		INIT_DBG_IF(ifp, "end: link not up");
2325		return;
2326	}
2327
2328	ifmr->ifm_status |= IFM_ACTIVE;
2329	/* Hardware is always full-duplex */
2330	ifmr->ifm_active |= IFM_FDX;
2331
2332	/* Based on the link speed reported by the PF over the AdminQ, choose a
2333	 * PHY type to report. This isn't 100% correct since we don't really
2334	 * know the underlying PHY type of the PF, but at least we can report
2335	 * a valid link speed...
2336	 */
2337	switch (sc->link_speed) {
2338	case VIRTCHNL_LINK_SPEED_100MB:
2339		ifmr->ifm_active |= IFM_100_TX;
2340		break;
2341	case VIRTCHNL_LINK_SPEED_1GB:
2342		ifmr->ifm_active |= IFM_1000_T;
2343		break;
2344	case VIRTCHNL_LINK_SPEED_10GB:
2345		ifmr->ifm_active |= IFM_10G_SR;
2346		break;
2347	case VIRTCHNL_LINK_SPEED_20GB:
2348	case VIRTCHNL_LINK_SPEED_25GB:
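		/* No distinct 20G ifmedia mapping is used; report 25G */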
2349		ifmr->ifm_active |= IFM_25G_SR;
2350		break;
2351	case VIRTCHNL_LINK_SPEED_40GB:
2352		ifmr->ifm_active |= IFM_40G_SR4;
2353		break;
2354	default:
2355		ifmr->ifm_active |= IFM_UNKNOWN;
2356		break;
2357	}
2358
2359	mtx_unlock(&sc->mtx);
2360	INIT_DBG_IF(ifp, "end");
2361	return;
2362}
2363
2364/*********************************************************************
2365 *
2366 *  Media Ioctl callback
2367 *
2368 *  This routine is called when the user changes speed/duplex using
2369 *  media/mediaopt option with ifconfig.
2370 *
2371 **********************************************************************/
2372static int
2373ixlv_media_change(struct ifnet * ifp)
2374{
2375	struct ixl_vsi *vsi = ifp->if_softc;
2376	struct ifmedia *ifm = &vsi->media;
2377
2378	INIT_DBG_IF(ifp, "begin");
2379
2380	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2381		return (EINVAL);
2382
2383	if_printf(ifp, "Changing speed is not supported\n");
2384
2385	INIT_DBG_IF(ifp, "end");
2386	return (ENODEV);
2387}
2388
2389
2390/*********************************************************************
2391 *  Multicast Initialization
2392 *
2393 *  This routine is called by init to reset to a fresh state.
2394 *
2395 **********************************************************************/
2396
2397static void
2398ixlv_init_multi(struct ixl_vsi *vsi)
2399{
2400	struct ixlv_mac_filter *f;
2401	struct ixlv_sc	*sc = vsi->back;
2402	int			mcnt = 0;
2403
2404	IOCTL_DBG_IF(vsi->ifp, "begin");
2405
2406	/* First clear any multicast filters */
2407	SLIST_FOREACH(f, sc->mac_filters, next) {
2408		if ((f->flags & IXL_FILTER_USED)
2409		    && (f->flags & IXL_FILTER_MC)) {
2410			f->flags |= IXL_FILTER_DEL;
2411			mcnt++;
2412		}
2413	}
2414	if (mcnt > 0)
2415		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2416		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2417		    sc);
2418
2419	IOCTL_DBG_IF(vsi->ifp, "end");
2420}
2421
2422static void
2423ixlv_add_multi(struct ixl_vsi *vsi)
2424{
2425	struct ifmultiaddr	*ifma;
2426	struct ifnet		*ifp = vsi->ifp;
2427	struct ixlv_sc	*sc = vsi->back;
2428	int			mcnt = 0;
2429
2430	IOCTL_DBG_IF(ifp, "begin");
2431
2432	if_maddr_rlock(ifp);
2433	/*
2434	** Get a count, to decide if we
2435	** simply use multicast promiscuous.
2436	*/
2437	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2438		if (ifma->ifma_addr->sa_family != AF_LINK)
2439			continue;
2440		mcnt++;
2441	}
2442	if_maddr_runlock(ifp);
2443
2444	/* TODO: Remove -- cannot set promiscuous mode in a VF */
2445	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2446		/* delete all multicast filters */
2447		ixlv_init_multi(vsi);
2448		sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC;
2449		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2450		    IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
2451		    sc);
2452		IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2453		return;
2454	}
2455
2456	mcnt = 0;
2457	if_maddr_rlock(ifp);
2458	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2459		if (ifma->ifma_addr->sa_family != AF_LINK)
2460			continue;
2461		if (!ixlv_add_mac_filter(sc,
2462		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2463		    IXL_FILTER_MC))
2464			mcnt++;
2465	}
2466	if_maddr_runlock(ifp);
2467	/*
2468	** Notify AQ task that sw filters need to be
2469	** added to hw list
2470	*/
2471	if (mcnt > 0)
2472		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
2473		    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
2474		    sc);
2475
2476	IOCTL_DBG_IF(ifp, "end");
2477}
2478
2479static void
2480ixlv_del_multi(struct ixl_vsi *vsi)
2481{
2482	struct ixlv_mac_filter *f;
2483	struct ifmultiaddr	*ifma;
2484	struct ifnet		*ifp = vsi->ifp;
2485	struct ixlv_sc	*sc = vsi->back;
2486	int			mcnt = 0;
2487	bool		match = FALSE;
2488
2489	IOCTL_DBG_IF(ifp, "begin");
2490
2491	/* Search for removed multicast addresses */
2492	if_maddr_rlock(ifp);
2493	SLIST_FOREACH(f, sc->mac_filters, next) {
2494		if ((f->flags & IXL_FILTER_USED)
2495		    && (f->flags & IXL_FILTER_MC)) {
2496			/* check if mac address in filter is in sc's list */
2497			match = FALSE;
2498			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2499				if (ifma->ifma_addr->sa_family != AF_LINK)
2500					continue;
2501				u8 *mc_addr =
2502				    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2503				if (cmp_etheraddr(f->macaddr, mc_addr)) {
2504					match = TRUE;
2505					break;
2506				}
2507			}
2508			/* if this filter is not in the sc's list, remove it */
2509			if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2510				f->flags |= IXL_FILTER_DEL;
2511				mcnt++;
2512				IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2513				    MAC_FORMAT_ARGS(f->macaddr));
2514			}
2515			else if (match == FALSE)
2516				IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2517				    MAC_FORMAT_ARGS(f->macaddr));
2518		}
2519	}
2520	if_maddr_runlock(ifp);
2521
2522	if (mcnt > 0)
2523		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
2524		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
2525		    sc);
2526
2527	IOCTL_DBG_IF(ifp, "end");
2528}
2529
2530/*********************************************************************
2531 *  Timer routine
2532 *
2533 *  This routine checks for link status, updates statistics,
2534 *  and runs the watchdog check.
2535 *
2536 **********************************************************************/
2537
2538static void
2539ixlv_local_timer(void *arg)
2540{
2541	struct ixlv_sc		*sc = arg;
2542	struct i40e_hw		*hw = &sc->hw;
2543	struct ixl_vsi		*vsi = &sc->vsi;
2544	u32			val;
2545
2546	IXLV_CORE_LOCK_ASSERT(sc);
2547
2548	/* If Reset is in progress just bail */
2549	if (sc->init_state == IXLV_RESET_PENDING)
2550		return;
2551
2552	/* Check for when PF triggers a VF reset */
2553	val = rd32(hw, I40E_VFGEN_RSTAT) &
2554	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2555
2556	if (val != VIRTCHNL_VFR_VFACTIVE
2557	    && val != VIRTCHNL_VFR_COMPLETED) {
2558		DDPRINTF(sc->dev, "reset in progress! (%d)", val);
2559		return;
2560	}
2561
2562	ixlv_request_stats(sc);
2563
2564	/* clean and process any events */
2565	taskqueue_enqueue(sc->tq, &sc->aq_irq);
2566
2567	/* Increment stat when a queue shows hung */
2568	if (ixl_queue_hang_check(vsi))
2569		sc->watchdog_events++;
2570
2571	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2572}
2573
2574/*
2575** Note: this routine updates the OS on the link state
2576**	the real check of the hardware only happens with
2577**	a link interrupt.
2578*/
2579void
2580ixlv_update_link_status(struct ixlv_sc *sc)
2581{
2582	struct ixl_vsi		*vsi = &sc->vsi;
2583	struct ifnet		*ifp = vsi->ifp;
2584
2585	if (sc->link_up) {
2586		if (vsi->link_active == FALSE) {
2587			if (bootverbose)
2588				if_printf(ifp, "Link is Up, %s\n",
2589				    ixlv_vc_speed_to_string(sc->link_speed));
2590			vsi->link_active = TRUE;
2591			if_link_state_change(ifp, LINK_STATE_UP);
2592		}
2593	} else { /* Link down */
2594		if (vsi->link_active == TRUE) {
2595			if (bootverbose)
2596				if_printf(ifp, "Link is Down\n");
2597			if_link_state_change(ifp, LINK_STATE_DOWN);
2598			vsi->link_active = FALSE;
2599		}
2600	}
2601
2602	return;
2603}
2604
2605/*********************************************************************
2606 *
2607 *  This routine disables all traffic on the adapter by issuing a
2608 *  global reset on the MAC and deallocates TX/RX buffers.
2609 *
2610 **********************************************************************/
2611
2612static void
2613ixlv_stop(struct ixlv_sc *sc)
2614{
2615	struct ifnet *ifp;
2616	int start;
2617
2618	ifp = sc->vsi.ifp;
2619	INIT_DBG_IF(ifp, "begin");
2620
2621	IXLV_CORE_LOCK_ASSERT(sc);
2622
2623	ixl_vc_flush(&sc->vc_mgr);
2624	ixlv_disable_queues(sc);
2625
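	/*
	 * Poll the admin queue for up to hz/10 ticks (~100 ms), waiting
	 * for the PF's response to the queue-disable request to be
	 * processed, which clears IFF_DRV_RUNNING.
	 */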
2626	start = ticks;
2627	while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
2628	    ((ticks - start) < hz/10))
2629		ixlv_do_adminq_locked(sc);
2630
2631	/* Stop the local timer */
2632	callout_stop(&sc->timer);
2633
2634	INIT_DBG_IF(ifp, "end");
2635}
2636
2637/* Free a single queue struct */
2638static void
2639ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que)
2640{
2641	struct tx_ring *txr = &que->txr;
2642	struct rx_ring *rxr = &que->rxr;
2643
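	/*
	 * Queue setup initializes the TX mutex before the RX mutex, so
	 * an uninitialized mutex means that stage (and everything after
	 * it) was never set up.
	 */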
2644	if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2645		return;
2646	IXL_TX_LOCK(txr);
2647	if (txr->br)
2648		buf_ring_free(txr->br, M_DEVBUF);
2649	ixl_free_que_tx(que);
2650	if (txr->base)
2651		i40e_free_dma_mem(&sc->hw, &txr->dma);
2652	IXL_TX_UNLOCK(txr);
2653	IXL_TX_LOCK_DESTROY(txr);
2654
2655	if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2656		return;
2657	IXL_RX_LOCK(rxr);
2658	ixl_free_que_rx(que);
2659	if (rxr->base)
2660		i40e_free_dma_mem(&sc->hw, &rxr->dma);
2661	IXL_RX_UNLOCK(rxr);
2662	IXL_RX_LOCK_DESTROY(rxr);
2663}
2664
2665/*********************************************************************
2666 *
2667 *  Free all station queue structs.
2668 *
2669 **********************************************************************/
2670static void
2671ixlv_free_queues(struct ixl_vsi *vsi)
2672{
2673	struct ixlv_sc	*sc = (struct ixlv_sc *)vsi->back;
2674	struct ixl_queue	*que = vsi->queues;
2675
2676	for (int i = 0; i < vsi->num_queues; i++, que++) {
2677		/* First, free the MSI-X resources */
2678		ixlv_free_msix_resources(sc, que);
2679		/* Then free other queue data */
2680		ixlv_free_queue(sc, que);
2681	}
2682
2683	free(vsi->queues, M_DEVBUF);
2684}
2685
2686static void
2687ixlv_config_rss_reg(struct ixlv_sc *sc)
2688{
2689	struct i40e_hw	*hw = &sc->hw;
2690	struct ixl_vsi	*vsi = &sc->vsi;
2691	u32		lut = 0;
2692	u64		set_hena = 0, hena;
2693	int		i, j, que_id;
2694	u32		rss_seed[IXL_RSS_KEY_SIZE_REG];
2695#ifdef RSS
2696	u32		rss_hash_config;
2697#endif
2698
2699	/* Don't set up RSS if using a single queue */
2700	if (vsi->num_queues == 1) {
2701		wr32(hw, I40E_VFQF_HENA(0), 0);
2702		wr32(hw, I40E_VFQF_HENA(1), 0);
2703		ixl_flush(hw);
2704		return;
2705	}
2706
2707#ifdef RSS
2708	/* Fetch the configured RSS key */
2709	rss_getkey((uint8_t *) &rss_seed);
2710#else
2711	ixl_get_default_rss_key(rss_seed);
2712#endif
2713
2714	/* Fill out hash function seed */
2715	for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
2716		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
2717
2718	/* Enable PCTYPES for RSS: */
2719#ifdef RSS
2720	rss_hash_config = rss_gethashconfig();
2721	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2722		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
2723	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2724		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
2725	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2726		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
2727	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2728		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
2729	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2730		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
2731	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2732		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
2733	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2734		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
2735#else
2736	set_hena = IXL_DEFAULT_RSS_HENA_XL710;
2737#endif
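	/* HENA is a 64-bit PCTYPE enable bitmask split across two
	 * 32-bit registers; OR in new types without clearing any. */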
2738	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2739	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2740	hena |= set_hena;
2741	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2742	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2743
2744	/* Populate the LUT with max no. of queues in round robin fashion */
2745	for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
2746		if (j == vsi->num_queues)
2747			j = 0;
2748#ifdef RSS
2749		/*
2750		 * Fetch the RSS bucket id for the given indirection entry.
2751		 * Cap it at the number of configured buckets (which is
2752		 * num_queues.)
2753		 */
2754		que_id = rss_get_indirection_to_bucket(i);
2755		que_id = que_id % vsi->num_queues;
2756#else
2757		que_id = j;
2758#endif
2759		/* lut = 4-byte sliding window of 4 lut entries */
2760		lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
2761		/* On i = 3, we have 4 entries in lut; write to the register */
2762		if ((i & 3) == 3) {
2763			wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
2764			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
2765		}
2766	}
2767	ixl_flush(hw);
2768}
2769
2770static void
2771ixlv_config_rss_pf(struct ixlv_sc *sc)
2772{
2773	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
2774	    IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
2775
2776	ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
2777	    IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
2778
2779	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
2780	    IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
2781}
2782
2783/*
2784** ixlv_config_rss - setup RSS
2785**
2786** RSS keys and table are cleared on VF reset.
2787*/
2788static void
2789ixlv_config_rss(struct ixlv_sc *sc)
2790{
2791	if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
2792		DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
2793		ixlv_config_rss_reg(sc);
2794	} else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2795		DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
2796		ixlv_config_rss_pf(sc);
2797	} else
2798		device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
2799}
2800
2801/*
2802** This routine refreshes vlan filters, called by init
2803** it scans the filter table and then updates the AQ
2804*/
2805static void
2806ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2807{
2808	struct ixl_vsi			*vsi = &sc->vsi;
2809	struct ixlv_vlan_filter	*f;
2810	int				cnt = 0;
2811
2812	if (vsi->num_vlans == 0)
2813		return;
2814	/*
2815	** Scan the filter table for vlan entries,
2816	** and if found call for the AQ update.
2817	*/
2818	SLIST_FOREACH(f, sc->vlan_filters, next)
2819		if (f->flags & IXL_FILTER_ADD)
2820			cnt++;
2821	if (cnt > 0)
2822		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
2823		    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
2824}
2825
2826
2827/*
2828** This routine adds new MAC filters to the sc's list;
2829** these are later added in hardware by sending a virtual
2830** channel message.
2831*/
2832static int
2833ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2834{
2835	struct ixlv_mac_filter	*f;
2836
2837	/* Does one already exist? */
2838	f = ixlv_find_mac_filter(sc, macaddr);
2839	if (f != NULL) {
2840		IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2841		    MAC_FORMAT_ARGS(macaddr));
2842		return (EEXIST);
2843	}
2844
2845	/* If not, get a new empty filter */
2846	f = ixlv_get_mac_filter(sc);
2847	if (f == NULL) {
2848		if_printf(sc->vsi.ifp, "%s: no filters available\n",
2849		    __func__);
2850		return (ENOMEM);
2851	}
2852
2853	IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2854	    MAC_FORMAT_ARGS(macaddr));
2855
2856	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2857	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2858	f->flags |= flags;
2859	return (0);
2860}
2861
2862/*
2863** Marks a MAC filter for deletion.
2864*/
2865static int
2866ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
2867{
2868	struct ixlv_mac_filter	*f;
2869
2870	f = ixlv_find_mac_filter(sc, macaddr);
2871	if (f == NULL)
2872		return (ENOENT);
2873
2874	f->flags |= IXL_FILTER_DEL;
2875	return (0);
2876}
2877
2878/*
2879** Tasklet handler for MSIX Adminq interrupts
2880**  - done outside interrupt context since it might sleep
2881*/
2882static void
2883ixlv_do_adminq(void *context, int pending)
2884{
2885	struct ixlv_sc		*sc = context;
2886
2887	mtx_lock(&sc->mtx);
2888	ixlv_do_adminq_locked(sc);
2889	mtx_unlock(&sc->mtx);
2890	return;
2891}
2892
2893static void
2894ixlv_do_adminq_locked(struct ixlv_sc *sc)
2895{
2896	struct i40e_hw			*hw = &sc->hw;
2897	struct i40e_arq_event_info	event;
2898	struct virtchnl_msg	*v_msg;
2899	device_t			dev = sc->dev;
2900	u16				result = 0;
2901	u32				reg, oldreg;
2902	i40e_status			ret;
2903	bool				aq_error = false;
2904
2905	IXLV_CORE_LOCK_ASSERT(sc);
2906
2907	event.buf_len = IXL_AQ_BUF_SZ;
2908	event.msg_buf = sc->aq_buffer;
2909	v_msg = (struct virtchnl_msg *)&event.desc;
2910
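	/*
	 * Drain the ARQ: 'result' returns the number of events still
	 * pending, so loop until the queue is empty.
	 */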
2911	do {
2912		ret = i40e_clean_arq_element(hw, &event, &result);
2913		if (ret)
2914			break;
2915		ixlv_vc_completion(sc, v_msg->v_opcode,
2916		    v_msg->v_retval, event.msg_buf, event.msg_len);
2917		if (result != 0)
2918			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2919	} while (result);
2920
2921	/* check for Admin queue errors */
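	/* Error bits in the ARQ/ASQ LEN registers are cleared by
	 * writing them back as zero. */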
2922	oldreg = reg = rd32(hw, hw->aq.arq.len);
2923	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2924		device_printf(dev, "ARQ VF Error detected\n");
2925		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2926		aq_error = true;
2927	}
2928	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2929		device_printf(dev, "ARQ Overflow Error detected\n");
2930		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2931		aq_error = true;
2932	}
2933	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2934		device_printf(dev, "ARQ Critical Error detected\n");
2935		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2936		aq_error = true;
2937	}
2938	if (oldreg != reg)
2939		wr32(hw, hw->aq.arq.len, reg);
2940
2941	oldreg = reg = rd32(hw, hw->aq.asq.len);
2942	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2943		device_printf(dev, "ASQ VF Error detected\n");
2944		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2945		aq_error = true;
2946	}
2947	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2948		device_printf(dev, "ASQ Overflow Error detected\n");
2949		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2950		aq_error = true;
2951	}
2952	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2953		device_printf(dev, "ASQ Critical Error detected\n");
2954		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2955		aq_error = true;
2956	}
2957	if (oldreg != reg)
2958		wr32(hw, hw->aq.asq.len, reg);
2959
2960	if (aq_error) {
2961		/* Need to reset adapter */
2962		device_printf(dev, "WARNING: Resetting!\n");
2963		sc->init_state = IXLV_RESET_REQUIRED;
2964		ixlv_stop(sc);
2965		ixlv_init_locked(sc);
2966	}
2967	ixlv_enable_adminq_irq(hw);
2968}
2969
2970static void
2971ixlv_add_sysctls(struct ixlv_sc *sc)
2972{
2973	device_t dev = sc->dev;
2974	struct ixl_vsi *vsi = &sc->vsi;
2975	struct i40e_eth_stats *es = &vsi->eth_stats;
2976
2977	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2978	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2979	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2980
2981	struct sysctl_oid *vsi_node, *queue_node;
2982	struct sysctl_oid_list *vsi_list, *queue_list;
2983
2984#define QUEUE_NAME_LEN 32
2985	char queue_namebuf[QUEUE_NAME_LEN];
2986
2987	struct ixl_queue *queues = vsi->queues;
2988	struct tx_ring *txr;
2989	struct rx_ring *rxr;
2990
2991	/* Driver statistics sysctls */
2992	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
2993			CTLFLAG_RD, &sc->watchdog_events,
2994			"Watchdog timeouts");
2995	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
2996			CTLFLAG_RD, &sc->admin_irq,
2997			"Admin Queue IRQ Handled");
2998
2999	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size",
3000			CTLFLAG_RD, &vsi->num_tx_desc, 0,
3001			"TX ring size");
3002	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size",
3003			CTLFLAG_RD, &vsi->num_rx_desc, 0,
3004			"RX ring size");
3005
3006	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed",
3007			CTLTYPE_STRING | CTLFLAG_RD,
3008			sc, 0, ixlv_sysctl_current_speed,
3009			"A", "Current Port Speed");
3010
3011	/* VSI statistics sysctls */
3012	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
3013				   CTLFLAG_RD, NULL, "VSI-specific statistics");
3014	vsi_list = SYSCTL_CHILDREN(vsi_node);
3015
3016	struct ixl_sysctl_info ctls[] =
3017	{
3018		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
3019		{&es->rx_unicast, "ucast_pkts_rcvd",
3020			"Unicast Packets Received"},
3021		{&es->rx_multicast, "mcast_pkts_rcvd",
3022			"Multicast Packets Received"},
3023		{&es->rx_broadcast, "bcast_pkts_rcvd",
3024			"Broadcast Packets Received"},
3025		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
3026		{&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
3027		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
3028		{&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
3029		{&es->tx_multicast, "mcast_pkts_txd",
3030			"Multicast Packets Transmitted"},
3031		{&es->tx_broadcast, "bcast_pkts_txd",
3032			"Broadcast Packets Transmitted"},
3033		{&es->tx_errors, "tx_errors", "TX packet errors"},
3034		/* end */
3035		{0, 0, 0}
3036	};
3037	struct ixl_sysctl_info *entry = ctls;
3038	while (entry->stat != NULL)
3039	{
3040		SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
3041				CTLFLAG_RD, entry->stat,
3042				entry->description);
3043		entry++;
3044	}
3045
3046	/* Queue sysctls */
3047	for (int q = 0; q < vsi->num_queues; q++) {
3048		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
3049		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
3050					     CTLFLAG_RD, NULL, "Queue Name");
3051		queue_list = SYSCTL_CHILDREN(queue_node);
3052
3053		txr = &(queues[q].txr);
3054		rxr = &(queues[q].rxr);
3055
3056		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
3057				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
3058				"m_defrag() failed");
3059		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
3060				CTLFLAG_RD, &(queues[q].dropped_pkts),
3061				"Driver dropped packets");
3062		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
3063				CTLFLAG_RD, &(queues[q].irqs),
3064				"irqs on this queue");
3065		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
3066				CTLFLAG_RD, &(queues[q].tso),
3067				"TSO");
3068		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
3069				CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
3070				"Driver tx dma failure in xmit");
3071		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3072				CTLFLAG_RD, &(txr->no_desc),
3073				"Queue No Descriptor Available");
3074		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3075				CTLFLAG_RD, &(txr->total_packets),
3076				"Queue Packets Transmitted");
3077		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
3078				CTLFLAG_RD, &(txr->tx_bytes),
3079				"Queue Bytes Transmitted");
3080		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3081				CTLFLAG_RD, &(rxr->rx_packets),
3082				"Queue Packets Received");
3083		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3084				CTLFLAG_RD, &(rxr->rx_bytes),
3085				"Queue Bytes Received");
3086		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
3087				CTLFLAG_RD, &(rxr->itr), 0,
3088				"Queue Rx ITR Interval");
3089		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
3090				CTLFLAG_RD, &(txr->itr), 0,
3091				"Queue Tx ITR Interval");
3092
3093#ifdef IXL_DEBUG
3094		/* Examine queue state */
3095		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
3096				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
3097				sizeof(struct ixl_queue),
3098				ixlv_sysctl_qtx_tail_handler, "IU",
3099				"Queue Transmit Descriptor Tail");
3100		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
3101				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
3102				sizeof(struct ixl_queue),
3103				ixlv_sysctl_qrx_tail_handler, "IU",
3104				"Queue Receive Descriptor Tail");
3105		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
3106				CTLFLAG_RD, &(txr->watchdog_timer), 0,
3107				"Ticks before watchdog event is triggered");
3108#endif
3109	}
3110}
3111
3112static void
3113ixlv_init_filters(struct ixlv_sc *sc)
3114{
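	/*
	 * Note: each list head only needs SLIST_HEAD storage, so these
	 * element-sized allocations are larger than necessary, and the
	 * M_NOWAIT results are not checked; a failure here would fault
	 * in SLIST_INIT().
	 */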
3115	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
3116	    M_DEVBUF, M_NOWAIT | M_ZERO);
3117	SLIST_INIT(sc->mac_filters);
3118	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
3119	    M_DEVBUF, M_NOWAIT | M_ZERO);
3120	SLIST_INIT(sc->vlan_filters);
3121	return;
3122}
3123
3124static void
3125ixlv_free_filters(struct ixlv_sc *sc)
3126{
3127	struct ixlv_mac_filter *f;
3128	struct ixlv_vlan_filter *v;
3129
3130	while (!SLIST_EMPTY(sc->mac_filters)) {
3131		f = SLIST_FIRST(sc->mac_filters);
3132		SLIST_REMOVE_HEAD(sc->mac_filters, next);
3133		free(f, M_DEVBUF);
3134	}
3135	free(sc->mac_filters, M_DEVBUF);
3136	while (!SLIST_EMPTY(sc->vlan_filters)) {
3137		v = SLIST_FIRST(sc->vlan_filters);
3138		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
3139		free(v, M_DEVBUF);
3140	}
3141	free(sc->vlan_filters, M_DEVBUF);
3142	return;
3143}
3144
3145static char *
3146ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed)
3147{
3148	int index;
3149
3150	char *speeds[] = {
3151		"Unknown",
3152		"100 Mbps",
3153		"1 Gbps",
3154		"10 Gbps",
3155		"40 Gbps",
3156		"20 Gbps",
3157		"25 Gbps",
3158	};
3159
3160	switch (link_speed) {
3161	case VIRTCHNL_LINK_SPEED_100MB:
3162		index = 1;
3163		break;
3164	case VIRTCHNL_LINK_SPEED_1GB:
3165		index = 2;
3166		break;
3167	case VIRTCHNL_LINK_SPEED_10GB:
3168		index = 3;
3169		break;
3170	case VIRTCHNL_LINK_SPEED_40GB:
3171		index = 4;
3172		break;
3173	case VIRTCHNL_LINK_SPEED_20GB:
3174		index = 5;
3175		break;
3176	case VIRTCHNL_LINK_SPEED_25GB:
3177		index = 6;
3178		break;
3179	case VIRTCHNL_LINK_SPEED_UNKNOWN:
3180	default:
3181		index = 0;
3182		break;
3183	}
3184
3185	return speeds[index];
3186}
3187
3188static int
3189ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
3190{
3191	struct ixlv_sc *sc = (struct ixlv_sc *)arg1;
3192	int error = 0;
3193
3194	error = sysctl_handle_string(oidp,
3195	  ixlv_vc_speed_to_string(sc->link_speed),
3196	  8, req);
3197	return (error);
3198}
3199
3200#ifdef IXL_DEBUG
3201/**
3202 * ixlv_sysctl_qtx_tail_handler
3203 * Retrieves I40E_QTX_TAIL1 value from hardware
3204 * for a sysctl.
3205 */
3206static int
3207ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
3208{
3209	struct ixl_queue *que;
3210	int error;
3211	u32 val;
3212
3213	que = ((struct ixl_queue *)oidp->oid_arg1);
3214	if (!que) return 0;
3215
3216	val = rd32(que->vsi->hw, que->txr.tail);
3217	error = sysctl_handle_int(oidp, &val, 0, req);
3218	if (error || !req->newptr)
3219		return error;
3220	return (0);
3221}
3222
3223/**
3224 * ixlv_sysctl_qrx_tail_handler
3225 * Retrieves I40E_QRX_TAIL1 value from hardware
3226 * for a sysctl.
3227 */
3228static int
3229ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
3230{
3231	struct ixl_queue *que;
3232	int error;
3233	u32 val;
3234
3235	que = ((struct ixl_queue *)oidp->oid_arg1);
3236	if (!que) return 0;
3237
3238	val = rd32(que->vsi->hw, que->rxr.tail);
3239	error = sysctl_handle_int(oidp, &val, 0, req);
3240	if (error || !req->newptr)
3241		return error;
3242	return (0);
3243}
3244#endif
3245
3246