/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixl.h"
#include "ixlv.h"

#ifdef RSS
#include <net/rss_config.h>
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixlv_driver_version[] = "1.2.6";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select the devices to attach to.
 *  The last field stores an index into ixlv_strings.
 *  The last entry must be all 0s.
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixlv_strings[] = {
	"Intel(R) Ethernet Connection XL710 VF Driver"
};


/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixlv_probe(device_t);
static int      ixlv_attach(device_t);
static int      ixlv_detach(device_t);
static int      ixlv_shutdown(device_t);
static void	ixlv_init_locked(struct ixlv_sc *);
static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
static void	ixlv_free_pci_resources(struct ixlv_sc *);
static int	ixlv_assign_msix(struct ixlv_sc *);
static int	ixlv_init_msix(struct ixlv_sc *);
static int	ixlv_init_taskqueue(struct ixlv_sc *);
static int	ixlv_setup_queues(struct ixlv_sc *);
static void	ixlv_config_rss(struct ixlv_sc *);
static void	ixlv_stop(struct ixlv_sc *);
static void	ixlv_add_multi(struct ixl_vsi *);
static void	ixlv_del_multi(struct ixl_vsi *);
static void	ixlv_free_queues(struct ixl_vsi *);
static int	ixlv_setup_interface(device_t, struct ixlv_sc *);

static int	ixlv_media_change(struct ifnet *);
static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);

static void	ixlv_local_timer(void *);

static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
static int	ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
static void	ixlv_init_filters(struct ixlv_sc *);
static void	ixlv_free_filters(struct ixlv_sc *);

static void	ixlv_msix_que(void *);
static void	ixlv_msix_adminq(void *);
static void	ixlv_do_adminq(void *, int);
static void	ixlv_do_adminq_locked(struct ixlv_sc *sc);
static void	ixlv_handle_que(void *, int);
static int	ixlv_reset(struct ixlv_sc *);
static int	ixlv_reset_complete(struct i40e_hw *);
static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
static void	ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
		    enum i40e_status_code);

static void	ixlv_enable_adminq_irq(struct i40e_hw *);
static void	ixlv_disable_adminq_irq(struct i40e_hw *);
static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
static void	ixlv_disable_queue_irq(struct i40e_hw *, int);

static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
static void	ixlv_register_vlan(void *, struct ifnet *, u16);
static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixlv_init_hw(struct ixlv_sc *);
static int	ixlv_setup_vc(struct ixlv_sc *);
static int	ixlv_vf_config(struct ixlv_sc *);

static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
		    struct ifnet *, int);

static void	ixlv_add_sysctls(struct ixlv_sc *);
static int	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixlv_probe),
	DEVMETHOD(device_attach, ixlv_attach),
	DEVMETHOD(device_detach, ixlv_detach),
	DEVMETHOD(device_shutdown, ixlv_shutdown),
	{0, 0}
};

static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);

MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
    "IXLV driver parameters");

/*
** Number of descriptors per ring:
**   - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
    &ixlv_ringsz, 0, "Descriptor Ring Size");

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
** Number of entries in Tx queue buf_ring.
** Increasing this will reduce the number of
** errors when transmitting fragmented UDP
** packets.
*/
static int ixlv_txbrsz = DEFAULT_TXBRSZ;
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
    &ixlv_txbrsz, 0, "TX Buf Ring Size");

/*
** Controls for Interrupt Throttling
**      - true/false for dynamic adjustment
**      - default values for static ITR
*/
int ixlv_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

int ixlv_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

int ixlv_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixlv_rx_itr, 0, "RX Interrupt Rate");

int ixlv_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixlv_tx_itr, 0, "TX Interrupt Rate");
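
/*
** Example (values are illustrative, not recommendations): since these
** are CTLFLAG_RDTUN tunables, they can be set at boot time from
** /boot/loader.conf, e.g.:
**
**   hw.ixlv.ringsz=2048
**   hw.ixlv.max_queues=4
**   hw.ixlv.dynamic_rx_itr=1
*/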


/*********************************************************************
 *  Device identification routine
 *
 *  ixlv_probe determines if the driver should be loaded on
 *  the hardware based on the PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixlv_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];

	INIT_DEBUGOUT("ixlv_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixlv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			/* Bounded print into the fixed-size name buffer */
			snprintf(device_name, sizeof(device_name),
			    "%s, Version - %s",
			    ixlv_strings[ent->index],
			    ixlv_driver_version);
			device_set_desc_copy(dev, device_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_attach(device_t dev)
{
	struct ixlv_sc	*sc;
	struct i40e_hw	*hw;
	struct ixl_vsi 	*vsi;
	int            	error = 0;

	INIT_DBG_DEV(dev, "begin");

	/* Allocate, clear, and link in our primary soft structure */
	sc = device_get_softc(dev);
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;
	vsi = &sc->vsi;
	vsi->dev = dev;

	/* Initialize hw struct */
	ixlv_init_hw(sc);

	/* Allocate filter lists */
	ixlv_init_filters(sc);

	/* Core Lock Init */
	mtx_init(&sc->mtx, device_get_nameunit(dev),
	    "IXL SC Lock", MTX_DEF);

	/* Set up the timer callout */
	callout_init_mtx(&sc->timer, &sc->mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixlv_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");

	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "VF Device is ready for configuration");

	error = ixlv_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	INIT_DBG_DEV(dev, "PF API version verified");

	/* TODO: Figure out why MDD events occur when this reset is removed. */
	/* Need API version before sending reset message */
	error = ixlv_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF reset complete");

	/* Ask for VF config from PF */
	error = ixlv_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	INIT_DBG_DEV(dev, "VF config from PF:");
	INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->max_mtu);
	INIT_DBG_DEV(dev, "Offload flags: %#010x",
	    sc->vf_res->vf_offload_flags);

	// TODO: Move this into ixlv_vf_config?
	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}

	INIT_DBG_DEV(dev, "Resource Acquisition complete");

	/* If no mac address was assigned just make a random one */
	if (!ixlv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;	/* clear the multicast bit */
		addr[0] |= 0x02;	/* set the locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	vsi->id = sc->vsi_res->vsi_id;
	vsi->back = (void *)sc;
	sc->link_up = TRUE;

	/* This allocates the memory and early settings */
	if (ixlv_setup_queues(sc) != 0) {
		device_printf(dev, "%s: setup queues failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	/* Setup the stack interface */
	if (ixlv_setup_interface(dev, sc) != 0) {
		device_printf(dev, "%s: setup interface failed!\n",
		    __func__);
		error = EIO;
		goto out;
	}

	INIT_DBG_DEV(dev, "Queue memory and interface setup");

	/* Do queue interrupt setup */
	ixlv_assign_msix(sc);

	/* Start AdminQ taskqueue */
	ixlv_init_taskqueue(sc);

	/* Initialize stats */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	ixlv_add_sysctls(sc);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

	/* We want AQ enabled early */
	ixlv_enable_adminq_irq(hw);

	/* Set things up to run init */
	sc->init_state = IXLV_INIT_READY;

	ixl_vc_init_mgr(sc, &sc->vc_mgr);

	INIT_DBG_DEV(dev, "end");
	return (error);

out:
	ixlv_free_queues(vsi);
err_res_buf:
	free(sc->vf_res, M_DEVBUF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	ixlv_free_pci_resources(sc);
err_early:
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);
	INIT_DBG_DEV(dev, "end: error %d", error);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixlv_detach(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);
	struct ixl_vsi 	*vsi = &sc->vsi;

	INIT_DBG_DEV(dev, "begin");

	/* Make sure VLANs are not using the driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		if_printf(vsi->ifp, "VLAN in use, detach first\n");
		INIT_DBG_DEV(dev, "end");
		return (EBUSY);
	}

	/* Stop driver */
	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		mtx_lock(&sc->mtx);
		ixlv_stop(sc);
		mtx_unlock(&sc->mtx);
	}

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	/* Drain VC mgr */
	callout_drain(&sc->vc_mgr.callout);

	i40e_shutdown_adminq(&sc->hw);
	taskqueue_free(sc->tq);
	if_free(vsi->ifp);
	free(sc->vf_res, M_DEVBUF);
	ixlv_free_pci_resources(sc);
	ixlv_free_queues(vsi);
	mtx_destroy(&sc->mtx);
	ixlv_free_filters(sc);

	bus_generic_detach(dev);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixlv_shutdown(device_t dev)
{
	struct ixlv_sc	*sc = device_get_softc(dev);

	INIT_DBG_DEV(dev, "begin");

	mtx_lock(&sc->mtx);
	ixlv_stop(sc);
	mtx_unlock(&sc->mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);
}

/*
 * Configure TXCSUM(IPV6) and TSO(4/6)
 *	- the hardware handles these together so we
 *	  need to tweak them in tandem
 */
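
/*
 * A concrete example of the behavior below (ifconfig(8) flags shown
 * for illustration; the interface name is hypothetical): running
 * `ifconfig ixlv0 -txcsum` while TSO4 is enabled disables both and
 * records IXL_FLAGS_KEEP_TSO4, so a later `ifconfig ixlv0 txcsum`
 * restores TSO4 along with the checksum offload.
 */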
static void
ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
{
	/* Enable/disable TXCSUM/TSO4 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			/* enable TXCSUM, restore TSO if previously enabled */
			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
				ifp->if_capenable |= IFCAP_TSO4;
			}
		}
		else if (mask & IFCAP_TSO4) {
			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
			if_printf(ifp,
			    "TSO4 requires txcsum, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && !(ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		else if (mask & IFCAP_TSO4)
			ifp->if_capenable |= IFCAP_TSO4;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM)
	    && (ifp->if_capenable & IFCAP_TSO4)) {
		if (mask & IFCAP_TXCSUM) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
			if_printf(ifp,
			    "TSO4 requires txcsum, disabling both...\n");
		} else if (mask & IFCAP_TSO4)
			ifp->if_capenable &= ~IFCAP_TSO4;
	}

	/* Enable/disable TXCSUM_IPV6/TSO6 */
	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
				ifp->if_capenable |= IFCAP_TSO6;
			}
		} else if (mask & IFCAP_TSO6) {
			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
			if_printf(ifp,
			    "TSO6 requires txcsum6, enabling both...\n");
		}
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && !(ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
		else if (mask & IFCAP_TSO6)
			ifp->if_capenable |= IFCAP_TSO6;
	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
	    && (ifp->if_capenable & IFCAP_TSO6)) {
		if (mask & IFCAP_TXCSUM_IPV6) {
			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
			if_printf(ifp,
			    "TSO6 requires txcsum6, disabling both...\n");
		} else if (mask & IFCAP_TSO6)
			ifp->if_capenable &= ~IFCAP_TSO6;
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixlv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixlv_sc		*sc = vsi->back;
	struct ifreq		*ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr 		*ifa = (struct ifaddr *)data;
	bool			avoid_reset = FALSE;
#endif
	int             	error = 0;


	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixlv_init(vsi);
#ifdef INET
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			error = ether_ioctl(ifp, command, data);
#else
		/* Without INET/INET6, don't fall through to SIOCSIFMTU */
		error = ether_ioctl(ifp, command, data);
#endif
		break;
	case SIOCSIFMTU:
		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
		mtx_lock(&sc->mtx);
		if (ifr->ifr_mtu > IXL_MAX_FRAME -
		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
			error = EINVAL;
			IOCTL_DBG_IF(ifp, "mtu too large");
		} else {
			IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
			// ERJ: Interestingly enough, these types don't match
			ifp->if_mtu = (u_long)ifr->ifr_mtu;
			vsi->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
			    + ETHER_VLAN_ENCAP_LEN;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_init_locked(sc);
		}
		mtx_unlock(&sc->mtx);
		break;
	case SIOCSIFFLAGS:
		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
		mtx_lock(&sc->mtx);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixlv_init_locked(sc);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixlv_stop(sc);
		sc->if_flags = ifp->if_flags;
		mtx_unlock(&sc->mtx);
		break;
	case SIOCADDMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_add_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCDELMULTI:
		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
		if (sc->init_state == IXLV_RUNNING) {
			mtx_lock(&sc->mtx);
			ixlv_disable_intr(vsi);
			ixlv_del_multi(vsi);
			ixlv_enable_intr(vsi);
			mtx_unlock(&sc->mtx);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");

		ixlv_cap_txcsum_tso(vsi, ifp, mask);

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ixlv_init(vsi);
		}
		VLAN_CAPABILITIES(ifp);

		break;
	}

	default:
		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
** Reinitializing the VF is unfortunately more involved than for a
** physical device: the PF must more or less completely recreate our
** resources, so many things that were done only once at attach in
** traditional drivers must now be redone at each reinitialization.
** This function does that 'prelude' so we can then call the normal
** locked init code.
*/
int
ixlv_reinit_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_mac_filter  *mf, *mf_temp;
	struct ixlv_vlan_filter	*vf;
	int			error = 0;

	INIT_DBG_IF(ifp, "begin");

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixlv_stop(sc);

	error = ixlv_reset(sc);

	INIT_DBG_IF(ifp, "VF was reset");

	/* set the state in case we went through RESET */
	sc->init_state = IXLV_RUNNING;

	/*
	** Resetting the VF drops all filters from hardware;
	** we need to mark them to be re-added in init.
	*/
	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
		if (mf->flags & IXL_FILTER_DEL) {
			SLIST_REMOVE(sc->mac_filters, mf,
			    ixlv_mac_filter, next);
			free(mf, M_DEVBUF);
		} else
			mf->flags |= IXL_FILTER_ADD;
	}
	if (vsi->num_vlans != 0)
		SLIST_FOREACH(vf, sc->vlan_filters, next)
			vf->flags = IXL_FILTER_ADD;
	else { /* clean any stale filters */
		while (!SLIST_EMPTY(sc->vlan_filters)) {
			vf = SLIST_FIRST(sc->vlan_filters);
			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
			free(vf, M_DEVBUF);
		}
	}

	ixlv_enable_adminq_irq(hw);
	ixl_vc_flush(&sc->vc_mgr);

	INIT_DBG_IF(ifp, "end");
	return (error);
}

static void
ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
	enum i40e_status_code code)
{
	struct ixlv_sc *sc;

	sc = arg;

	/*
	 * Ignore "Adapter Stopped" errors: they occur when an ifconfig
	 * down arrives while a command is still in progress, and are
	 * not worth reporting.
	 */
	if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
		if_printf(sc->vsi.ifp,
		    "Error %d waiting for PF to complete operation %d\n",
		    code, cmd->request);
	}
}

static void
ixlv_init_locked(struct ixlv_sc *sc)
{
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct ifnet		*ifp = vsi->ifp;
	int			 error = 0;

	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

	/* Do a reinit first if an init has already been done */
	if ((sc->init_state == IXLV_RUNNING) ||
	    (sc->init_state == IXLV_RESET_REQUIRED) ||
	    (sc->init_state == IXLV_RESET_PENDING))
		error = ixlv_reinit_locked(sc);
	/* Don't bother with init if we failed reinit */
	if (error)
		goto init_done;

	/* Remove existing MAC filter if new MAC addr is set */
	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
		error = ixlv_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd,
			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Check for an LAA mac address... */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;

	/* Add mac filter for this VF to PF */
	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
		if (!error || error == EEXIST)
			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
			    sc);
	}

	/* Setup vlan's if needed */
	ixlv_setup_vlan_filters(sc);

	/* Prepare the queues for operation */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct  rx_ring	*rxr = &que->rxr;

		ixl_init_tx_ring(que);

		if (vsi->max_frame_size <= MCLBYTES)
			rxr->mbuf_sz = MCLBYTES;
		else
			rxr->mbuf_sz = MJUMPAGESIZE;
		ixl_init_rx_ring(que);
	}

	/* Configure queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);

	/* Set up RSS */
	ixlv_config_rss(sc);

	/* Map vectors */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd,
	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);

	/* Enable queues */
	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);

	/* Start the local timer */
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);

	sc->init_state = IXLV_RUNNING;

init_done:
	INIT_DBG_IF(ifp, "end");
	return;
}

/*
**  Init entry point for the stack
*/
void
ixlv_init(void *arg)
{
	struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
	struct ixlv_sc *sc = vsi->back;
	int retries = 0;

	mtx_lock(&sc->mtx);
	ixlv_init_locked(sc);
	mtx_unlock(&sc->mtx);

	/* Wait for init_locked to finish */
	while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
	    && ++retries < IXLV_AQ_MAX_ERR) {
		i40e_msec_delay(10);
	}
	if (retries >= IXLV_AQ_MAX_ERR)
		if_printf(vsi->ifp,
		    "Init failed to complete in allotted time!\n");
}

/*
 * ixlv_attach() helper function; gathers information about
 * the (virtual) hardware for use elsewhere in the driver.
 */
static void
ixlv_init_hw(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);
}

/*
 * ixlv_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by retrying
 * the initial "API version" message until the PF responds or
 * the retries are exhausted.
 */
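
/*
 * In outline (a sketch of the exchange implemented below):
 *   1. i40e_init_adminq()      - bring up the VF admin queue
 *   2. ixlv_send_api_ver()     - post our API version to the PF
 *   3. poll i40e_asq_done()    - wait for the send to complete
 *   4. ixlv_verify_api_ver()   - check that the PF's reply is compatible
 * Step 4 gets one extra resend on timeout; the whole sequence is
 * retried up to IXLV_AQ_MAX_ERR times.
 */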
static int
ixlv_setup_vc(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int error = 0, ret_error = 0, asq_retries = 0;
	bool send_api_ver_retried = false;

	/* Need to set these AQ parameters before initializing AQ */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;

	for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
		/* Initialize admin queue */
		error = i40e_init_adminq(hw);
		if (error) {
			device_printf(dev, "%s: init_adminq failed: %d\n",
			    __func__, error);
			ret_error = 1;
			continue;
		}

		INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);

retry_send:
		/* Send VF's API version */
		error = ixlv_send_api_ver(sc);
		if (error) {
			i40e_shutdown_adminq(hw);
			ret_error = 2;
			device_printf(dev, "%s: unable to send api"
			    " version to PF on attempt %d, error %d\n",
			    __func__, i+1, error);
		}

		asq_retries = 0;
		while (!i40e_asq_done(hw)) {
			if (++asq_retries > IXLV_AQ_MAX_ERR) {
				i40e_shutdown_adminq(hw);
				DDPRINTF(dev, "Admin Queue timeout "
				    "(waiting for send_api_ver), %d more retries...",
				    IXLV_AQ_MAX_ERR - (i + 1));
				ret_error = 3;
				break;
			}
			i40e_msec_delay(10);
		}
		if (asq_retries > IXLV_AQ_MAX_ERR)
			continue;

		INIT_DBG_DEV(dev, "Sent API version message to PF");

		/* Verify that the PF's API version is compatible */
		error = ixlv_verify_api_ver(sc);
		if (error == ETIMEDOUT) {
			if (!send_api_ver_retried) {
				/* Resend message, one more time */
				send_api_ver_retried = true;
				device_printf(dev,
				    "%s: Timeout while verifying API version on first"
				    " try!\n", __func__);
				goto retry_send;
			} else {
				device_printf(dev,
				    "%s: Timeout while verifying API version on second"
				    " try!\n", __func__);
				ret_error = 4;
				break;
			}
		}
		if (error) {
			device_printf(dev,
			    "%s: Unable to verify API version,"
			    " error %d\n", __func__, error);
			ret_error = 5;
		}
		break;
	}

	if (ret_error >= 4)
		i40e_shutdown_adminq(hw);
	return (ret_error);
}

/*
 * ixlv_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
ixlv_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int bufsz, error = 0, ret_error = 0;
	int asq_retries, retried = 0;

retry_config:
	error = ixlv_send_vf_config_msg(sc);
	if (error) {
		device_printf(dev,
		    "%s: Unable to send VF config request, attempt %d,"
		    " error %d\n", __func__, retried + 1, error);
		ret_error = 2;
	}

	asq_retries = 0;
	while (!i40e_asq_done(hw)) {
		if (++asq_retries > IXLV_AQ_MAX_ERR) {
			device_printf(dev, "%s: Admin Queue timeout "
			    "(waiting for send_vf_config_msg), attempt %d\n",
			    __func__, retried + 1);
			ret_error = 3;
			goto fail;
		}
		i40e_msec_delay(10);
	}

	INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
	    retried + 1);

	if (!sc->vf_res) {
		bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
		    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
		sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
		if (!sc->vf_res) {
			device_printf(dev,
			    "%s: Unable to allocate memory for VF configuration"
			    " message from PF on attempt %d\n", __func__, retried + 1);
			ret_error = 1;
			goto fail;
		}
	}

	/* Check for VF config response */
	error = ixlv_get_vf_config(sc);
	if (error == ETIMEDOUT) {
		/* The 1st time we timeout, send the configuration message again */
		if (!retried) {
			retried++;
			goto retry_config;
		}
	}
	if (error) {
		device_printf(dev,
		    "%s: Unable to get VF configuration from PF after %d tries!\n",
		    __func__, retried + 1);
		ret_error = 4;
	}
	goto done;

fail:
	free(sc->vf_res, M_DEVBUF);
done:
	return (ret_error);
}

/*
 * Allocate MSI/X vectors, setup the AQ vector early
 */
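/*
 * The resulting vector layout (queue vectors are wired up later in
 * ixlv_assign_msix()):
 *   MSIX vector 0 (SYS_RES_IRQ rid 1)  - admin queue
 *   MSIX vectors 1..n (rids 2..n+1)    - one per RX/TX queue pair
 */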
static int
ixlv_init_msix(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	int rid, want, vectors, queues, available;

	rid = PCIR_BAR(IXL_BAR);
	sc->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (!sc->msix_mem) {
		/* May not be enabled */
		device_printf(sc->dev,
		    "Unable to map MSIX table\n");
		goto fail;
	}

	available = pci_msix_count(dev);
	if (available == 0) { /* system has msix disabled */
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, sc->msix_mem);
		sc->msix_mem = NULL;
		goto fail;
	}

	/* Figure out a reasonable auto config value */
	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;

	/* Override with hardcoded value if sane */
	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
		queues = ixlv_max_queues;
#ifdef  RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
	/* Enforce the VF max value */
	if (queues > IXLV_MAX_QUEUES)
		queues = IXLV_MAX_QUEUES;

	/*
	** Want one vector (RX/TX pair) per queue
	** plus an additional for the admin queue.
	*/
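	/*
	** e.g. (illustrative figures, before the RSS and IXLV_MAX_QUEUES
	** clamps): with 8 CPUs and 6 available vectors, queues = 5 and
	** want = 6, which just fits.
	*/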
	want = queues + 1;
	if (want <= available)	/* Have enough */
		vectors = want;
	else {
		device_printf(sc->dev,
		    "MSIX Configuration Problem, "
		    "%d vectors available but %d wanted!\n",
		    available, want);
		goto fail;
	}

#ifdef RSS
	/*
	* If we're doing RSS, the number of queues needs to
	* match the number of RSS buckets that are configured.
	*
	* + If there's more queues than RSS buckets, we'll end
	*   up with queues that get no traffic.
	*
	* + If there's more RSS buckets than queues, we'll end
	*   up having multiple RSS buckets map to the same queue,
	*   so there'll be some contention.
	*/
	if (queues != rss_getnumbuckets()) {
		device_printf(dev,
		    "%s: queues (%d) != RSS buckets (%d)"
		    "; performance will be impacted.\n",
		     __func__, queues, rss_getnumbuckets());
	}
#endif

	if (pci_alloc_msix(dev, &vectors) == 0) {
		device_printf(sc->dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		sc->msix = vectors;
		sc->vsi.num_queues = queues;
	} else {
		/* Don't continue as if the vectors were allocated */
		device_printf(sc->dev, "MSIX allocation failed!\n");
		goto fail;
	}

	/*
	** Explicitly set the guest PCI BUSMASTER capability,
	** and we must rewrite the ENABLE bit in the MSIX control
	** register again at this point to cause the host to
	** successfully initialize us.
	*/
	{
		u16 pci_cmd_word;
		int msix_ctrl;
		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	/* Next we need to setup the vector for the Admin Queue */
	rid = 1;	// zero vector + 1
	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: AQ interrupt\n");
		goto fail;
	}
	if (bus_setup_intr(dev, sc->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixlv_msix_adminq, sc, &sc->tag)) {
		sc->res = NULL;
		device_printf(dev, "Failed to register AQ handler");
		goto fail;
	}
	bus_describe_intr(dev, sc->res, sc->tag, "adminq");

	return (vectors);

fail:
	/* The VF driver MUST use MSIX */
	return (0);
}

static int
ixlv_allocate_pci_resources(struct ixlv_sc *sc)
{
	int             rid;
	device_t        dev = sc->dev;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag =
		rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
		rman_get_bushandle(sc->pci_mem);
	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;

	sc->hw.back = &sc->osdep;

	/* Disable adminq interrupts */
	ixlv_disable_adminq_irq(&sc->hw);

	/*
	** Now setup MSI/X, it will return
	** us the number of supported vectors
	*/
	sc->msix = ixlv_init_msix(sc);

	/* We fail without MSIX support */
	if (sc->msix == 0)
		return (ENXIO);

	return (0);
}

static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
	struct ixl_vsi         *vsi = &sc->vsi;
	struct ixl_queue       *que = vsi->queues;
	device_t                dev = sc->dev;

	/* We may get here before stations are setup */
	if (que == NULL)
		goto early;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		int rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

early:
	/* Clean the AdminQ interrupt */
	if (sc->tag != NULL) {
		bus_teardown_intr(dev, sc->res, sc->tag);
		sc->tag = NULL;
	}
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);

	pci_release_msi(dev);

	if (sc->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(IXL_BAR), sc->msix_mem);

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), sc->pci_mem);

	return;
}

/*
 * Create taskqueue and tasklet for Admin Queue interrupts.
 */
static int
ixlv_init_taskqueue(struct ixlv_sc *sc)
{
	int error = 0;

	TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);

	sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
	    device_get_nameunit(sc->dev));

	return (error);
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers for the VSI queues
 *
 **********************************************************************/
static int
ixlv_assign_msix(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	struct 		ixl_vsi *vsi = &sc->vsi;
	struct 		ixl_queue *que = vsi->queues;
	struct		tx_ring	 *txr;
	int 		error, rid, vector = 1;

	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
		int cpu_id = i;
		rid = vector + 1;
		txr = &que->txr;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixlv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register que handler");
			return (error);
		}
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
		/* Bind the vector to a CPU */
#ifdef RSS
		cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
		bus_bind_intr(dev, que->res, cpu_id);
		que->msix = vector;
		/* shift in 64-bit space so high vector bits aren't lost */
		vsi->que_mask |= ((u64)1 << que->msix);
		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
		TASK_INIT(&que->task, 0, ixlv_handle_que, que);
		que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
#ifdef RSS
		taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
		    cpu_id, "%s (bucket %d)",
		    device_get_nameunit(dev), cpu_id);
#else
		taskqueue_start_threads(&que->tq, 1, PI_NET,
		    "%s que", device_get_nameunit(dev));
#endif

	}

	return (0);
}

/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
ixlv_reset(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	int		error = 0;

	/* Ask the PF to reset us if we are initiating */
	if (sc->init_state != IXLV_RESET_PENDING)
		ixlv_request_reset(sc);

	i40e_msec_delay(100);
	error = ixlv_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: VF reset failed\n",
		    __func__);
		return (error);
	}

	error = i40e_shutdown_adminq(hw);
	if (error) {
		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "%s: init_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	return (0);
}

static int
ixlv_reset_complete(struct i40e_hw *hw)
{
	u32 reg;

	/* Poll for up to ~10 seconds (100 iterations of 100 ms) */
	for (int i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

		if ((reg == I40E_VFR_VFACTIVE) ||
		    (reg == I40E_VFR_COMPLETED))
			return (0);
		i40e_msec_delay(100);
	}

	return (EBUSY);
}


/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static int
ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
{
	struct ifnet		*ifp;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	INIT_DBG_DEV(dev, "begin");

	ifp = vsi->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: could not allocate ifnet"
		    " structure!\n", __func__);
		return (-1);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 4000000000;  // ??
	ifp->if_init = ixlv_init;
	ifp->if_softc = vsi;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixlv_ioctl;

#if __FreeBSD_version >= 1100000
	if_setgetcounterfn(ifp, ixl_get_counter);
#endif

	ifp->if_transmit = ixl_mq_start;

	ifp->if_qflush = ixl_qflush;
	ifp->if_snd.ifq_maxlen = que->num_desc - 2;

	ether_ifattach(ifp, sc->hw.mac.addr);

	vsi->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM;
	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU
			     |  IFCAP_VLAN_HWCSUM
			     |  IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	** Don't turn this on by default: if vlans are created on
	** another pseudo device (e.g. lagg), vlan events are not
	** passed through, breaking operation, but with HW FILTER
	** off it works. If using vlans directly on the ixl driver
	** you can enable this and get full hardware tag filtering.
	*/
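	/*
	** It can still be enabled at runtime, e.g. with
	** `ifconfig ixlv0 vlanhwfilter` (interface name illustrative).
	*/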
	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
		     ixlv_media_status);

	// JFV Add media types later?

	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	INIT_DBG_DEV(dev, "end");
	return (0);
}

/*
** Allocate and setup the interface queues
*/
static int
ixlv_setup_queues(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi;
	struct ixl_queue	*que;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int 			rsize, tsize;
	int			error = I40E_SUCCESS;

	vsi = &sc->vsi;
	vsi->back = (void *)sc;
	vsi->hw = &sc->hw;
	vsi->num_vlans = 0;

	/* Get memory for the station queues */
	if (!(vsi->queues =
		(struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
		vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate queue memory\n");
			error = ENOMEM;
			goto early;
	}

	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		que->num_desc = ixlv_ringsz;
		que->me = i;
		que->vsi = vsi;
		/* mark the queue as active */
		vsi->active_queues |= (u64)1 << que->me;

		txr = &que->txr;
		txr->que = que;
		txr->tail = I40E_QTX_TAIL1(que->me);
		/* Initialize the TX lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
		/*
		** Create the TX descriptor ring, the extra int is
		** added as the location for HEAD WB.
		*/
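		/*
		** e.g. a 1024-descriptor ring needs 1024 * 16-byte
		** descriptors plus one u32 for head write-back, rounded
		** up to DBA_ALIGN (the 1024 figure is illustrative).
		*/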
		tsize = roundup2((que->num_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
		if (i40e_allocate_dma_mem(&sc->hw,
		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		txr->base = (struct i40e_tx_desc *)txr->dma.va;
		bzero((void *)txr->base, tsize);
		/* Now allocate transmit soft structs for the ring */
		if (ixl_allocate_tx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up TX structures\n");
			error = ENOMEM;
			goto fail;
		}
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
		    M_WAITOK, &txr->mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up TX buf ring\n");
			error = ENOMEM;
			goto fail;
		}

		/*
		 * Next the RX queues...
		 */
		rsize = roundup2(que->num_desc *
		    sizeof(union i40e_rx_desc), DBA_ALIGN);
		rxr = &que->rxr;
		rxr->que = que;
		rxr->tail = I40E_QRX_TAIL1(que->me);

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), que->me);
		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (i40e_allocate_dma_mem(&sc->hw,
		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
			device_printf(dev,
			    "Unable to allocate RX Descriptor memory\n");
			error = ENOMEM;
			goto fail;
		}
		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
		bzero((void *)rxr->base, rsize);

		/* Allocate receive soft structs for the ring */
		if (ixl_allocate_rx_data(que)) {
			device_printf(dev,
			    "Critical Failure setting up receive structs\n");
			error = ENOMEM;
			goto fail;
		}
	}

	return (0);

fail:
	for (int i = 0; i < vsi->num_queues; i++) {
		que = &vsi->queues[i];
		rxr = &que->rxr;
		txr = &que->txr;
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
	}
	free(vsi->queues, M_DEVBUF);

early:
	return (error);
}

/*
** This routine is run via a vlan config EVENT; it lets us use the
** HW Filter table since we can get the vlan id. This just creates
** the entry in the soft version of the VFTA; init will repopulate
** the real table.
*/
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = arg;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;


	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/* Sanity check - make sure it doesn't already exist */
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag)
			return;
	}

	mtx_lock(&sc->mtx);
	v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (v == NULL) {
		/* M_NOWAIT allocation can fail; don't dereference NULL */
		mtx_unlock(&sc->mtx);
		return;
	}
	++vsi->num_vlans;
	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
	v->vlan = vtag;
	v->flags = IXL_FILTER_ADD;
	ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
	    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
	return;
}

/*
** This routine is run via a vlan unconfig EVENT;
** remove our entry in the soft vfta.
*/
static void
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct ixl_vsi		*vsi = arg;
	struct ixlv_sc		*sc = vsi->back;
	struct ixlv_vlan_filter	*v;
	int			i = 0;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	mtx_lock(&sc->mtx);
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag) {
			v->flags = IXL_FILTER_DEL;
			++i;
			--vsi->num_vlans;
		}
	}
	if (i)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
		    IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
	mtx_unlock(&sc->mtx);
	return;
}

/*
** Get a new filter and add it to the mac filter list.
*/
static struct ixlv_mac_filter *
ixlv_get_mac_filter(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter	*f;

	f = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (f)
		SLIST_INSERT_HEAD(sc->mac_filters, f, next);

	return (f);
}

/*
** Find the filter with a matching MAC address.
*/
static struct ixlv_mac_filter *
ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter	*f;
	bool				match = FALSE;

	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (cmp_etheraddr(f->macaddr, macaddr)) {
			match = TRUE;
			break;
		}
	}

	if (!match)
		f = NULL;
	return (f);
}

/*
** Admin Queue interrupt handler
*/
static void
ixlv_msix_adminq(void *arg)
{
	struct ixlv_sc	*sc = arg;
	struct i40e_hw	*hw = &sc->hw;
	u32		reg, mask;

	/* Reading ICR0 acknowledges the interrupt causes */
	reg = rd32(hw, I40E_VFINT_ICR01);
	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

	reg = rd32(hw, I40E_VFINT_DYN_CTL01);
	reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTL01, reg);

	/* schedule task */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);
	return;
}

void
ixlv_enable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_enable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_enable_queue_irq(hw, que->me);
}

void
ixlv_disable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_queue	*que = vsi->queues;

	ixlv_disable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_disable_queue_irq(hw, que->me);
}


static void
ixlv_disable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}

static void
ixlv_enable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01,
	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}

static void
ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}

static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}
1901
1902
1903/*
1904** Provide a update to the queue RX
1905** interrupt moderation value.
1906*/
1907static void
1908ixlv_set_queue_rx_itr(struct ixl_queue *que)
1909{
1910	struct ixl_vsi	*vsi = que->vsi;
1911	struct i40e_hw	*hw = vsi->hw;
1912	struct rx_ring	*rxr = &que->rxr;
1913	u16		rx_itr;
1914	u16		rx_latency = 0;
1915	int		rx_bytes;
1916
1917
1918	/* Idle, do nothing */
1919	if (rxr->bytes == 0)
1920		return;
1921
1922	if (ixlv_dynamic_rx_itr) {
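		/*
		** Scale the byte count by the current ITR value so the
		** thresholds below compare load per interrupt interval.
		*/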
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
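			/* (a 9:1 weighted harmonic mean of the old
			**  value and the new sample) */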
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = rx_itr & IXL_MAX_ITR;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = ixlv_rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	}
	rxr->bytes = 0;
	rxr->packets = 0;
	return;
}

/*
** Provide an update to the queue TX
** interrupt moderation value.
*/
static void
ixlv_set_queue_tx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	u16		tx_itr;
	u16		tx_latency = 0;
	int		tx_bytes;

	/* Idle, do nothing */
	if (txr->bytes == 0)
		return;

	if (ixlv_dynamic_tx_itr) {
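		/* Bytes per ITR interval tick, as in the RX path above */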
		tx_bytes = txr->bytes/txr->itr;
		tx_itr = txr->itr;

		switch (txr->latency) {
		case IXL_LOW_LATENCY:
			if (tx_bytes > 10) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (tx_bytes > 20) {
				tx_latency = IXL_BULK_LATENCY;
				tx_itr = IXL_ITR_8K;
			} else if (tx_bytes <= 10) {
				tx_latency = IXL_LOW_LATENCY;
				tx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (tx_bytes <= 20) {
				tx_latency = IXL_AVE_LATENCY;
				tx_itr = IXL_ITR_20K;
			}
			break;
		}

		txr->latency = tx_latency;

		if (tx_itr != txr->itr) {
			/* do an exponential smoothing */
			tx_itr = (10 * tx_itr * txr->itr) /
			    ((9 * tx_itr) + txr->itr);
			txr->itr = tx_itr & IXL_MAX_ITR;
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);
		}

	} else { /* We may have toggled to non-dynamic */
		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->tx_itr_setting = ixlv_tx_itr;
		/* Update the hardware if needed */
		if (txr->itr != vsi->tx_itr_setting) {
			txr->itr = vsi->tx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
			    que->me), txr->itr);
		}
	}
	txr->bytes = 0;
	txr->packets = 0;
	return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/
static void
ixlv_handle_que(void *context, int pending)
{
	struct ixl_queue *que = context;
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw  *hw = vsi->hw;
	struct tx_ring  *txr = &que->txr;
	struct ifnet    *ifp = vsi->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixl_rxeof(que, IXL_RX_LIMIT);
		mtx_lock(&txr->mtx);
		ixl_txeof(que);
		if (!drbr_empty(ifp, txr->br))
			ixl_mq_start_locked(ifp, txr);
		mtx_unlock(&txr->mtx);
		if (more) {
			taskqueue_enqueue(que->tq, &que->task);
			return;
		}
	}

	/* Re-enable this queue's interrupt */
	ixlv_enable_queue_irq(hw, que->me);
	return;
}

/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixlv_msix_que(void *arg)
{
	struct ixl_queue	*que = arg;
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct tx_ring	*txr = &que->txr;
	bool		more_tx, more_rx;

	/* Spurious interrupts are ignored */
	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	++que->irqs;

	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);

	mtx_lock(&txr->mtx);
	more_tx = ixl_txeof(que);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
	if (!drbr_empty(vsi->ifp, txr->br))
		more_tx = 1;
	mtx_unlock(&txr->mtx);

	ixlv_set_queue_rx_itr(que);
	ixlv_set_queue_tx_itr(que);

	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->task);
	else
		ixlv_enable_queue_irq(hw, que->me);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct ixl_vsi		*vsi = ifp->if_softc;
	struct ixlv_sc	*sc = vsi->back;

	INIT_DBG_IF(ifp, "begin");

	mtx_lock(&sc->mtx);

	ixlv_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_up) {
		mtx_unlock(&sc->mtx);
		INIT_DBG_IF(ifp, "end: link not up");
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;
	mtx_unlock(&sc->mtx);
	INIT_DBG_IF(ifp, "end");
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixlv_media_change(struct ifnet * ifp)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ifmedia *ifm = &vsi->media;

	INIT_DBG_IF(ifp, "begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	INIT_DBG_IF(ifp, "end");
	return (0);
}

/*********************************************************************
 *  Multicast Initialization
 *
 *  This routine is called by init to reset to a fresh multicast
 *  filter state.
 *
 **********************************************************************/

static void
ixlv_init_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter *f;
	struct ixlv_sc	*sc = vsi->back;
	int			mcnt = 0;

	IOCTL_DBG_IF(vsi->ifp, "begin");

	/* First clear any multicast filters */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}
	}
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(vsi->ifp, "end");
}

static void
ixlv_add_multi(struct ixl_vsi *vsi)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_sc	*sc = vsi->back;
	int			mcnt = 0;

	IOCTL_DBG_IF(ifp, "begin");

	if_maddr_rlock(ifp);
	/*
	** Get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mcnt++;
	}
	if_maddr_runlock(ifp);

	/* TODO: Remove -- cannot set promiscuous mode in a VF */
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete all multicast filters */
		ixlv_init_multi(vsi);
		sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
		    IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
		    sc);
		IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
		return;
	}

	mcnt = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (!ixlv_add_mac_filter(sc,
		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    IXL_FILTER_MC))
			mcnt++;
	}
	if_maddr_runlock(ifp);
	/*
	** Notify AQ task that sw filters need to be
	** added to hw list
	*/
	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
		    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}

static void
ixlv_del_multi(struct ixl_vsi *vsi)
{
	struct ixlv_mac_filter *f;
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp = vsi->ifp;
	struct ixlv_sc	*sc = vsi->back;
	int			mcnt = 0;
	bool		match = FALSE;

	IOCTL_DBG_IF(ifp, "begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			/* check if mac address in filter is in sc's list */
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr =
				    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			/* if this filter is not in the sc's list, remove it */
			if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
				IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
			}
			else if (match == FALSE)
				IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
		}
	}
	if_maddr_runlock(ifp);

	if (mcnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
		    sc);

	IOCTL_DBG_IF(ifp, "end");
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixlv_local_timer(void *arg)
{
	struct ixlv_sc	*sc = arg;
	struct i40e_hw		*hw = &sc->hw;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	device_t		dev = sc->dev;
	int			hung = 0;
	u32			mask, val;

	IXLV_CORE_LOCK_ASSERT(sc);

	/* If Reset is in progress just bail */
	if (sc->init_state == IXLV_RESET_PENDING)
		return;

	/* Check for when PF triggers a VF reset */
	val = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

	if (val != I40E_VFR_VFACTIVE
	    && val != I40E_VFR_COMPLETED) {
		DDPRINTF(dev, "reset in progress! (%d)", val);
		return;
	}

	ixlv_request_stats(sc);

	/* clean and process any events */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);

	/*
	** Check status on the queues for a hang
	*/
	mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev, "Warning: queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reset when all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
	return;

hung:
	device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!\n");
	sc->init_state = IXLV_RESET_REQUIRED;
	ixlv_init_locked(sc);
}

/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
void
ixlv_update_link_status(struct ixlv_sc *sc)
{
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ifnet		*ifp = vsi->ifp;

	if (sc->link_up) {
		if (vsi->link_active == FALSE) {
			if (bootverbose)
				if_printf(ifp, "Link is Up, %d Gbps\n",
				    (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10);
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				if_printf(ifp, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}

	return;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by disabling
 *  the VF's queues and processing any pending admin queue events.
 *
 **********************************************************************/

static void
ixlv_stop(struct ixlv_sc *sc)
{
	struct ifnet *ifp;
	int start;

	ifp = sc->vsi.ifp;
	INIT_DBG_IF(ifp, "begin");

	IXLV_CORE_LOCK_ASSERT(sc);

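	/* Flush any outstanding virtchnl commands before taking queues down */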
	ixl_vc_flush(&sc->vc_mgr);
	ixlv_disable_queues(sc);

	start = ticks;
	while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
	    ((ticks - start) < hz/10))
		ixlv_do_adminq_locked(sc);

	/* Stop the local timer */
	callout_stop(&sc->timer);

	INIT_DBG_IF(ifp, "end");
}

/*********************************************************************
 *
 *  Free all station queue structs.
 *
 **********************************************************************/
static void
ixlv_free_queues(struct ixl_vsi *vsi)
{
	struct ixlv_sc	*sc = (struct ixlv_sc *)vsi->back;
	struct ixl_queue	*que = vsi->queues;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&sc->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&sc->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
	}
	free(vsi->queues, M_DEVBUF);
}

/*
** ixlv_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	struct ixl_vsi	*vsi = &sc->vsi;
	u32		lut = 0;
	u64		set_hena = 0, hena;
	int		i, j, que_id;
#ifdef RSS
	u32		rss_hash_config;
	u32		rss_seed[IXL_KEYSZ];
#else
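	/* Arbitrary static seed, used when the kernel RSS option is absent */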
	u32		rss_seed[IXL_KEYSZ] = {0x41b01687,
			    0x183cfd8c, 0xce880440, 0x580cbc3c,
			    0x35897377, 0x328b25e1, 0x4fa98922,
			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif

	/* Don't set up RSS if using a single queue */
	if (vsi->num_queues == 1) {
		wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		ixl_flush(hw);
		return;
	}

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#endif
	/* Fill out hash function seed; the key is IXL_KEYSZ registers wide */
	for (i = 0; i < IXL_KEYSZ; i++)
		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	set_hena =
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
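	/*
	** The hash-enable set is one 64-bit mask split across two
	** 32-bit registers; OR in the new PCTYPEs so existing
	** enables are preserved.
	*/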
	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with the max number of queues in round-robin fashion */
	for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
		if (j == vsi->num_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_queues;
#else
		que_id = j;
#endif
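		/*
		** Each LUT entry occupies one byte; the 0xF mask below
		** assumes no more than the VF maximum of 16 queue pairs.
		*/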
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id & 0xF);
		/* Every fourth entry, write the accumulated window out */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
	}
	ixl_flush(hw);
}

/*
** This routine refreshes vlan filters, called by init;
** it scans the filter table and, if any entries are
** marked for addition, queues an AQ update.
*/
static void
ixlv_setup_vlan_filters(struct ixlv_sc *sc)
{
	struct ixl_vsi			*vsi = &sc->vsi;
	struct ixlv_vlan_filter	*f;
	int				cnt = 0;

	if (vsi->num_vlans == 0)
		return;
	/*
	** Scan the filter table for vlan entries,
	** and if found call for the AQ update.
	*/
	SLIST_FOREACH(f, sc->vlan_filters, next)
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	if (cnt > 0)
		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
		    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
}

/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message.
*/
static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
{
	struct ixlv_mac_filter	*f;

	/* Does one already exist? */
	f = ixlv_find_mac_filter(sc, macaddr);
	if (f != NULL) {
		IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
		    MAC_FORMAT_ARGS(macaddr));
		return (EEXIST);
	}

	/* If not, get a new empty filter */
	f = ixlv_get_mac_filter(sc);
	if (f == NULL) {
		if_printf(sc->vsi.ifp, "%s: no filters available!\n",
		    __func__);
		return (ENOMEM);
	}

	IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
	    MAC_FORMAT_ARGS(macaddr));

	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	f->flags |= flags;
	return (0);
}

/*
** Marks a MAC filter for deletion.
*/
static int
ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
{
	struct ixlv_mac_filter	*f;

	f = ixlv_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	f->flags |= IXL_FILTER_DEL;
	return (0);
}

/*
** Tasklet handler for MSIX Adminq interrupts
**  - done outside interrupt context since it might sleep
*/
static void
ixlv_do_adminq(void *context, int pending)
{
	struct ixlv_sc		*sc = context;

	mtx_lock(&sc->mtx);
	ixlv_do_adminq_locked(sc);
	mtx_unlock(&sc->mtx);
	return;
}

static void
ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
	struct i40e_hw			*hw = &sc->hw;
	struct i40e_arq_event_info	event;
	struct i40e_virtchnl_msg	*v_msg;
	device_t			dev = sc->dev;
	u16				result = 0;
	u32				reg, oldreg;
	i40e_status			ret;

	IXLV_CORE_LOCK_ASSERT(sc);

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	v_msg = (struct i40e_virtchnl_msg *)&event.desc;

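	/* Drain the admin receive queue, handing each virtchnl message
	** to the completion handler */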
	do {
		ret = i40e_clean_arq_element(hw, &event, &result);
		if (ret)
			break;
		ixlv_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		if (result != 0)
			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	} while (result);

	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	ixlv_enable_adminq_irq(hw);
}

static void
ixlv_add_sysctls(struct ixlv_sc *sc)
{
	device_t dev = sc->dev;
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_eth_stats *es = &vsi->eth_stats;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);

	struct sysctl_oid *vsi_node, *queue_node;
	struct sysctl_oid_list *vsi_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char queue_namebuf[QUEUE_NAME_LEN];

	struct ixl_queue *queues = vsi->queues;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* Driver statistics sysctls */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
			CTLFLAG_RD, &sc->watchdog_events,
			"Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
			CTLFLAG_RD, &sc->admin_irq,
			"Admin Queue IRQ Handled");

	/* VSI statistics sysctls */
	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
				   CTLFLAG_RD, NULL, "VSI-specific statistics");
	vsi_list = SYSCTL_CHILDREN(vsi_node);

	struct ixl_sysctl_info ctls[] =
	{
		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
		{&es->rx_unicast, "ucast_pkts_rcvd",
			"Unicast Packets Received"},
		{&es->rx_multicast, "mcast_pkts_rcvd",
			"Multicast Packets Received"},
		{&es->rx_broadcast, "bcast_pkts_rcvd",
			"Broadcast Packets Received"},
		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
		{&es->rx_unknown_protocol, "rx_unknown_proto",
			"RX unknown protocol packets"},
		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
		{&es->tx_unicast, "ucast_pkts_txd",
			"Unicast Packets Transmitted"},
		{&es->tx_multicast, "mcast_pkts_txd",
			"Multicast Packets Transmitted"},
		{&es->tx_broadcast, "bcast_pkts_txd",
			"Broadcast Packets Transmitted"},
		{&es->tx_errors, "tx_errors", "TX packet errors"},
		/* end of list */
		{0,0,0}
	};
	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
				CTLFLAG_RD, entry->stat,
				entry->description);
		entry++;
	}

	/* Queue sysctls */
	for (int q = 0; q < vsi->num_queues; q++) {
		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
					     CTLFLAG_RD, NULL, "Queue Statistics");
		queue_list = SYSCTL_CHILDREN(queue_node);

		txr = &(queues[q].txr);
		rxr = &(queues[q].rxr);

		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
				"m_defrag() failed");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
				CTLFLAG_RD, &(queues[q].dropped_pkts),
				"Driver dropped packets");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
				CTLFLAG_RD, &(queues[q].irqs),
				"irqs on this queue");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
				CTLFLAG_RD, &(queues[q].tso),
				"TSO");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
				CTLFLAG_RD, &(queues[q].tx_dma_setup),
				"Driver tx dma failure in xmit");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
				CTLFLAG_RD, &(txr->no_desc),
				"Queue No Descriptor Available");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
				CTLFLAG_RD, &(txr->total_packets),
				"Queue Packets Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
				CTLFLAG_RD, &(txr->tx_bytes),
				"Queue Bytes Transmitted");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
				CTLFLAG_RD, &(rxr->rx_packets),
				"Queue Packets Received");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
				CTLFLAG_RD, &(rxr->rx_bytes),
				"Queue Bytes Received");

		/* Examine queue state */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
				sizeof(struct ixl_queue),
				ixlv_sysctl_qtx_tail_handler, "IU",
				"Queue Transmit Descriptor Tail");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
				sizeof(struct ixl_queue),
				ixlv_sysctl_qrx_tail_handler, "IU",
				"Queue Receive Descriptor Tail");
	}
}

static void
ixlv_init_filters(struct ixlv_sc *sc)
{
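	/*
	** Note: the SLIST heads are allocated using the filter struct
	** sizes; this over-allocates slightly, but the head structures
	** fit comfortably within the allocations.
	*/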
	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
	return;
}

static void
ixlv_free_filters(struct ixlv_sc *sc)
{
	struct ixlv_mac_filter *f;
	struct ixlv_vlan_filter *v;

	while (!SLIST_EMPTY(sc->mac_filters)) {
		f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_DEVBUF);
	}
	while (!SLIST_EMPTY(sc->vlan_filters)) {
		v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_DEVBUF);
	}
	return;
}

/**
 * ixlv_sysctl_qtx_tail_handler
 * Retrieves I40E_QTX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->txr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}

/**
 * ixlv_sysctl_qrx_tail_handler
 * Retrieves I40E_QRX_TAIL1 value from hardware
 * for a sysctl.
 */
static int
ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
{
	struct ixl_queue *que;
	int error;
	u32 val;

	que = ((struct ixl_queue *)oidp->oid_arg1);
	if (!que)
		return (0);

	val = rd32(que->vsi->hw, que->rxr.tail);
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	return (0);
}